Posted to commits@nifi.apache.org by ph...@apache.org on 2018/06/06 14:14:29 UTC

[01/51] [partial] nifi-minifi-cpp git commit: MINIFICPP-512 - upgrade to librdkafka 0.11.4

Repository: nifi-minifi-cpp
Updated Branches:
  refs/heads/master bc6d2a120 -> 7528d23ee


http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/src-cpp/rdkafkacpp_int.h
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/src-cpp/rdkafkacpp_int.h b/thirdparty/librdkafka-0.11.4/src-cpp/rdkafkacpp_int.h
new file mode 100644
index 0000000..8db39e8
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/src-cpp/rdkafkacpp_int.h
@@ -0,0 +1,910 @@
+/*
+ * librdkafka - Apache Kafka C/C++ library
+ *
+ * Copyright (c) 2014 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RDKAFKACPP_INT_H_
+#define _RDKAFKACPP_INT_H_
+
+#include <string>
+#include <iostream>
+#include <cstring>
+#include <stdlib.h>
+
+#include "rdkafkacpp.h"
+
+extern "C" {
+#include "../src/rdkafka.h"
+}
+
+#ifdef _MSC_VER
+typedef int mode_t;
+#pragma warning(disable : 4250)
+#endif
+
+
+namespace RdKafka {
+
+
+void consume_cb_trampoline(rd_kafka_message_t *msg, void *opaque);
+void log_cb_trampoline (const rd_kafka_t *rk, int level,
+                        const char *fac, const char *buf);
+void error_cb_trampoline (rd_kafka_t *rk, int err, const char *reason,
+                          void *opaque);
+void throttle_cb_trampoline (rd_kafka_t *rk, const char *broker_name,
+			     int32_t broker_id, int throttle_time_ms,
+			     void *opaque);
+int stats_cb_trampoline (rd_kafka_t *rk, char *json, size_t json_len,
+                         void *opaque);
+int socket_cb_trampoline (int domain, int type, int protocol, void *opaque);
+int open_cb_trampoline (const char *pathname, int flags, mode_t mode,
+                        void *opaque);
+void rebalance_cb_trampoline (rd_kafka_t *rk,
+                              rd_kafka_resp_err_t err,
+                              rd_kafka_topic_partition_list_t *c_partitions,
+                              void *opaque);
+void offset_commit_cb_trampoline0 (
+        rd_kafka_t *rk,
+        rd_kafka_resp_err_t err,
+        rd_kafka_topic_partition_list_t *c_offsets, void *opaque);
+
+rd_kafka_topic_partition_list_t *
+    partitions_to_c_parts (const std::vector<TopicPartition*> &partitions);
+
+/**
+ * @brief Update the application provided 'partitions' with info from 'c_parts'
+ */
+void update_partitions_from_c_parts (std::vector<TopicPartition*> &partitions,
+                                     const rd_kafka_topic_partition_list_t *c_parts);
+
+
+class EventImpl : public Event {
+ public:
+  ~EventImpl () {};
+
+  EventImpl (Type type, ErrorCode err, Severity severity,
+             const char *fac, const char *str):
+  type_(type), err_(err), severity_(severity), fac_(fac ? fac : ""),
+	  str_(str), id_(0), throttle_time_(0) {};
+
+  EventImpl (Type type):
+  type_(type), err_(ERR_NO_ERROR), severity_(EVENT_SEVERITY_EMERG),
+	  fac_(""), str_(""), id_(0), throttle_time_(0) {};
+
+  Type        type () const { return type_; }
+  ErrorCode   err () const { return err_; }
+  Severity    severity () const { return severity_; }
+  std::string fac () const { return fac_; }
+  std::string str () const { return str_; }
+  std::string broker_name () const {
+	  if (type_ == EVENT_THROTTLE)
+		  return str_;
+	  else
+		  return std::string("");
+  }
+  int         broker_id () const { return id_; }
+  int         throttle_time () const { return throttle_time_; }
+
+  Type        type_;
+  ErrorCode   err_;
+  Severity    severity_;
+  std::string fac_;
+  std::string str_;         /* reused for THROTTLE broker_name */
+  int         id_;
+  int         throttle_time_;
+};
+
+
+class MessageImpl : public Message {
+ public:
+  ~MessageImpl () {
+    if (free_rkmessage_)
+      rd_kafka_message_destroy(const_cast<rd_kafka_message_t *>(rkmessage_));
+    if (key_)
+            delete key_;
+  };
+
+  MessageImpl (RdKafka::Topic *topic, rd_kafka_message_t *rkmessage):
+  topic_(topic), rkmessage_(rkmessage), free_rkmessage_(true), key_(NULL) {}
+
+  MessageImpl (RdKafka::Topic *topic, rd_kafka_message_t *rkmessage,
+               bool dofree):
+  topic_(topic), rkmessage_(rkmessage), free_rkmessage_(dofree), key_(NULL) { }
+
+  MessageImpl (rd_kafka_message_t *rkmessage):
+  topic_(NULL), rkmessage_(rkmessage), free_rkmessage_(true), key_(NULL) {
+    if (rkmessage->rkt) {
+      /* Possibly NULL */
+      topic_ = static_cast<Topic *>(rd_kafka_topic_opaque(rkmessage->rkt));
+    }
+  }
+
+  /* Create errored message */
+  MessageImpl (RdKafka::Topic *topic, RdKafka::ErrorCode err):
+  topic_(topic), free_rkmessage_(false), key_(NULL) {
+    rkmessage_ = &rkmessage_err_;
+    memset(&rkmessage_err_, 0, sizeof(rkmessage_err_));
+    rkmessage_err_.err = static_cast<rd_kafka_resp_err_t>(err);
+  }
+
+  std::string         errstr() const {
+    /* FIXME: If there is an error string in payload (for consume_cb)
+     *        it won't be shown since 'payload' is reused for errstr
+     *        and we can't distinguish between consumer and producer.
+     *        For the producer case the payload needs to be the original
+     *        payload pointer. */
+    const char *es = rd_kafka_err2str(rkmessage_->err);
+    return std::string(es ? es : "");
+  }
+
+  ErrorCode           err () const {
+    return static_cast<RdKafka::ErrorCode>(rkmessage_->err);
+  }
+
+  Topic              *topic () const { return topic_; }
+  std::string         topic_name  () const {
+          if (rkmessage_->rkt)
+                  return rd_kafka_topic_name(rkmessage_->rkt);
+          else
+                  return "";
+  }
+  int32_t             partition () const { return rkmessage_->partition; }
+  void               *payload () const { return rkmessage_->payload; }
+  size_t              len () const { return rkmessage_->len; }
+  const std::string  *key () const {
+    if (key_) {
+      return key_;
+    } else if (rkmessage_->key) {
+      key_ = new std::string(static_cast<char const*>(rkmessage_->key), rkmessage_->key_len);
+      return key_;
+    }
+    return NULL;
+  }
+  const void         *key_pointer () const { return rkmessage_->key; }
+  size_t              key_len () const { return rkmessage_->key_len; }
+
+  int64_t             offset () const { return rkmessage_->offset; }
+
+  MessageTimestamp   timestamp () const {
+	  MessageTimestamp ts;
+	  rd_kafka_timestamp_type_t tstype;
+	  ts.timestamp = rd_kafka_message_timestamp(rkmessage_, &tstype);
+	  ts.type = static_cast<MessageTimestamp::MessageTimestampType>(tstype);
+	  return ts;
+  }
+
+  void               *msg_opaque () const { return rkmessage_->_private; };
+
+  int64_t             latency () const {
+          return rd_kafka_message_latency(rkmessage_);
+  }
+
+  struct rd_kafka_message_s *c_ptr () {
+          return rkmessage_;
+  }
+
+  RdKafka::Topic *topic_;
+  rd_kafka_message_t *rkmessage_;
+  bool free_rkmessage_;
+  /* For error signalling by the C++ layer the .._err_ message is
+   * used as a placeholder and rkmessage_ is set to point to it. */
+  rd_kafka_message_t rkmessage_err_;
+  mutable std::string *key_; /* mutable because it's a cached value */
+
+private:
+  /* "delete" copy ctor + copy assignment, for safety of key_ */
+  MessageImpl(MessageImpl const&) /*= delete*/;
+  MessageImpl& operator=(MessageImpl const&) /*= delete*/;
+};
+
+
+class ConfImpl : public Conf {
+ public:
+  ConfImpl()
+      :consume_cb_(NULL),
+      dr_cb_(NULL),
+      event_cb_(NULL),
+      socket_cb_(NULL),
+      open_cb_(NULL),
+      partitioner_cb_(NULL),
+      partitioner_kp_cb_(NULL),
+      rebalance_cb_(NULL),
+      offset_commit_cb_(NULL),
+      rk_conf_(NULL),
+      rkt_conf_(NULL){}
+  ~ConfImpl () {
+    if (rk_conf_)
+      rd_kafka_conf_destroy(rk_conf_);
+    else if (rkt_conf_)
+      rd_kafka_topic_conf_destroy(rkt_conf_);
+  }
+
+  Conf::ConfResult set(const std::string &name,
+                       const std::string &value,
+                       std::string &errstr);
+
+  Conf::ConfResult set (const std::string &name, DeliveryReportCb *dr_cb,
+                        std::string &errstr) {
+    if (name != "dr_cb") {
+      errstr = "Invalid value type, expected RdKafka::DeliveryReportCb";
+      return Conf::CONF_INVALID;
+    }
+
+    if (!rk_conf_) {
+      errstr = "Requires RdKafka::Conf::CONF_GLOBAL object";
+      return Conf::CONF_INVALID;
+    }
+
+    dr_cb_ = dr_cb;
+    return Conf::CONF_OK;
+  }
+
+  Conf::ConfResult set (const std::string &name, EventCb *event_cb,
+                        std::string &errstr) {
+    if (name != "event_cb") {
+      errstr = "Invalid value type, expected RdKafka::EventCb";
+      return Conf::CONF_INVALID;
+    }
+
+    if (!rk_conf_) {
+      errstr = "Requires RdKafka::Conf::CONF_GLOBAL object";
+      return Conf::CONF_INVALID;
+    }
+
+    event_cb_ = event_cb;
+    return Conf::CONF_OK;
+  }
+
+  Conf::ConfResult set (const std::string &name, const Conf *topic_conf,
+                        std::string &errstr) {
+    const ConfImpl *tconf_impl =
+        dynamic_cast<const RdKafka::ConfImpl *>(topic_conf);
+    if (name != "default_topic_conf" || !tconf_impl->rkt_conf_) {
+      errstr = "Invalid value type, expected RdKafka::Conf";
+      return Conf::CONF_INVALID;
+    }
+
+    if (!rk_conf_) {
+      errstr = "Requires RdKafka::Conf::CONF_GLOBAL object";
+      return Conf::CONF_INVALID;
+    }
+
+    rd_kafka_conf_set_default_topic_conf(rk_conf_,
+                                         rd_kafka_topic_conf_dup(tconf_impl->
+                                                                 rkt_conf_));
+
+    return Conf::CONF_OK;
+  }
+
+  Conf::ConfResult set (const std::string &name, PartitionerCb *partitioner_cb,
+                        std::string &errstr) {
+    if (name != "partitioner_cb") {
+      errstr = "Invalid value type, expected RdKafka::PartitionerCb";
+      return Conf::CONF_INVALID;
+    }
+
+    if (!rkt_conf_) {
+      errstr = "Requires RdKafka::Conf::CONF_TOPIC object";
+      return Conf::CONF_INVALID;
+    }
+
+    partitioner_cb_ = partitioner_cb;
+    return Conf::CONF_OK;
+  }
+
+  Conf::ConfResult set (const std::string &name,
+                        PartitionerKeyPointerCb *partitioner_kp_cb,
+                        std::string &errstr) {
+    if (name != "partitioner_key_pointer_cb") {
+      errstr = "Invalid value type, expected RdKafka::PartitionerKeyPointerCb";
+      return Conf::CONF_INVALID;
+    }
+
+    if (!rkt_conf_) {
+      errstr = "Requires RdKafka::Conf::CONF_TOPIC object";
+      return Conf::CONF_INVALID;
+    }
+
+    partitioner_kp_cb_ = partitioner_kp_cb;
+    return Conf::CONF_OK;
+  }
+
+  Conf::ConfResult set (const std::string &name, SocketCb *socket_cb,
+                        std::string &errstr) {
+    if (name != "socket_cb") {
+      errstr = "Invalid value type, expected RdKafka::SocketCb";
+      return Conf::CONF_INVALID;
+    }
+
+    if (!rk_conf_) {
+      errstr = "Requires RdKafka::Conf::CONF_GLOBAL object";
+      return Conf::CONF_INVALID;
+    }
+
+    socket_cb_ = socket_cb;
+    return Conf::CONF_OK;
+  }
+
+
+  Conf::ConfResult set (const std::string &name, OpenCb *open_cb,
+                        std::string &errstr) {
+    if (name != "open_cb") {
+      errstr = "Invalid value type, expected RdKafka::OpenCb";
+      return Conf::CONF_INVALID;
+    }
+
+    if (!rk_conf_) {
+      errstr = "Requires RdKafka::Conf::CONF_GLOBAL object";
+      return Conf::CONF_INVALID;
+    }
+
+    open_cb_ = open_cb;
+    return Conf::CONF_OK;
+  }
+
+
+
+
+  Conf::ConfResult set (const std::string &name, RebalanceCb *rebalance_cb,
+                        std::string &errstr) {
+    if (name != "rebalance_cb") {
+      errstr = "Invalid value type, expected RdKafka::RebalanceCb";
+      return Conf::CONF_INVALID;
+    }
+
+    if (!rk_conf_) {
+      errstr = "Requires RdKafka::Conf::CONF_GLOBAL object";
+      return Conf::CONF_INVALID;
+    }
+
+    rebalance_cb_ = rebalance_cb;
+    return Conf::CONF_OK;
+  }
+
+
+  Conf::ConfResult set (const std::string &name,
+                        OffsetCommitCb *offset_commit_cb,
+                        std::string &errstr) {
+    if (name != "offset_commit_cb") {
+      errstr = "Invalid value type, expected RdKafka::OffsetCommitCb";
+      return Conf::CONF_INVALID;
+    }
+
+    if (!rk_conf_) {
+      errstr = "Requires RdKafka::Conf::CONF_GLOBAL object";
+      return Conf::CONF_INVALID;
+    }
+
+    offset_commit_cb_ = offset_commit_cb;
+    return Conf::CONF_OK;
+  }
+
+  Conf::ConfResult get(const std::string &name, std::string &value) const {
+    if (name.compare("dr_cb") == 0 ||
+        name.compare("event_cb") == 0 ||
+        name.compare("partitioner_cb") == 0 ||
+        name.compare("partitioner_key_pointer_cb") == 0 ||
+        name.compare("socket_cb") == 0 ||
+        name.compare("open_cb") == 0 ||
+        name.compare("rebalance_cb") == 0 ||
+        name.compare("offset_commit_cb") == 0 ) {
+      return Conf::CONF_INVALID;
+    }
+    rd_kafka_conf_res_t res = RD_KAFKA_CONF_INVALID;
+
+    /* Get size of property */
+    size_t size;
+    if (rk_conf_)
+      res = rd_kafka_conf_get(rk_conf_,
+                              name.c_str(), NULL, &size);
+    else if (rkt_conf_)
+      res = rd_kafka_topic_conf_get(rkt_conf_,
+                                    name.c_str(), NULL, &size);
+    if (res != RD_KAFKA_CONF_OK)
+      return static_cast<Conf::ConfResult>(res);
+
+    char *tmpValue = new char[size];
+
+    if (rk_conf_)
+      res = rd_kafka_conf_get(rk_conf_, name.c_str(),
+                              tmpValue, &size);
+    else if (rkt_conf_)
+      res = rd_kafka_topic_conf_get(rkt_conf_,
+                                    name.c_str(), NULL, &size);
+
+    if (res == RD_KAFKA_CONF_OK)
+      value.assign(tmpValue);
+    delete[] tmpValue;
+
+    return static_cast<Conf::ConfResult>(res);
+  }
+
+  Conf::ConfResult get(DeliveryReportCb *&dr_cb) const {
+      if (!rk_conf_)
+	  return Conf::CONF_INVALID;
+      dr_cb = this->dr_cb_;
+      return Conf::CONF_OK;
+  }
+
+  Conf::ConfResult get(EventCb *&event_cb) const {
+      if (!rk_conf_)
+	  return Conf::CONF_INVALID;
+      event_cb = this->event_cb_;
+      return Conf::CONF_OK;
+  }
+
+  Conf::ConfResult get(PartitionerCb *&partitioner_cb) const {
+      if (!rkt_conf_)
+	  return Conf::CONF_INVALID;
+      partitioner_cb = this->partitioner_cb_;
+      return Conf::CONF_OK;
+  }
+
+  Conf::ConfResult get(PartitionerKeyPointerCb *&partitioner_kp_cb) const {
+      if (!rkt_conf_)
+	  return Conf::CONF_INVALID;
+      partitioner_kp_cb = this->partitioner_kp_cb_;
+      return Conf::CONF_OK;
+  }
+
+  Conf::ConfResult get(SocketCb *&socket_cb) const {
+      if (!rk_conf_)
+	  return Conf::CONF_INVALID;
+      socket_cb = this->socket_cb_;
+      return Conf::CONF_OK;
+  }
+
+  Conf::ConfResult get(OpenCb *&open_cb) const {
+      if (!rk_conf_)
+	  return Conf::CONF_INVALID;
+      open_cb = this->open_cb_;
+      return Conf::CONF_OK;
+  }
+
+  Conf::ConfResult get(RebalanceCb *&rebalance_cb) const {
+      if (!rk_conf_)
+	  return Conf::CONF_INVALID;
+      rebalance_cb = this->rebalance_cb_;
+      return Conf::CONF_OK;
+  }
+
+  Conf::ConfResult get(OffsetCommitCb *&offset_commit_cb) const {
+      if (!rk_conf_)
+	  return Conf::CONF_INVALID;
+      offset_commit_cb = this->offset_commit_cb_;
+      return Conf::CONF_OK;
+    }
+
+
+
+  std::list<std::string> *dump ();
+
+
+  Conf::ConfResult set (const std::string &name, ConsumeCb *consume_cb,
+                        std::string &errstr) {
+    if (name != "consume_cb") {
+      errstr = "Invalid value type, expected RdKafka::ConsumeCb";
+      return Conf::CONF_INVALID;
+    }
+
+    if (!rk_conf_) {
+      errstr = "Requires RdKafka::Conf::CONF_GLOBAL object";
+      return Conf::CONF_INVALID;
+    }
+
+    consume_cb_ = consume_cb;
+    return Conf::CONF_OK;
+  }
+
+
+  ConsumeCb *consume_cb_;
+  DeliveryReportCb *dr_cb_;
+  EventCb *event_cb_;
+  SocketCb *socket_cb_;
+  OpenCb *open_cb_;
+  PartitionerCb *partitioner_cb_;
+  PartitionerKeyPointerCb *partitioner_kp_cb_;
+  RebalanceCb *rebalance_cb_;
+  OffsetCommitCb *offset_commit_cb_;
+  ConfType conf_type_;
+  rd_kafka_conf_t *rk_conf_;
+  rd_kafka_topic_conf_t *rkt_conf_;
+};
+
+
+class HandleImpl : virtual public Handle {
+ public:
+  ~HandleImpl() {};
+  HandleImpl () {};
+  const std::string name () const { return std::string(rd_kafka_name(rk_)); };
+  const std::string memberid () const {
+	  char *str = rd_kafka_memberid(rk_);
+	  std::string memberid = str ? str : "";
+	  if (str)
+		  rd_kafka_mem_free(rk_, str);
+	  return memberid;
+  }
+  int poll (int timeout_ms) { return rd_kafka_poll(rk_, timeout_ms); };
+  int outq_len () { return rd_kafka_outq_len(rk_); };
+
+  void set_common_config (RdKafka::ConfImpl *confimpl);
+
+  RdKafka::ErrorCode metadata (bool all_topics,const Topic *only_rkt,
+            Metadata **metadatap, int timeout_ms);
+
+  ErrorCode pause (std::vector<TopicPartition*> &partitions);
+  ErrorCode resume (std::vector<TopicPartition*> &partitions);
+
+  ErrorCode query_watermark_offsets (const std::string &topic,
+				     int32_t partition,
+				     int64_t *low, int64_t *high,
+				     int timeout_ms) {
+    return static_cast<RdKafka::ErrorCode>(
+        rd_kafka_query_watermark_offsets(
+            rk_, topic.c_str(), partition,
+            low, high, timeout_ms));
+  }
+
+  ErrorCode get_watermark_offsets (const std::string &topic,
+                                   int32_t partition,
+                                   int64_t *low, int64_t *high) {
+    return static_cast<RdKafka::ErrorCode>(
+        rd_kafka_get_watermark_offsets(
+            rk_, topic.c_str(), partition,
+            low, high));
+  }
+
+  Queue *get_partition_queue (const TopicPartition *partition);
+
+  ErrorCode offsetsForTimes (std::vector<TopicPartition*> &offsets,
+                             int timeout_ms) {
+    rd_kafka_topic_partition_list_t *c_offsets = partitions_to_c_parts(offsets);
+    ErrorCode err = static_cast<ErrorCode>(
+        rd_kafka_offsets_for_times(rk_, c_offsets, timeout_ms));
+    update_partitions_from_c_parts(offsets, c_offsets);
+    rd_kafka_topic_partition_list_destroy(c_offsets);
+    return err;
+  }
+
+  ErrorCode set_log_queue (Queue *queue);
+
+  void yield () {
+    rd_kafka_yield(rk_);
+  }
+
+  const std::string clusterid (int timeout_ms) {
+          char *str = rd_kafka_clusterid(rk_, timeout_ms);
+          std::string clusterid = str ? str : "";
+          if (str)
+                  rd_kafka_mem_free(rk_, str);
+          return clusterid;
+  }
+
+  struct rd_kafka_s *c_ptr () {
+          return rk_;
+  }
+
+  rd_kafka_t *rk_;
+  /* All Producer and Consumer callbacks must reside in HandleImpl and
+   * the opaque provided to rdkafka must be a pointer to HandleImpl, since
+   * ProducerImpl and ConsumerImpl classes cannot be safely directly cast to
+   * HandleImpl due to the skewed diamond inheritance. */
+  ConsumeCb *consume_cb_;
+  EventCb *event_cb_;
+  SocketCb *socket_cb_;
+  OpenCb *open_cb_;
+  DeliveryReportCb *dr_cb_;
+  PartitionerCb *partitioner_cb_;
+  PartitionerKeyPointerCb *partitioner_kp_cb_;
+  RebalanceCb *rebalance_cb_;
+  OffsetCommitCb *offset_commit_cb_;
+};
+
+
+class TopicImpl : public Topic {
+ public:
+  ~TopicImpl () {
+    rd_kafka_topic_destroy(rkt_);
+  }
+
+  const std::string name () const {
+    return rd_kafka_topic_name(rkt_);
+  }
+
+  bool partition_available (int32_t partition) const {
+    return !!rd_kafka_topic_partition_available(rkt_, partition);
+  }
+
+  ErrorCode offset_store (int32_t partition, int64_t offset) {
+    return static_cast<RdKafka::ErrorCode>(
+        rd_kafka_offset_store(rkt_, partition, offset));
+  }
+
+  static Topic *create (Handle &base, const std::string &topic,
+                        Conf *conf);
+
+  struct rd_kafka_topic_s *c_ptr () {
+          return rkt_;
+  }
+
+  rd_kafka_topic_t *rkt_;
+  PartitionerCb *partitioner_cb_;
+  PartitionerKeyPointerCb *partitioner_kp_cb_;
+};
+
+
+/**
+ * Topic and Partition
+ */
+class TopicPartitionImpl : public TopicPartition {
+public:
+  ~TopicPartitionImpl() {};
+
+  static TopicPartition *create (const std::string &topic, int partition);
+
+  TopicPartitionImpl (const std::string &topic, int partition):
+  topic_(topic), partition_(partition), offset_(RdKafka::Topic::OFFSET_INVALID),
+      err_(ERR_NO_ERROR) {}
+
+  TopicPartitionImpl (const std::string &topic, int partition, int64_t offset):
+  topic_(topic), partition_(partition), offset_(offset),
+          err_(ERR_NO_ERROR) {}
+
+  TopicPartitionImpl (const rd_kafka_topic_partition_t *c_part) {
+    topic_ = std::string(c_part->topic);
+    partition_ = c_part->partition;
+    offset_ = c_part->offset;
+    err_ = static_cast<ErrorCode>(c_part->err);
+    // FIXME: metadata
+  }
+
+  static void destroy (std::vector<TopicPartition*> &partitions);
+
+  int partition () const { return partition_; }
+  const std::string &topic () const { return topic_ ; }
+
+  int64_t offset () const { return offset_; }
+
+  ErrorCode err () const { return err_; }
+
+  void set_offset (int64_t offset) { offset_ = offset; }
+
+  std::ostream& operator<<(std::ostream &ostrm) const {
+    return ostrm << topic_ << " [" << partition_ << "]";
+  }
+
+  std::string topic_;
+  int partition_;
+  int64_t offset_;
+  ErrorCode err_;
+};
+
+
+
+class KafkaConsumerImpl : virtual public KafkaConsumer, virtual public HandleImpl {
+public:
+  ~KafkaConsumerImpl () {
+
+  }
+
+  static KafkaConsumer *create (Conf *conf, std::string &errstr);
+
+  ErrorCode assignment (std::vector<TopicPartition*> &partitions);
+  ErrorCode subscription (std::vector<std::string> &topics);
+  ErrorCode subscribe (const std::vector<std::string> &topics);
+  ErrorCode unsubscribe ();
+  ErrorCode assign (const std::vector<TopicPartition*> &partitions);
+  ErrorCode unassign ();
+
+  Message *consume (int timeout_ms);
+  ErrorCode commitSync () {
+    return static_cast<ErrorCode>(rd_kafka_commit(rk_, NULL, 0/*sync*/));
+  }
+  ErrorCode commitAsync () {
+    return static_cast<ErrorCode>(rd_kafka_commit(rk_, NULL, 1/*async*/));
+  }
+  ErrorCode commitSync (Message *message) {
+	  MessageImpl *msgimpl = dynamic_cast<MessageImpl*>(message);
+	  return static_cast<ErrorCode>(
+                  rd_kafka_commit_message(rk_, msgimpl->rkmessage_, 0/*sync*/));
+  }
+  ErrorCode commitAsync (Message *message) {
+	  MessageImpl *msgimpl = dynamic_cast<MessageImpl*>(message);
+	  return static_cast<ErrorCode>(
+                  rd_kafka_commit_message(rk_, msgimpl->rkmessage_,1/*async*/));
+  }
+
+  ErrorCode commitSync (std::vector<TopicPartition*> &offsets) {
+	  rd_kafka_topic_partition_list_t *c_parts =
+		  partitions_to_c_parts(offsets);
+	  rd_kafka_resp_err_t err =
+		  rd_kafka_commit(rk_, c_parts, 0);
+	  if (!err)
+		  update_partitions_from_c_parts(offsets, c_parts);
+	  rd_kafka_topic_partition_list_destroy(c_parts);
+	  return static_cast<ErrorCode>(err);
+  }
+
+  ErrorCode commitAsync (const std::vector<TopicPartition*> &offsets) {
+	  rd_kafka_topic_partition_list_t *c_parts =
+		  partitions_to_c_parts(offsets);
+	  rd_kafka_resp_err_t err =
+		  rd_kafka_commit(rk_, c_parts, 1);
+	  rd_kafka_topic_partition_list_destroy(c_parts);
+	  return static_cast<ErrorCode>(err);
+  }
+
+  ErrorCode commitSync (OffsetCommitCb *offset_commit_cb) {
+          return static_cast<ErrorCode>(
+                  rd_kafka_commit_queue(rk_, NULL, NULL,
+                                        RdKafka::offset_commit_cb_trampoline0,
+                                        offset_commit_cb));
+  }
+
+  ErrorCode commitSync (std::vector<TopicPartition*> &offsets,
+                        OffsetCommitCb *offset_commit_cb) {
+          rd_kafka_topic_partition_list_t *c_parts =
+                  partitions_to_c_parts(offsets);
+          rd_kafka_resp_err_t err =
+                  rd_kafka_commit_queue(rk_, c_parts, NULL,
+                                        RdKafka::offset_commit_cb_trampoline0,
+                                        offset_commit_cb);
+          rd_kafka_topic_partition_list_destroy(c_parts);
+          return static_cast<ErrorCode>(err);
+  }
+
+  ErrorCode committed (std::vector<TopicPartition*> &partitions, int timeout_ms);
+  ErrorCode position (std::vector<TopicPartition*> &partitions);
+
+  ErrorCode close ();
+
+  ErrorCode seek (const TopicPartition &partition, int timeout_ms);
+
+  ErrorCode offsets_store (std::vector<TopicPartition*> &offsets) {
+          rd_kafka_topic_partition_list_t *c_parts =
+                  partitions_to_c_parts(offsets);
+          rd_kafka_resp_err_t err =
+                  rd_kafka_offsets_store(rk_, c_parts);
+          update_partitions_from_c_parts(offsets, c_parts);
+          rd_kafka_topic_partition_list_destroy(c_parts);
+          return static_cast<ErrorCode>(err);
+  }
+
+};
+
+
+class MetadataImpl : public Metadata {
+ public:
+  MetadataImpl(const rd_kafka_metadata_t *metadata);
+  ~MetadataImpl();
+
+  const std::vector<const BrokerMetadata *> *brokers() const {
+    return &brokers_;
+  }
+
+  const std::vector<const TopicMetadata *>  *topics() const {
+    return &topics_;
+  }
+
+  const std::string orig_broker_name() const {
+    return std::string(metadata_->orig_broker_name);
+  }
+
+  int32_t orig_broker_id() const {
+    return metadata_->orig_broker_id;
+  }
+
+private:
+  const rd_kafka_metadata_t *metadata_;
+  std::vector<const BrokerMetadata *> brokers_;
+  std::vector<const TopicMetadata *> topics_;
+  std::string orig_broker_name_;
+};
+
+
+class QueueImpl : virtual public Queue {
+ public:
+  ~QueueImpl () {
+    rd_kafka_queue_destroy(queue_);
+  }
+  static Queue *create (Handle *base);
+  ErrorCode forward (Queue *queue);
+  Message *consume (int timeout_ms);
+  int poll (int timeout_ms);
+  void io_event_enable(int fd, const void *payload, size_t size);
+
+  rd_kafka_queue_t *queue_;
+};
+
+
+
+
+
+class ConsumerImpl : virtual public Consumer, virtual public HandleImpl {
+ public:
+  ~ConsumerImpl () {
+    rd_kafka_destroy(rk_); };
+  static Consumer *create (Conf *conf, std::string &errstr);
+
+  ErrorCode start (Topic *topic, int32_t partition, int64_t offset);
+  ErrorCode start (Topic *topic, int32_t partition, int64_t offset,
+                   Queue *queue);
+  ErrorCode stop (Topic *topic, int32_t partition);
+  ErrorCode seek (Topic *topic, int32_t partition, int64_t offset,
+		  int timeout_ms);
+  Message *consume (Topic *topic, int32_t partition, int timeout_ms);
+  Message *consume (Queue *queue, int timeout_ms);
+  int consume_callback (Topic *topic, int32_t partition, int timeout_ms,
+                        ConsumeCb *cb, void *opaque);
+  int consume_callback (Queue *queue, int timeout_ms,
+                        RdKafka::ConsumeCb *consume_cb, void *opaque);
+};
+
+
+
+class ProducerImpl : virtual public Producer, virtual public HandleImpl {
+
+ public:
+  ~ProducerImpl () { if (rk_) rd_kafka_destroy(rk_); };
+
+  ErrorCode produce (Topic *topic, int32_t partition,
+                     int msgflags,
+                     void *payload, size_t len,
+                     const std::string *key,
+                     void *msg_opaque);
+
+  ErrorCode produce (Topic *topic, int32_t partition,
+                     int msgflags,
+                     void *payload, size_t len,
+                     const void *key, size_t key_len,
+                     void *msg_opaque);
+
+  ErrorCode produce (Topic *topic, int32_t partition,
+                     const std::vector<char> *payload,
+                     const std::vector<char> *key,
+                     void *msg_opaque);
+
+  ErrorCode produce (const std::string topic_name, int32_t partition,
+                     int msgflags,
+                     void *payload, size_t len,
+                     const void *key, size_t key_len,
+                     int64_t timestamp,
+                     void *msg_opaque);
+
+  ErrorCode flush (int timeout_ms) {
+	  return static_cast<RdKafka::ErrorCode>(rd_kafka_flush(rk_,
+								timeout_ms));
+  }
+
+  static Producer *create (Conf *conf, std::string &errstr);
+
+};
+
+
+
+}
+
+#endif /* _RDKAFKACPP_INT_H_ */
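
For orientation, the classes in this internal header back the public RdKafka C++ API declared in rdkafkacpp.h (Conf, Producer, Consumer, etc.). The sketch below is a minimal, illustrative example of driving them through that public API; it is not part of this commit, the broker address ("localhost:9092") and topic name ("test-topic") are placeholders, and it assumes the program is linked against librdkafka++. It uses the string-topic produce() overload and flush() declared above.

#include <iostream>
#include <string>

#include "rdkafkacpp.h"

int main() {
  std::string errstr;

  /* CONF_GLOBAL yields a ConfImpl with rk_conf_ set; topic-level settings
   * such as partitioner_cb would need a CONF_TOPIC object instead. */
  RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
  if (conf->set("bootstrap.servers", "localhost:9092", errstr) !=
      RdKafka::Conf::CONF_OK) {
    std::cerr << errstr << std::endl;
    return 1;
  }

  /* The configuration is duplicated inside create(), so the Conf object
   * can be deleted once the producer exists. */
  RdKafka::Producer *producer = RdKafka::Producer::create(conf, errstr);
  delete conf;
  if (!producer) {
    std::cerr << errstr << std::endl;
    return 1;
  }

  std::string payload = "hello";
  /* produce() overload taking a topic name and a timestamp (0 lets
   * librdkafka assign the current time); RK_MSG_COPY copies the payload. */
  RdKafka::ErrorCode err = producer->produce(
      "test-topic", RdKafka::Topic::PARTITION_UA,
      RdKafka::Producer::RK_MSG_COPY,
      const_cast<char *>(payload.data()), payload.size(),
      NULL, 0, 0 /* timestamp */, NULL /* msg_opaque */);
  if (err != RdKafka::ERR_NO_ERROR)
    std::cerr << "produce failed: " << RdKafka::err2str(err) << std::endl;

  producer->flush(10 * 1000);  /* wait up to 10s for outstanding delivery */
  delete producer;
  return 0;
}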


[22/51] [partial] nifi-minifi-cpp git commit: MINIFICPP-512 - upgrade to librdkafka 0.11.4

Posted by ph...@apache.org.
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdkafka_partition.c
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdkafka_partition.c b/thirdparty/librdkafka-0.11.1/src/rdkafka_partition.c
deleted file mode 100644
index fdd1611..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdkafka_partition.c
+++ /dev/null
@@ -1,3272 +0,0 @@
-/*
- * librdkafka - The Apache Kafka C/C++ library
- *
- * Copyright (c) 2015 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-#include "rdkafka_int.h"
-#include "rdkafka_topic.h"
-#include "rdkafka_broker.h"
-#include "rdkafka_request.h"
-#include "rdkafka_offset.h"
-#include "rdkafka_partition.h"
-#include "rdregex.h"
-#include "rdports.h"  /* rd_qsort_r() */
-
-const char *rd_kafka_fetch_states[] = {
-	"none",
-        "stopping",
-        "stopped",
-	"offset-query",
-	"offset-wait",
-	"active"
-};
-
-
-static rd_kafka_op_res_t
-rd_kafka_toppar_op_serve (rd_kafka_t *rk,
-                          rd_kafka_q_t *rkq, rd_kafka_op_t *rko,
-                          rd_kafka_q_cb_type_t cb_type, void *opaque);
-static RD_INLINE void rd_kafka_broker_fetch_toppar_del (rd_kafka_broker_t *rkb,
-                                                       rd_kafka_toppar_t *rktp);
-
-
-
-static RD_INLINE int32_t
-rd_kafka_toppar_version_new_barrier0 (rd_kafka_toppar_t *rktp,
-				     const char *func, int line) {
-	int32_t version = rd_atomic32_add(&rktp->rktp_version, 1);
-	rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "BARRIER",
-		     "%s [%"PRId32"]: %s:%d: new version barrier v%"PRId32,
-		     rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
-		     func, line, version);
-	return version;
-}
-
-#define rd_kafka_toppar_version_new_barrier(rktp) \
-	rd_kafka_toppar_version_new_barrier0(rktp, __FUNCTION__, __LINE__)
-
-
-/**
- * Toppar based OffsetResponse handling.
- * This is used for updating the low water mark for consumer lag.
- */
-static void rd_kafka_toppar_lag_handle_Offset (rd_kafka_t *rk,
-					       rd_kafka_broker_t *rkb,
-					       rd_kafka_resp_err_t err,
-					       rd_kafka_buf_t *rkbuf,
-					       rd_kafka_buf_t *request,
-					       void *opaque) {
-        shptr_rd_kafka_toppar_t *s_rktp = opaque;
-        rd_kafka_toppar_t *rktp = rd_kafka_toppar_s2i(s_rktp);
-        rd_kafka_topic_partition_list_t *offsets;
-        rd_kafka_topic_partition_t *rktpar;
-
-        offsets = rd_kafka_topic_partition_list_new(1);
-
-        /* Parse and return Offset */
-        err = rd_kafka_handle_Offset(rkb->rkb_rk, rkb, err,
-                                     rkbuf, request, offsets);
-        if (!err && !(rktpar = rd_kafka_topic_partition_list_find(
-                              offsets,
-                              rktp->rktp_rkt->rkt_topic->str,
-                              rktp->rktp_partition)))
-                err = RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION;
-
-        if (!err) {
-                rd_kafka_toppar_lock(rktp);
-                rktp->rktp_lo_offset = rktpar->offset;
-                rd_kafka_toppar_unlock(rktp);
-        }
-
-        rd_kafka_topic_partition_list_destroy(offsets);
-
-        rktp->rktp_wait_consumer_lag_resp = 0;
-
-        rd_kafka_toppar_destroy(s_rktp); /* from request.opaque */
-}
-
-
-
-/**
- * Request information from broker to keep track of consumer lag.
- *
- * Locality: toppar handle thread
- */
-static void rd_kafka_toppar_consumer_lag_req (rd_kafka_toppar_t *rktp) {
-	rd_kafka_broker_t *rkb;
-        rd_kafka_topic_partition_list_t *partitions;
-
-        if (rktp->rktp_wait_consumer_lag_resp)
-                return; /* Previous request not finished yet */
-
-        rkb = rd_kafka_toppar_leader(rktp, 1/*proper brokers only*/);
-        if (!rkb)
-		return;
-
-        rktp->rktp_wait_consumer_lag_resp = 1;
-
-        partitions = rd_kafka_topic_partition_list_new(1);
-        rd_kafka_topic_partition_list_add(partitions,
-                                          rktp->rktp_rkt->rkt_topic->str,
-                                          rktp->rktp_partition)->offset =
-                RD_KAFKA_OFFSET_BEGINNING;
-
-        /* Ask for oldest offset. The newest offset is automatically
-         * propagated in FetchResponse.HighwaterMark. */
-        rd_kafka_OffsetRequest(rkb, partitions, 0,
-                               RD_KAFKA_REPLYQ(rktp->rktp_ops, 0),
-                               rd_kafka_toppar_lag_handle_Offset,
-                               rd_kafka_toppar_keep(rktp));
-
-        rd_kafka_topic_partition_list_destroy(partitions);
-
-        rd_kafka_broker_destroy(rkb); /* from toppar_leader() */
-}
-
-
-
-/**
- * Request earliest offset to measure consumer lag
- *
- * Locality: toppar handler thread
- */
-static void rd_kafka_toppar_consumer_lag_tmr_cb (rd_kafka_timers_t *rkts,
-						 void *arg) {
-	rd_kafka_toppar_t *rktp = arg;
-	rd_kafka_toppar_consumer_lag_req(rktp);
-}
-
-
-/**
- * Add new partition to topic.
- *
- * Locks: rd_kafka_topic_wrlock() must be held.
- * Locks: rd_kafka_wrlock() must be held.
- */
-shptr_rd_kafka_toppar_t *rd_kafka_toppar_new0 (rd_kafka_itopic_t *rkt,
-					       int32_t partition,
-					       const char *func, int line) {
-	rd_kafka_toppar_t *rktp;
-
-	rktp = rd_calloc(1, sizeof(*rktp));
-
-	rktp->rktp_partition = partition;
-	rktp->rktp_rkt = rkt;
-        rktp->rktp_leader_id = -1;
-	rktp->rktp_fetch_state = RD_KAFKA_TOPPAR_FETCH_NONE;
-        rktp->rktp_fetch_msg_max_bytes
-            = rkt->rkt_rk->rk_conf.fetch_msg_max_bytes;
-	rktp->rktp_offset_fp = NULL;
-        rd_kafka_offset_stats_reset(&rktp->rktp_offsets);
-        rd_kafka_offset_stats_reset(&rktp->rktp_offsets_fin);
-        rktp->rktp_hi_offset = RD_KAFKA_OFFSET_INVALID;
-	rktp->rktp_lo_offset = RD_KAFKA_OFFSET_INVALID;
-	rktp->rktp_app_offset = RD_KAFKA_OFFSET_INVALID;
-        rktp->rktp_stored_offset = RD_KAFKA_OFFSET_INVALID;
-        rktp->rktp_committed_offset = RD_KAFKA_OFFSET_INVALID;
-	rd_kafka_msgq_init(&rktp->rktp_msgq);
-        rktp->rktp_msgq_wakeup_fd = -1;
-	rd_kafka_msgq_init(&rktp->rktp_xmit_msgq);
-	mtx_init(&rktp->rktp_lock, mtx_plain);
-
-        rd_refcnt_init(&rktp->rktp_refcnt, 0);
-	rktp->rktp_fetchq = rd_kafka_q_new(rkt->rkt_rk);
-        rktp->rktp_ops    = rd_kafka_q_new(rkt->rkt_rk);
-        rktp->rktp_ops->rkq_serve = rd_kafka_toppar_op_serve;
-        rktp->rktp_ops->rkq_opaque = rktp;
-        rd_atomic32_init(&rktp->rktp_version, 1);
-	rktp->rktp_op_version = rd_atomic32_get(&rktp->rktp_version);
-
-        /* Consumer: If statistics is available we query the oldest offset
-         * of each partition.
-         * Since the oldest offset only moves on log retention, we cap this
-         * value on the low end to a reasonable value to avoid flooding
-         * the brokers with OffsetRequests when our statistics interval is low.
-         * FIXME: Use a global timer to collect offsets for all partitions */
-        if (rktp->rktp_rkt->rkt_rk->rk_conf.stats_interval_ms > 0 &&
-            rkt->rkt_rk->rk_type == RD_KAFKA_CONSUMER &&
-            rktp->rktp_partition != RD_KAFKA_PARTITION_UA) {
-                int intvl = rkt->rkt_rk->rk_conf.stats_interval_ms;
-                if (intvl < 10 * 1000 /* 10s */)
-                        intvl = 10 * 1000;
-		rd_kafka_timer_start(&rkt->rkt_rk->rk_timers,
-				     &rktp->rktp_consumer_lag_tmr,
-                                     intvl * 1000ll,
-				     rd_kafka_toppar_consumer_lag_tmr_cb,
-				     rktp);
-        }
-
-        rktp->rktp_s_rkt = rd_kafka_topic_keep(rkt);
-
-	rd_kafka_q_fwd_set(rktp->rktp_ops, rkt->rkt_rk->rk_ops);
-	rd_kafka_dbg(rkt->rkt_rk, TOPIC, "TOPPARNEW", "NEW %s [%"PRId32"] %p (at %s:%d)",
-		     rkt->rkt_topic->str, rktp->rktp_partition, rktp,
-		     func, line);
-
-	return rd_kafka_toppar_keep_src(func, line, rktp);
-}
-
-
-
-/**
- * Removes a toppar from its duties, global lists, etc.
- *
- * Locks: rd_kafka_toppar_lock() MUST be held
- */
-static void rd_kafka_toppar_remove (rd_kafka_toppar_t *rktp) {
-        rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "TOPPARREMOVE",
-                     "Removing toppar %s [%"PRId32"] %p",
-                     rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
-		     rktp);
-
-	rd_kafka_timer_stop(&rktp->rktp_rkt->rkt_rk->rk_timers,
-			    &rktp->rktp_offset_query_tmr, 1/*lock*/);
-	rd_kafka_timer_stop(&rktp->rktp_rkt->rkt_rk->rk_timers,
-			    &rktp->rktp_consumer_lag_tmr, 1/*lock*/);
-
-	rd_kafka_q_fwd_set(rktp->rktp_ops, NULL);
-}
-
-
-/**
- * Final destructor for partition.
- */
-void rd_kafka_toppar_destroy_final (rd_kafka_toppar_t *rktp) {
-
-        rd_kafka_toppar_remove(rktp);
-
-	rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "DESTROY",
-		     "%s [%"PRId32"]: %p DESTROY_FINAL",
-		     rktp->rktp_rkt->rkt_topic->str,
-                     rktp->rktp_partition, rktp);
-
-	/* Clear queues */
-	rd_kafka_assert(rktp->rktp_rkt->rkt_rk,
-			rd_kafka_msgq_len(&rktp->rktp_xmit_msgq) == 0);
-	rd_kafka_dr_msgq(rktp->rktp_rkt, &rktp->rktp_msgq,
-			 RD_KAFKA_RESP_ERR__DESTROY);
-	rd_kafka_q_destroy(rktp->rktp_fetchq);
-        rd_kafka_q_destroy(rktp->rktp_ops);
-
-	rd_kafka_replyq_destroy(&rktp->rktp_replyq);
-
-	rd_kafka_topic_destroy0(rktp->rktp_s_rkt);
-
-	mtx_destroy(&rktp->rktp_lock);
-
-        rd_refcnt_destroy(&rktp->rktp_refcnt);
-
-	rd_free(rktp);
-}
-
-
-/**
- * Set toppar fetching state.
- *
- * Locality: broker thread
- * Locks: rd_kafka_toppar_lock() MUST be held.
- */
-void rd_kafka_toppar_set_fetch_state (rd_kafka_toppar_t *rktp,
-                                      int fetch_state) {
-	rd_kafka_assert(NULL,
-			thrd_is_current(rktp->rktp_rkt->rkt_rk->rk_thread));
-
-        if ((int)rktp->rktp_fetch_state == fetch_state)
-                return;
-
-        rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "PARTSTATE",
-                     "Partition %.*s [%"PRId32"] changed fetch state %s -> %s",
-                     RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
-                     rktp->rktp_partition,
-                     rd_kafka_fetch_states[rktp->rktp_fetch_state],
-                     rd_kafka_fetch_states[fetch_state]);
-
-        rktp->rktp_fetch_state = fetch_state;
-}
-
-
-/**
- * Returns the appropriate toppar for a given rkt and partition.
- * The returned toppar has increased refcnt and must be unreffed by calling
- *  rd_kafka_toppar_destroy().
- * May return NULL.
- *
- * If 'ua_on_miss' is true the UA (unassigned) toppar is returned if
- * 'partition' was not known locally, else NULL is returned.
- *
- * Locks: Caller must hold rd_kafka_topic_*lock()
- */
-shptr_rd_kafka_toppar_t *rd_kafka_toppar_get0 (const char *func, int line,
-                                               const rd_kafka_itopic_t *rkt,
-                                               int32_t partition,
-                                               int ua_on_miss) {
-        shptr_rd_kafka_toppar_t *s_rktp;
-
-	if (partition >= 0 && partition < rkt->rkt_partition_cnt)
-		s_rktp = rkt->rkt_p[partition];
-	else if (partition == RD_KAFKA_PARTITION_UA || ua_on_miss)
-		s_rktp = rkt->rkt_ua;
-	else
-		return NULL;
-
-	if (s_rktp)
-                return rd_kafka_toppar_keep_src(func,line,
-                                                rd_kafka_toppar_s2i(s_rktp));
-
-	return NULL;
-}
-
-
-/**
- * Same as rd_kafka_toppar_get() but no need for locking and
- * looks up the topic first.
- *
- * Locality: any
- * Locks: none
- */
-shptr_rd_kafka_toppar_t *rd_kafka_toppar_get2 (rd_kafka_t *rk,
-                                               const char *topic,
-                                               int32_t partition,
-                                               int ua_on_miss,
-                                               int create_on_miss) {
-	shptr_rd_kafka_itopic_t *s_rkt;
-        rd_kafka_itopic_t *rkt;
-        shptr_rd_kafka_toppar_t *s_rktp;
-
-        rd_kafka_wrlock(rk);
-
-        /* Find or create topic */
-	if (unlikely(!(s_rkt = rd_kafka_topic_find(rk, topic, 0/*no-lock*/)))) {
-                if (!create_on_miss) {
-                        rd_kafka_wrunlock(rk);
-                        return NULL;
-                }
-                s_rkt = rd_kafka_topic_new0(rk, topic, NULL,
-					    NULL, 0/*no-lock*/);
-                if (!s_rkt) {
-                        rd_kafka_wrunlock(rk);
-                        rd_kafka_log(rk, LOG_ERR, "TOPIC",
-                                     "Failed to create local topic \"%s\": %s",
-                                     topic, rd_strerror(errno));
-                        return NULL;
-                }
-        }
-
-        rd_kafka_wrunlock(rk);
-
-        rkt = rd_kafka_topic_s2i(s_rkt);
-
-	rd_kafka_topic_wrlock(rkt);
-	s_rktp = rd_kafka_toppar_desired_add(rkt, partition);
-	rd_kafka_topic_wrunlock(rkt);
-
-        rd_kafka_topic_destroy0(s_rkt);
-
-	return s_rktp;
-}
-
-
-/**
- * Returns a toppar if it is available in the cluster.
- * '*errp' is set to the error-code if lookup fails.
- *
- * Locks: topic_*lock() MUST be held
- */
-shptr_rd_kafka_toppar_t *
-rd_kafka_toppar_get_avail (const rd_kafka_itopic_t *rkt,
-                           int32_t partition, int ua_on_miss,
-                           rd_kafka_resp_err_t *errp) {
-	shptr_rd_kafka_toppar_t *s_rktp;
-
-        switch (rkt->rkt_state)
-        {
-        case RD_KAFKA_TOPIC_S_UNKNOWN:
-                /* No metadata received from cluster yet.
-                 * Put message in UA partition and re-run partitioner when
-                 * cluster comes up. */
-		partition = RD_KAFKA_PARTITION_UA;
-                break;
-
-        case RD_KAFKA_TOPIC_S_NOTEXISTS:
-                /* Topic not found in cluster.
-                 * Fail message immediately. */
-                *errp = RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC;
-                return NULL;
-
-        case RD_KAFKA_TOPIC_S_EXISTS:
-                /* Topic exists in cluster. */
-
-                /* Topic exists but has no partitions.
-                 * This is usually a transient state following the
-                 * auto-creation of a topic. */
-                if (unlikely(rkt->rkt_partition_cnt == 0)) {
-                        partition = RD_KAFKA_PARTITION_UA;
-                        break;
-                }
-
-                /* Check that partition exists. */
-                if (partition >= rkt->rkt_partition_cnt) {
-                        *errp = RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION;
-                        return NULL;
-                }
-                break;
-
-        default:
-                rd_kafka_assert(rkt->rkt_rk, !*"NOTREACHED");
-                break;
-        }
-
-	/* Get new partition */
-	s_rktp = rd_kafka_toppar_get(rkt, partition, 0);
-
-	if (unlikely(!s_rktp)) {
-		/* Unknown topic or partition */
-		if (rkt->rkt_state == RD_KAFKA_TOPIC_S_NOTEXISTS)
-			*errp = RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC;
-		else
-			*errp = RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION;
-
-		return NULL;
-	}
-
-	return s_rktp;
-}
-
-
-/**
- * Looks for partition 'i' in topic 'rkt's desired list.
- *
- * The desired partition list is the list of partitions that are desired
- * (e.g., by the consumer) but not yet seen on a broker.
- * As soon as the partition is seen on a broker the toppar is moved from
- * the desired list and onto the normal rkt_p array.
- * When the partition on the broker goes away a desired partition is put
- * back on the desired list.
- *
- * Locks: rd_kafka_topic_*lock() must be held.
- * Note: 'rktp' refcount is increased.
- */
-
-shptr_rd_kafka_toppar_t *rd_kafka_toppar_desired_get (rd_kafka_itopic_t *rkt,
-                                                      int32_t partition) {
-	shptr_rd_kafka_toppar_t *s_rktp;
-        int i;
-
-	RD_LIST_FOREACH(s_rktp, &rkt->rkt_desp, i) {
-                rd_kafka_toppar_t *rktp = rd_kafka_toppar_s2i(s_rktp);
-		if (rktp->rktp_partition == partition)
-			return rd_kafka_toppar_keep(rktp);
-        }
-
-	return NULL;
-}
-
-
-/**
- * Link toppar on desired list.
- *
- * Locks: rd_kafka_topic_wrlock() and toppar_lock() must be held.
- */
-void rd_kafka_toppar_desired_link (rd_kafka_toppar_t *rktp) {
-        shptr_rd_kafka_toppar_t *s_rktp;
-
-        if (rktp->rktp_s_for_desp)
-                return; /* Already linked */
-
-        s_rktp = rd_kafka_toppar_keep(rktp);
-        rd_list_add(&rktp->rktp_rkt->rkt_desp, s_rktp);
-        rktp->rktp_s_for_desp = s_rktp; /* Desired list refcount */
-}
-
-/**
- * Unlink toppar from desired list.
- *
- * Locks: rd_kafka_topic_wrlock() and toppar_lock() must be held.
- */
-void rd_kafka_toppar_desired_unlink (rd_kafka_toppar_t *rktp) {
-        if (!rktp->rktp_s_for_desp)
-                return; /* Not linked */
-
-        rd_list_remove(&rktp->rktp_rkt->rkt_desp, rktp->rktp_s_for_desp);
-        rd_kafka_toppar_destroy(rktp->rktp_s_for_desp);
-        rktp->rktp_s_for_desp = NULL;
- }
-
-
-/**
- * @brief If rktp is not already desired:
- *  - mark as DESIRED|UNKNOWN
- *  - add to desired list
- *
- * @remark toppar_lock() MUST be held
- */
-void rd_kafka_toppar_desired_add0 (rd_kafka_toppar_t *rktp) {
-        if ((rktp->rktp_flags & RD_KAFKA_TOPPAR_F_DESIRED))
-                return;
-
-        rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "DESIRED",
-                     "%s [%"PRId32"]: adding to DESIRED list",
-                     rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition);
-	rktp->rktp_flags |= RD_KAFKA_TOPPAR_F_DESIRED;
-        rd_kafka_toppar_desired_link(rktp);
-}
-
-
-/**
- * Adds 'partition' as a desired partition to topic 'rkt', or updates
- * an existing partition to be desired.
- *
- * Locks: rd_kafka_topic_wrlock() must be held.
- */
-shptr_rd_kafka_toppar_t *rd_kafka_toppar_desired_add (rd_kafka_itopic_t *rkt,
-                                                      int32_t partition) {
-	shptr_rd_kafka_toppar_t *s_rktp;
-        rd_kafka_toppar_t *rktp;
-
-	if ((s_rktp = rd_kafka_toppar_get(rkt,
-                                          partition, 0/*no_ua_on_miss*/))) {
-                rktp = rd_kafka_toppar_s2i(s_rktp);
-		rd_kafka_toppar_lock(rktp);
-                if (unlikely(!(rktp->rktp_flags & RD_KAFKA_TOPPAR_F_DESIRED))) {
-                        rd_kafka_dbg(rkt->rkt_rk, TOPIC, "DESP",
-                                     "Setting topic %s [%"PRId32"] partition "
-                                     "as desired",
-                                     rkt->rkt_topic->str, rktp->rktp_partition);
-                        rktp->rktp_flags |= RD_KAFKA_TOPPAR_F_DESIRED;
-                }
-		rd_kafka_toppar_unlock(rktp);
-		return s_rktp;
-	}
-
-	if ((s_rktp = rd_kafka_toppar_desired_get(rkt, partition)))
-		return s_rktp;
-
-	s_rktp = rd_kafka_toppar_new(rkt, partition);
-        rktp = rd_kafka_toppar_s2i(s_rktp);
-
-        rd_kafka_toppar_lock(rktp);
-        rktp->rktp_flags |= RD_KAFKA_TOPPAR_F_UNKNOWN;
-        rd_kafka_toppar_desired_add0(rktp);
-        rd_kafka_toppar_unlock(rktp);
-
-	rd_kafka_dbg(rkt->rkt_rk, TOPIC, "DESP",
-		     "Adding desired topic %s [%"PRId32"]",
-		     rkt->rkt_topic->str, rktp->rktp_partition);
-
-	return s_rktp; /* Callers refcount */
-}
-
-
-
-
-/**
- * Unmarks an 'rktp' as desired.
- *
- * Locks: rd_kafka_topic_wrlock() and rd_kafka_toppar_lock() MUST be held.
- */
-void rd_kafka_toppar_desired_del (rd_kafka_toppar_t *rktp) {
-
-	if (!(rktp->rktp_flags & RD_KAFKA_TOPPAR_F_DESIRED))
-		return;
-
-	rktp->rktp_flags &= ~RD_KAFKA_TOPPAR_F_DESIRED;
-        rd_kafka_toppar_desired_unlink(rktp);
-
-        if (rktp->rktp_flags & RD_KAFKA_TOPPAR_F_UNKNOWN)
-                rktp->rktp_flags &= ~RD_KAFKA_TOPPAR_F_UNKNOWN;
-
-
-	rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "DESP",
-		     "Removing (un)desired topic %s [%"PRId32"]",
-		     rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition);
-}
-
-
-
-/**
- * Append message at tail of 'rktp' message queue.
- */
-void rd_kafka_toppar_enq_msg (rd_kafka_toppar_t *rktp, rd_kafka_msg_t *rkm) {
-
-	rd_kafka_toppar_lock(rktp);
-	rd_kafka_msgq_enq(&rktp->rktp_msgq, rkm);
-#ifndef _MSC_VER
-        if (rktp->rktp_msgq_wakeup_fd != -1 &&
-            rd_kafka_msgq_len(&rktp->rktp_msgq) == 1) {
-                char one = 1;
-                int r;
-                r = rd_write(rktp->rktp_msgq_wakeup_fd, &one, sizeof(one));
-                if (r == -1)
-                        rd_kafka_log(rktp->rktp_rkt->rkt_rk, LOG_ERR, "PARTENQ",
-                                     "%s [%"PRId32"]: write to "
-                                     "wake-up fd %d failed: %s",
-                                     rktp->rktp_rkt->rkt_topic->str,
-                                     rktp->rktp_partition,
-                                     rktp->rktp_msgq_wakeup_fd,
-                                     rd_strerror(errno));
-        }
-#endif
-        rd_kafka_toppar_unlock(rktp);
-}
-
-
-/**
- * Dequeue message from 'rktp' message queue.
- */
-void rd_kafka_toppar_deq_msg (rd_kafka_toppar_t *rktp, rd_kafka_msg_t *rkm) {
-	rd_kafka_toppar_lock(rktp);
-	rd_kafka_msgq_deq(&rktp->rktp_msgq, rkm, 1);
-	rd_kafka_toppar_unlock(rktp);
-}
-
-/**
- * Inserts all messages from 'rkmq' at head of toppar 'rktp's queue.
- * 'rkmq' will be cleared.
- */
-void rd_kafka_toppar_insert_msgq (rd_kafka_toppar_t *rktp,
-				  rd_kafka_msgq_t *rkmq) {
-	rd_kafka_toppar_lock(rktp);
-	rd_kafka_msgq_concat(rkmq, &rktp->rktp_msgq);
-	rd_kafka_msgq_move(&rktp->rktp_msgq, rkmq);
-	rd_kafka_toppar_unlock(rktp);
-}
-
-
-/**
- * Concats all messages from 'rkmq' at tail of toppar 'rktp's queue.
- * 'rkmq' will be cleared.
- */
-void rd_kafka_toppar_concat_msgq (rd_kafka_toppar_t *rktp,
-				  rd_kafka_msgq_t *rkmq) {
-	rd_kafka_toppar_lock(rktp);
-	rd_kafka_msgq_concat(&rktp->rktp_msgq, rkmq);
-	rd_kafka_toppar_unlock(rktp);
-}
-
-/**
- * Move all messages in 'rkmq' to the unassigned partition, if any.
- * Returns 0 on success or -1 if there was no UA partition.
- */
-int rd_kafka_toppar_ua_move (rd_kafka_itopic_t *rkt, rd_kafka_msgq_t *rkmq) {
-	shptr_rd_kafka_toppar_t *s_rktp_ua;
-
-	rd_kafka_topic_rdlock(rkt);
-	s_rktp_ua = rd_kafka_toppar_get(rkt, RD_KAFKA_PARTITION_UA, 0);
-	rd_kafka_topic_rdunlock(rkt);
-
-	if (unlikely(s_rktp_ua == NULL))
-		return -1;
-
-	rd_kafka_msgq_concat(&rd_kafka_toppar_s2i(s_rktp_ua)->rktp_msgq, rkmq);
-
-	rd_kafka_toppar_destroy(s_rktp_ua);
-
-	return 0;
-}
-
-
-/**
- * Helper method for purging queues when removing a toppar.
- * Locks: rd_kafka_toppar_lock() MUST be held
- */
-void rd_kafka_toppar_purge_queues (rd_kafka_toppar_t *rktp) {
-        rd_kafka_q_disable(rktp->rktp_fetchq);
-        rd_kafka_q_purge(rktp->rktp_fetchq);
-        rd_kafka_q_disable(rktp->rktp_ops);
-        rd_kafka_q_purge(rktp->rktp_ops);
-}
-
-
-/**
- * Migrate rktp from (optional) \p old_rkb to (optional) \p new_rkb.
- * This is an async operation.
- *
- * Locks: rd_kafka_toppar_lock() MUST be held
- */
-static void rd_kafka_toppar_broker_migrate (rd_kafka_toppar_t *rktp,
-                                            rd_kafka_broker_t *old_rkb,
-                                            rd_kafka_broker_t *new_rkb) {
-        rd_kafka_op_t *rko;
-        rd_kafka_broker_t *dest_rkb;
-        int had_next_leader = rktp->rktp_next_leader ? 1 : 0;
-
-        /* Update next leader */
-        if (new_rkb)
-                rd_kafka_broker_keep(new_rkb);
-        if (rktp->rktp_next_leader)
-                rd_kafka_broker_destroy(rktp->rktp_next_leader);
-        rktp->rktp_next_leader = new_rkb;
-
-        /* If next_leader is set it means there is already an async
-         * migration op going on and we should not send a new one
-         * but simply change the next_leader (which we did above). */
-        if (had_next_leader)
-                return;
-
-	/* Revert from offset-wait state back to offset-query
-	 * prior to leaving the broker to avoid stalling
-	 * on the new broker waiting for a offset reply from
-	 * this old broker (that might not come and thus need
-	 * to time out..slowly) */
-	if (rktp->rktp_fetch_state == RD_KAFKA_TOPPAR_FETCH_OFFSET_WAIT) {
-		rd_kafka_toppar_set_fetch_state(
-			rktp, RD_KAFKA_TOPPAR_FETCH_OFFSET_QUERY);
-		rd_kafka_timer_start(&rktp->rktp_rkt->rkt_rk->rk_timers,
-				     &rktp->rktp_offset_query_tmr,
-				     500*1000,
-				     rd_kafka_offset_query_tmr_cb,
-				     rktp);
-	}
-
-        if (old_rkb) {
-                /* If there is an existing broker for this toppar we let it
-                 * first handle its own leave and then trigger the join for
-                 * the next leader, if any. */
-                rko = rd_kafka_op_new(RD_KAFKA_OP_PARTITION_LEAVE);
-                dest_rkb = old_rkb;
-        } else {
-                /* No existing broker, send join op directly to new leader. */
-                rko = rd_kafka_op_new(RD_KAFKA_OP_PARTITION_JOIN);
-                dest_rkb = new_rkb;
-        }
-
-        rko->rko_rktp = rd_kafka_toppar_keep(rktp);
-
-        rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "BRKMIGR",
-                     "Migrating topic %.*s [%"PRId32"] %p from %s to %s "
-		     "(sending %s to %s)",
-                     RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
-                     rktp->rktp_partition, rktp,
-                     old_rkb ? rd_kafka_broker_name(old_rkb) : "(none)",
-                     new_rkb ? rd_kafka_broker_name(new_rkb) : "(none)",
-		     rd_kafka_op2str(rko->rko_type),
-		     rd_kafka_broker_name(dest_rkb));
-
-        rd_kafka_q_enq(dest_rkb->rkb_ops, rko);
-}
-
-
-/**
- * Async toppar leave from broker.
- * Only use this when partitions are to be removed.
- *
- * Locks: rd_kafka_toppar_lock() MUST be held
- */
-void rd_kafka_toppar_broker_leave_for_remove (rd_kafka_toppar_t *rktp) {
-        rd_kafka_op_t *rko;
-        rd_kafka_broker_t *dest_rkb;
-
-
-	if (rktp->rktp_next_leader)
-		dest_rkb = rktp->rktp_next_leader;
-	else if (rktp->rktp_leader)
-		dest_rkb = rktp->rktp_leader;
-	else {
-		rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "TOPPARDEL",
-			     "%.*s [%"PRId32"] %p not handled by any broker: "
-			     "not sending LEAVE for remove",
-			     RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
-			     rktp->rktp_partition, rktp);
-		return;
-	}
-
-
-	/* Revert from offset-wait state back to offset-query
-	 * prior to leaving the broker to avoid stalling
-	 * on the new broker while waiting for an offset reply from
-	 * this old broker (which might never arrive and thus
-	 * take a long time to time out). */
-	if (rktp->rktp_fetch_state == RD_KAFKA_TOPPAR_FETCH_OFFSET_WAIT)
-		rd_kafka_toppar_set_fetch_state(
-			rktp, RD_KAFKA_TOPPAR_FETCH_OFFSET_QUERY);
-
-	rko = rd_kafka_op_new(RD_KAFKA_OP_PARTITION_LEAVE);
-        rko->rko_rktp = rd_kafka_toppar_keep(rktp);
-
-        rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "BRKMIGR",
-                     "%.*s [%"PRId32"] %p sending final LEAVE for removal by %s",
-                     RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
-                     rktp->rktp_partition, rktp,
-                     rd_kafka_broker_name(dest_rkb));
-
-        rd_kafka_q_enq(dest_rkb->rkb_ops, rko);
-}
-
-
-
-/**
- * Delegates broker 'rkb' as leader for toppar 'rktp'.
- * 'rkb' may be NULL to undelegate leader.
- *
- * Locks: Caller must have rd_kafka_topic_wrlock(rktp->rktp_rkt) 
- *        AND rd_kafka_toppar_lock(rktp) held.
- */
-void rd_kafka_toppar_broker_delegate (rd_kafka_toppar_t *rktp,
-				      rd_kafka_broker_t *rkb,
-				      int for_removal) {
-        rd_kafka_t *rk = rktp->rktp_rkt->rkt_rk;
-        int internal_fallback = 0;
-
-	rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "BRKDELGT",
-		     "%s [%"PRId32"]: delegate to broker %s "
-		     "(rktp %p, term %d, ref %d, remove %d)",
-		     rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
-		     rkb ? rkb->rkb_name : "(none)",
-		     rktp, rd_kafka_terminating(rk),
-		     rd_refcnt_get(&rktp->rktp_refcnt),
-		     for_removal);
-
-        /* Delegate toppars with no leader to the
-         * internal broker for bookkeeping. */
-        if (!rkb && !for_removal && !rd_kafka_terminating(rk)) {
-                rkb = rd_kafka_broker_internal(rk);
-                internal_fallback = 1;
-        }
-
-	if (rktp->rktp_leader == rkb && !rktp->rktp_next_leader) {
-                rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "BRKDELGT",
-			     "%.*s [%"PRId32"]: not updating broker: "
-                             "already on correct broker %s",
-			     RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
-			     rktp->rktp_partition,
-                             rkb ? rd_kafka_broker_name(rkb) : "(none)");
-
-                if (internal_fallback)
-                        rd_kafka_broker_destroy(rkb);
-		return;
-        }
-
-	if (rktp->rktp_leader)
-		rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "BRKDELGT",
-			     "%.*s [%"PRId32"]: broker %s no longer leader",
-			     RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
-			     rktp->rktp_partition,
-			     rd_kafka_broker_name(rktp->rktp_leader));
-
-
-	if (rkb) {
-		rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "BRKDELGT",
-			     "%.*s [%"PRId32"]: broker %s is now leader "
-			     "for partition with %i messages "
-			     "(%"PRIu64" bytes) queued",
-			     RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
-			     rktp->rktp_partition,
-			     rd_kafka_broker_name(rkb),
-			     rd_atomic32_get(&rktp->rktp_msgq.rkmq_msg_cnt),
-			     rd_atomic64_get(&rktp->rktp_msgq.rkmq_msg_bytes));
-
-
-	} else {
-		rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "BRKDELGT",
-			     "%.*s [%"PRId32"]: no leader broker",
-			     RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
-			     rktp->rktp_partition);
-	}
-
-        if (rktp->rktp_leader || rkb)
-                rd_kafka_toppar_broker_migrate(rktp, rktp->rktp_leader, rkb);
-
-        if (internal_fallback)
-                rd_kafka_broker_destroy(rkb);
-}
-
-
-
-
-
-void
-rd_kafka_toppar_offset_commit_result (rd_kafka_toppar_t *rktp,
-				      rd_kafka_resp_err_t err,
-				      rd_kafka_topic_partition_list_t *offsets){
-	if (err) {
-		rd_kafka_q_op_err(rktp->rktp_fetchq,
-				  RD_KAFKA_OP_CONSUMER_ERR,
-				  err, 0 /* FIXME:VERSION*/,
-				  rktp, 0,
-				  "Offset commit failed: %s",
-				  rd_kafka_err2str(err));
-		return;
-	}
-
-	rd_kafka_toppar_lock(rktp);
-	rktp->rktp_committed_offset = offsets->elems[0].offset;
-
-	/* When stopping toppars:
-	 * Final commit is now done (or failed), propagate. */
-	if (rktp->rktp_fetch_state == RD_KAFKA_TOPPAR_FETCH_STOPPING)
-		rd_kafka_toppar_fetch_stopped(rktp, err);
-
-	rd_kafka_toppar_unlock(rktp);
-}
-
-
-/**
- * Commit toppar's offset on broker.
- * This is an async operation; this function simply enqueues an op
- * on the cgrp's queue.
- *
- * Locality: rktp's broker thread
- */
-void rd_kafka_toppar_offset_commit (rd_kafka_toppar_t *rktp, int64_t offset,
-				    const char *metadata) {
-        rd_kafka_topic_partition_list_t *offsets;
-        rd_kafka_topic_partition_t *rktpar;
-
-        rd_kafka_assert(rktp->rktp_rkt->rkt_rk, rktp->rktp_cgrp != NULL);
-        rd_kafka_assert(rktp->rktp_rkt->rkt_rk,
-                        rktp->rktp_flags & RD_KAFKA_TOPPAR_F_OFFSET_STORE);
-
-        rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, CGRP, "OFFSETCMT",
-                     "%.*s [%"PRId32"]: committing offset %"PRId64,
-                     RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
-                     rktp->rktp_partition, offset);
-
-        offsets = rd_kafka_topic_partition_list_new(1);
-        rktpar = rd_kafka_topic_partition_list_add(
-                offsets, rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition);
-        rktpar->offset = offset;
-        if (metadata) {
-                rktpar->metadata = rd_strdup(metadata);
-                rktpar->metadata_size = strlen(metadata);
-        }
-
-        rktp->rktp_committing_offset = offset;
-
-        rd_kafka_commit(rktp->rktp_rkt->rkt_rk, offsets, 1/*async*/);
-
-        rd_kafka_topic_partition_list_destroy(offsets);
-}
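
For illustration (not part of the diff): the internal pattern above, building a one-element topic_partition_list and handing it to rd_kafka_commit(), mirrors what an application does through the public librdkafka API. A minimal sketch, assuming an existing consumer handle and hypothetical topic/partition/offset values, with error handling trimmed:

#include <librdkafka/rdkafka.h>

/* Commit a single partition's offset asynchronously (sketch). */
static rd_kafka_resp_err_t commit_one (rd_kafka_t *rk,
                                       const char *topic, int32_t partition,
                                       int64_t offset) {
        rd_kafka_topic_partition_list_t *offsets;
        rd_kafka_resp_err_t err;

        offsets = rd_kafka_topic_partition_list_new(1);
        rd_kafka_topic_partition_list_add(offsets, topic, partition)->offset =
                offset;

        /* async=1: enqueue the commit; the result is delivered later
         * through the configured offset_commit_cb. */
        err = rd_kafka_commit(rk, offsets, 1 /*async*/);

        rd_kafka_topic_partition_list_destroy(offsets);
        return err;
}
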
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-/**
- * Handle the next offset to consume for a toppar.
- * This is used during initial setup when trying to figure out what
- * offset to start consuming from.
- *
- * Locality: toppar handler thread.
- * Locks: toppar_lock(rktp) must be held
- */
-void rd_kafka_toppar_next_offset_handle (rd_kafka_toppar_t *rktp,
-                                         int64_t Offset) {
-
-        if (RD_KAFKA_OFFSET_IS_LOGICAL(Offset)) {
-                /* Offset storage returned logical offset (e.g. "end"),
-                 * look it up. */
-                rd_kafka_offset_reset(rktp, Offset, RD_KAFKA_RESP_ERR_NO_ERROR,
-                                      "update");
-                return;
-        }
-
-        /* Adjust by TAIL count, if wanted */
-        if (rktp->rktp_query_offset <=
-            RD_KAFKA_OFFSET_TAIL_BASE) {
-                int64_t orig_Offset = Offset;
-                int64_t tail_cnt =
-                        llabs(rktp->rktp_query_offset -
-                              RD_KAFKA_OFFSET_TAIL_BASE);
-
-                if (tail_cnt > Offset)
-                        Offset = 0;
-                else
-                        Offset -= tail_cnt;
-
-                rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET",
-                             "OffsetReply for topic %s [%"PRId32"]: "
-                             "offset %"PRId64": adjusting for "
-                             "OFFSET_TAIL(%"PRId64"): "
-                             "effective offset %"PRId64,
-                             rktp->rktp_rkt->rkt_topic->str,
-                             rktp->rktp_partition,
-                             orig_Offset, tail_cnt,
-                             Offset);
-        }
-
-        rktp->rktp_next_offset = Offset;
-
-        rd_kafka_toppar_set_fetch_state(rktp, RD_KAFKA_TOPPAR_FETCH_ACTIVE);
-
-        /* Wake-up broker thread which might be idling on IO */
-        if (rktp->rktp_leader)
-                rd_kafka_broker_wakeup(rktp->rktp_leader);
-
-}
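
For illustration, the OFFSET_TAIL adjustment above boils down to simple arithmetic on the looked-up end offset: "the last N messages" becomes the high watermark minus N, clamped at zero. A minimal standalone sketch with hypothetical numbers:

#include <inttypes.h>
#include <stdio.h>

/* Mirrors the TAIL adjustment: start at high watermark minus the tail
 * count, never below offset 0. */
static int64_t tail_start_offset (int64_t tail_cnt, int64_t high_watermark) {
        return tail_cnt > high_watermark ? 0 : high_watermark - tail_cnt;
}

int main (void) {
        /* Hypothetical numbers: RD_KAFKA_OFFSET_TAIL(100) on a partition
         * whose end offset is 100000 resolves to 99900. */
        printf("start at %" PRId64 "\n", tail_start_offset(100, 100000));
        return 0;
}
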
-
-
-
-/**
- * Fetch stored offset for a single partition. (simple consumer)
- *
- * Locality: toppar thread
- */
-void rd_kafka_toppar_offset_fetch (rd_kafka_toppar_t *rktp,
-                                   rd_kafka_replyq_t replyq) {
-        rd_kafka_t *rk = rktp->rktp_rkt->rkt_rk;
-        rd_kafka_topic_partition_list_t *part;
-        rd_kafka_op_t *rko;
-
-        rd_kafka_dbg(rk, TOPIC, "OFFSETREQ",
-                     "Partition %.*s [%"PRId32"]: querying cgrp for "
-                     "stored offset (opv %d)",
-                     RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
-                     rktp->rktp_partition, replyq.version);
-
-        part = rd_kafka_topic_partition_list_new(1);
-        rd_kafka_topic_partition_list_add0(part,
-                                           rktp->rktp_rkt->rkt_topic->str,
-                                           rktp->rktp_partition,
-					   rd_kafka_toppar_keep(rktp));
-
-        rko = rd_kafka_op_new(RD_KAFKA_OP_OFFSET_FETCH);
-	rko->rko_rktp = rd_kafka_toppar_keep(rktp);
-	rko->rko_replyq = replyq;
-
-	rko->rko_u.offset_fetch.partitions = part;
-	rko->rko_u.offset_fetch.do_free = 1;
-
-        rd_kafka_q_enq(rktp->rktp_cgrp->rkcg_ops, rko);
-}
-
-
-
-
-/**
- * Toppar based OffsetResponse handling.
- * This is used for finding the next offset to Fetch.
- *
- * Locality: toppar handler thread
- */
-static void rd_kafka_toppar_handle_Offset (rd_kafka_t *rk,
-					   rd_kafka_broker_t *rkb,
-					   rd_kafka_resp_err_t err,
-					   rd_kafka_buf_t *rkbuf,
-					   rd_kafka_buf_t *request,
-					   void *opaque) {
-        shptr_rd_kafka_toppar_t *s_rktp = opaque;
-        rd_kafka_toppar_t *rktp = rd_kafka_toppar_s2i(s_rktp);
-        rd_kafka_topic_partition_list_t *offsets;
-        rd_kafka_topic_partition_t *rktpar;
-        int64_t Offset;
-
-	rd_kafka_toppar_lock(rktp);
-	/* Drop reply from previous partition leader */
-	if (rktp->rktp_leader != rkb)
-		err = RD_KAFKA_RESP_ERR__OUTDATED;
-	rd_kafka_toppar_unlock(rktp);
-
-        offsets = rd_kafka_topic_partition_list_new(1);
-
-        /* Parse and return Offset */
-        err = rd_kafka_handle_Offset(rkb->rkb_rk, rkb, err,
-                                     rkbuf, request, offsets);
-
-	rd_rkb_dbg(rkb, TOPIC, "OFFSET",
-		   "Offset reply for "
-		   "topic %.*s [%"PRId32"] (v%d vs v%d)",
-		   RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
-		   rktp->rktp_partition, request->rkbuf_replyq.version,
-		   rktp->rktp_op_version);
-
-	rd_dassert(request->rkbuf_replyq.version > 0);
-	if (err != RD_KAFKA_RESP_ERR__DESTROY &&
-            rd_kafka_buf_version_outdated(request, rktp->rktp_op_version)) {
-		/* Outdated request response, ignore. */
-		err = RD_KAFKA_RESP_ERR__OUTDATED;
-	}
-
-        if (!err &&
-            (!(rktpar = rd_kafka_topic_partition_list_find(
-                       offsets,
-                       rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition))))
-                err = RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION;
-
-        if (err) {
-                rd_kafka_op_t *rko;
-
-                rd_rkb_dbg(rkb, TOPIC, "OFFSET",
-                           "Offset reply error for "
-                           "topic %.*s [%"PRId32"] (v%d): %s",
-                           RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
-                           rktp->rktp_partition, request->rkbuf_replyq.version,
-			   rd_kafka_err2str(err));
-
-                rd_kafka_topic_partition_list_destroy(offsets);
-
-                if (err == RD_KAFKA_RESP_ERR__DESTROY ||
-                    err == RD_KAFKA_RESP_ERR__OUTDATED) {
-                        /* Termination or outdated, quick cleanup. */
-
-                        /* from request.opaque */
-                        rd_kafka_toppar_destroy(s_rktp);
-                        return;
-
-		} else if (err == RD_KAFKA_RESP_ERR__IN_PROGRESS)
-			return; /* Retry in progress */
-
-
-                rd_kafka_toppar_lock(rktp);
-                rd_kafka_offset_reset(rktp, rktp->rktp_query_offset,
-                                      err,
-                                      "failed to query logical offset");
-
-                /* Signal error back to application,
-                 * unless this is an intermittent problem
-                 * (e.g., connection lost) */
-                rko = rd_kafka_op_new(RD_KAFKA_OP_CONSUMER_ERR);
-                rko->rko_err = err;
-                if (rktp->rktp_query_offset <=
-                    RD_KAFKA_OFFSET_TAIL_BASE)
-                        rko->rko_u.err.offset =
-                                rktp->rktp_query_offset -
-                                RD_KAFKA_OFFSET_TAIL_BASE;
-                else
-                        rko->rko_u.err.offset = rktp->rktp_query_offset;
-                rd_kafka_toppar_unlock(rktp);
-                rko->rko_rktp = rd_kafka_toppar_keep(rktp);
-
-                rd_kafka_q_enq(rktp->rktp_fetchq, rko);
-
-                rd_kafka_toppar_destroy(s_rktp); /* from request.opaque */
-                return;
-        }
-
-        Offset = rktpar->offset;
-        rd_kafka_topic_partition_list_destroy(offsets);
-
-	rd_kafka_toppar_lock(rktp);
-        rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET",
-                     "Offset %s request for %.*s [%"PRId32"] "
-                     "returned offset %s (%"PRId64")",
-                     rd_kafka_offset2str(rktp->rktp_query_offset),
-                     RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
-                     rktp->rktp_partition, rd_kafka_offset2str(Offset), Offset);
-
-        rd_kafka_toppar_next_offset_handle(rktp, Offset);
-	rd_kafka_toppar_unlock(rktp);
-
-        rd_kafka_toppar_destroy(s_rktp); /* from request.opaque */
-}
-
-/**
- * Send OffsetRequest for toppar.
- *
- * If \p backoff_ms is non-zero, only the query timer is started;
- * otherwise a query is triggered directly.
- *
- * Locality: toppar handler thread
- * Locks: toppar_lock() must be held
- */
-void rd_kafka_toppar_offset_request (rd_kafka_toppar_t *rktp,
-				     int64_t query_offset, int backoff_ms) {
-	rd_kafka_broker_t *rkb;
-
-	rd_kafka_assert(NULL,
-			thrd_is_current(rktp->rktp_rkt->rkt_rk->rk_thread));
-
-        rkb = rktp->rktp_leader;
-
-        if (!backoff_ms && (!rkb || rkb->rkb_source == RD_KAFKA_INTERNAL))
-                backoff_ms = 500;
-
-        if (backoff_ms) {
-		rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET",
-			     "%s [%"PRId32"]: %s"
-			     "starting offset query timer for offset %s",
-			     rktp->rktp_rkt->rkt_topic->str,
-			     rktp->rktp_partition,
-                             !rkb ? "no current leader for partition, " : "",
-			     rd_kafka_offset2str(query_offset));
-
-                rd_kafka_toppar_set_fetch_state(
-                        rktp, RD_KAFKA_TOPPAR_FETCH_OFFSET_QUERY);
-		rd_kafka_timer_start(&rktp->rktp_rkt->rkt_rk->rk_timers,
-				     &rktp->rktp_offset_query_tmr,
-				     backoff_ms*1000ll,
-				     rd_kafka_offset_query_tmr_cb, rktp);
-		return;
-        }
-
-
-        rd_kafka_timer_stop(&rktp->rktp_rkt->rkt_rk->rk_timers,
-                            &rktp->rktp_offset_query_tmr, 1/*lock*/);
-
-
-	if (query_offset == RD_KAFKA_OFFSET_STORED &&
-            rktp->rktp_rkt->rkt_conf.offset_store_method ==
-            RD_KAFKA_OFFSET_METHOD_BROKER) {
-                /*
-                 * Get stored offset from broker based storage:
-                 * ask cgrp manager for offsets
-                 */
-                rd_kafka_toppar_offset_fetch(
-			rktp,
-			RD_KAFKA_REPLYQ(rktp->rktp_ops,
-					rktp->rktp_op_version));
-
-	} else {
-                shptr_rd_kafka_toppar_t *s_rktp;
-                rd_kafka_topic_partition_list_t *offsets;
-
-                /*
-                 * Look up logical offset (end,beginning,tail,..)
-                 */
-
-                rd_rkb_dbg(rkb, TOPIC, "OFFREQ",
-                           "Partition %.*s [%"PRId32"]: querying for logical "
-                           "offset %s (opv %d)",
-                           RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
-                           rktp->rktp_partition,
-                           rd_kafka_offset2str(query_offset),
-			   rktp->rktp_op_version);
-
-                s_rktp = rd_kafka_toppar_keep(rktp);
-
-		if (query_offset <= RD_KAFKA_OFFSET_TAIL_BASE)
-			query_offset = RD_KAFKA_OFFSET_END;
-
-                offsets = rd_kafka_topic_partition_list_new(1);
-                rd_kafka_topic_partition_list_add(
-                        offsets,
-                        rktp->rktp_rkt->rkt_topic->str,
-                        rktp->rktp_partition)->offset = query_offset;
-
-                rd_kafka_OffsetRequest(rkb, offsets, 0,
-                                       RD_KAFKA_REPLYQ(rktp->rktp_ops,
-                                                       rktp->rktp_op_version),
-                                       rd_kafka_toppar_handle_Offset,
-                                       s_rktp);
-
-                rd_kafka_topic_partition_list_destroy(offsets);
-        }
-
-        rd_kafka_toppar_set_fetch_state(rktp,
-					RD_KAFKA_TOPPAR_FETCH_OFFSET_WAIT);
-}
-
-
-/**
- * Start fetching toppar.
- *
- * Locality: toppar handler thread
- * Locks: none
- */
-static void rd_kafka_toppar_fetch_start (rd_kafka_toppar_t *rktp,
-					 int64_t offset,
-					 rd_kafka_op_t *rko_orig) {
-        rd_kafka_cgrp_t *rkcg = rko_orig->rko_u.fetch_start.rkcg;
-        rd_kafka_resp_err_t err = 0;
-        int32_t version = rko_orig->rko_version;
-
-	rd_kafka_toppar_lock(rktp);
-
-        rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "FETCH",
-                     "Start fetch for %.*s [%"PRId32"] in "
-                     "state %s at offset %s (v%"PRId32")",
-                     RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
-                     rktp->rktp_partition,
-                     rd_kafka_fetch_states[rktp->rktp_fetch_state],
-                     rd_kafka_offset2str(offset), version);
-
-        if (rktp->rktp_fetch_state == RD_KAFKA_TOPPAR_FETCH_STOPPING) {
-                err = RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS;
-		rd_kafka_toppar_unlock(rktp);
-                goto err_reply;
-        }
-
-	rktp->rktp_op_version = version;
-
-        if (rkcg) {
-                rd_kafka_assert(rktp->rktp_rkt->rkt_rk, !rktp->rktp_cgrp);
-                /* Attach toppar to cgrp */
-                rktp->rktp_cgrp = rkcg;
-                rd_kafka_cgrp_op(rkcg, rktp, RD_KAFKA_NO_REPLYQ,
-                                 RD_KAFKA_OP_PARTITION_JOIN, 0);
-        }
-
-
-        if (offset == RD_KAFKA_OFFSET_BEGINNING ||
-	    offset == RD_KAFKA_OFFSET_END ||
-            offset <= RD_KAFKA_OFFSET_TAIL_BASE) {
-		rd_kafka_toppar_next_offset_handle(rktp, offset);
-
-	} else if (offset == RD_KAFKA_OFFSET_STORED) {
-                rd_kafka_offset_store_init(rktp);
-
-	} else if (offset == RD_KAFKA_OFFSET_INVALID) {
-		rd_kafka_offset_reset(rktp, offset,
-				      RD_KAFKA_RESP_ERR__NO_OFFSET,
-				      "no previously committed offset "
-				      "available");
-
-	} else {
-		rktp->rktp_next_offset = offset;
-                rd_kafka_toppar_set_fetch_state(rktp,
-						RD_KAFKA_TOPPAR_FETCH_ACTIVE);
-
-                /* Wake-up broker thread which might be idling on IO */
-                if (rktp->rktp_leader)
-                        rd_kafka_broker_wakeup(rktp->rktp_leader);
-
-	}
-
-        rktp->rktp_offsets_fin.eof_offset = RD_KAFKA_OFFSET_INVALID;
-
-	rd_kafka_toppar_unlock(rktp);
-
-        /* Signal back to caller thread that start has commenced, or err */
-err_reply:
-        if (rko_orig->rko_replyq.q) {
-                rd_kafka_op_t *rko;
-
-                rko = rd_kafka_op_new(RD_KAFKA_OP_FETCH_START);
-
-                rko->rko_err = err;
-                rko->rko_rktp = rd_kafka_toppar_keep(rktp);
-
-                rd_kafka_replyq_enq(&rko_orig->rko_replyq, rko, 0);
-        }
-}
-
-
-
-
-/**
- * Mark toppar's fetch state as stopped (all decommissioning is done,
- * offsets are stored, etc).
- *
- * Locality: toppar handler thread
- * Locks: toppar_lock(rktp) MUST be held
- */
-void rd_kafka_toppar_fetch_stopped (rd_kafka_toppar_t *rktp,
-                                    rd_kafka_resp_err_t err) {
-
-
-        rd_kafka_toppar_set_fetch_state(rktp, RD_KAFKA_TOPPAR_FETCH_STOPPED);
-
-        if (rktp->rktp_cgrp) {
-                /* Detach toppar from cgrp */
-                rd_kafka_cgrp_op(rktp->rktp_cgrp, rktp, RD_KAFKA_NO_REPLYQ,
-                                 RD_KAFKA_OP_PARTITION_LEAVE, 0);
-                rktp->rktp_cgrp = NULL;
-        }
-
-        /* Signal back to application thread that stop is done. */
-	if (rktp->rktp_replyq.q) {
-		rd_kafka_op_t *rko;
-		rko = rd_kafka_op_new(RD_KAFKA_OP_FETCH_STOP|RD_KAFKA_OP_REPLY);
-                rko->rko_err = err;
-		rko->rko_rktp = rd_kafka_toppar_keep(rktp);
-
-		rd_kafka_replyq_enq(&rktp->rktp_replyq, rko, 0);
-	}
-}
-
-
-/**
- * Stop toppar fetcher.
- * This is usually an async operation.
- *
- * Locality: toppar handler thread
- */
-void rd_kafka_toppar_fetch_stop (rd_kafka_toppar_t *rktp,
-				 rd_kafka_op_t *rko_orig) {
-        int32_t version = rko_orig->rko_version;
-
-	rd_kafka_toppar_lock(rktp);
-
-        rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "FETCH",
-                     "Stopping fetch for %.*s [%"PRId32"] in state %s (v%d)",
-                     RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
-                     rktp->rktp_partition,
-                     rd_kafka_fetch_states[rktp->rktp_fetch_state], version);
-
-	rktp->rktp_op_version = version;
-
-	/* Abort pending offset lookups. */
-	if (rktp->rktp_fetch_state == RD_KAFKA_TOPPAR_FETCH_OFFSET_QUERY)
-		rd_kafka_timer_stop(&rktp->rktp_rkt->rkt_rk->rk_timers,
-				    &rktp->rktp_offset_query_tmr,
-				    1/*lock*/);
-
-        /* Clear out the forwarding queue. */
-        rd_kafka_q_fwd_set(rktp->rktp_fetchq, NULL);
-
-        /* Assign the future replyq to propagate stop results. */
-        rd_kafka_assert(rktp->rktp_rkt->rkt_rk, rktp->rktp_replyq.q == NULL);
-	if (rko_orig) {
-		rktp->rktp_replyq = rko_orig->rko_replyq;
-		rd_kafka_replyq_clear(&rko_orig->rko_replyq);
-	}
-        rd_kafka_toppar_set_fetch_state(rktp, RD_KAFKA_TOPPAR_FETCH_STOPPING);
-
-        /* Stop offset store (possibly async).
-         * NOTE: will call .._stopped() if store finishes immediately,
-         *       so no more operations after this call! */
-        rd_kafka_offset_store_stop(rktp);
-
-	rd_kafka_toppar_unlock(rktp);
-}
-
-
-/**
- * Update a toppar's offset.
- * The toppar must previously have been started with FETCH_START.
- *
- * Locality: toppar handler thread
- */
-void rd_kafka_toppar_seek (rd_kafka_toppar_t *rktp,
-			   int64_t offset, rd_kafka_op_t *rko_orig) {
-        rd_kafka_resp_err_t err = 0;
-        int32_t version = rko_orig->rko_version;
-
-	rd_kafka_toppar_lock(rktp);
-
-        rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "FETCH",
-                     "Seek %.*s [%"PRId32"] to offset %s "
-                     "in state %s (v%"PRId32")",
-                     RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
-                     rktp->rktp_partition,
-		     rd_kafka_offset2str(offset),
-                     rd_kafka_fetch_states[rktp->rktp_fetch_state], version);
-
-
-        if (rktp->rktp_fetch_state == RD_KAFKA_TOPPAR_FETCH_STOPPING) {
-                err = RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS;
-                goto err_reply;
-        } else if (!RD_KAFKA_TOPPAR_FETCH_IS_STARTED(rktp->rktp_fetch_state)) {
-                err = RD_KAFKA_RESP_ERR__STATE;
-                goto err_reply;
-        } else if (offset == RD_KAFKA_OFFSET_STORED) {
-		err = RD_KAFKA_RESP_ERR__INVALID_ARG;
-		goto err_reply;
-	}
-
-	rktp->rktp_op_version = version;
-
-	/* Abort pending offset lookups. */
-	if (rktp->rktp_fetch_state == RD_KAFKA_TOPPAR_FETCH_OFFSET_QUERY)
-		rd_kafka_timer_stop(&rktp->rktp_rkt->rkt_rk->rk_timers,
-				    &rktp->rktp_offset_query_tmr,
-				    1/*lock*/);
-
-	if (RD_KAFKA_OFFSET_IS_LOGICAL(offset))
-		rd_kafka_toppar_next_offset_handle(rktp, offset);
-	else {
-		rktp->rktp_next_offset = offset;
-                rd_kafka_toppar_set_fetch_state(rktp,
-						RD_KAFKA_TOPPAR_FETCH_ACTIVE);
-
-                /* Wake-up broker thread which might be idling on IO */
-                if (rktp->rktp_leader)
-                        rd_kafka_broker_wakeup(rktp->rktp_leader);
-	}
-
-        /* Signal back to caller thread that seek has commenced, or err */
-err_reply:
-	rd_kafka_toppar_unlock(rktp);
-
-        if (rko_orig && rko_orig->rko_replyq.q) {
-                rd_kafka_op_t *rko;
-
-                rko = rd_kafka_op_new(RD_KAFKA_OP_SEEK|RD_KAFKA_OP_REPLY);
-
-                rko->rko_err = err;
-		rko->rko_u.fetch_start.offset =
-			rko_orig->rko_u.fetch_start.offset;
-                rko->rko_rktp = rd_kafka_toppar_keep(rktp);
-
-                rd_kafka_replyq_enq(&rko_orig->rko_replyq, rko, 0);
-        }
-}
-
-
-static void rd_kafka_toppar_pause_resume (rd_kafka_toppar_t *rktp,
-					  rd_kafka_op_t *rko_orig) {
-	rd_kafka_t *rk = rktp->rktp_rkt->rkt_rk;
-	int pause = rko_orig->rko_u.pause.pause;
-	int flag = rko_orig->rko_u.pause.flag;
-        int32_t version = rko_orig->rko_version;
-
-	rd_kafka_toppar_lock(rktp);
-
-	rktp->rktp_op_version = version;
-
-	if (pause) {
-		/* Pause partition */
-		rktp->rktp_flags |= flag;
-
-		if (rk->rk_type == RD_KAFKA_CONSUMER) {
-			/* Save the offset of the last consumed message + 1
-			 * as the next offset to fetch on resume. */
-			rktp->rktp_next_offset = rktp->rktp_app_offset;
-
-			rd_kafka_dbg(rk, TOPIC, pause?"PAUSE":"RESUME",
-				     "%s %s [%"PRId32"]: at offset %s "
-				     "(state %s, v%d)",
-				     pause ? "Pause":"Resume",
-				     rktp->rktp_rkt->rkt_topic->str,
-				     rktp->rktp_partition,
-				     rd_kafka_offset2str(
-					     rktp->rktp_next_offset),
-				     rd_kafka_fetch_states[rktp->
-							   rktp_fetch_state],
-				     version);
-		} else {
-			rd_kafka_dbg(rk, TOPIC, pause?"PAUSE":"RESUME",
-				     "%s %s [%"PRId32"] (state %s, v%d)",
-				     pause ? "Pause":"Resume",
-				     rktp->rktp_rkt->rkt_topic->str,
-				     rktp->rktp_partition,
-				     rd_kafka_fetch_states[rktp->
-							   rktp_fetch_state],
-				     version);
-			}
-
-	} else {
-		/* Resume partition */
-		rktp->rktp_flags &= ~flag;
-
-		if (rk->rk_type == RD_KAFKA_CONSUMER) {
-			rd_kafka_dbg(rk, TOPIC, pause?"PAUSE":"RESUME",
-				     "%s %s [%"PRId32"]: at offset %s "
-				     "(state %s, v%d)",
-				     rktp->rktp_fetch_state ==
-				     RD_KAFKA_TOPPAR_FETCH_ACTIVE ?
-				     "Resuming" : "Not resuming stopped",
-				     rktp->rktp_rkt->rkt_topic->str,
-				     rktp->rktp_partition,
-				     rd_kafka_offset2str(
-					     rktp->rktp_next_offset),
-				     rd_kafka_fetch_states[rktp->
-							   rktp_fetch_state],
-				     version);
-
-			/* If the resuming offset is logical we
-			 * need to trigger a seek (which performs the
-			 * logical->absolute lookup) to get
-			 * things going.
-			 * The typical case is a partition that is paused
-			 * before anything has been consumed by the app,
-			 * thus leaving rktp_app_offset=INVALID. */
-			if ((rktp->rktp_fetch_state ==
-			     RD_KAFKA_TOPPAR_FETCH_ACTIVE ||
-			     rktp->rktp_fetch_state ==
-			     RD_KAFKA_TOPPAR_FETCH_OFFSET_WAIT) &&
-			    rktp->rktp_next_offset == RD_KAFKA_OFFSET_INVALID)
-				rd_kafka_toppar_next_offset_handle(
-					rktp, rktp->rktp_next_offset);
-
-		} else
-			rd_kafka_dbg(rk, TOPIC, pause?"PAUSE":"RESUME",
-				     "%s %s [%"PRId32"] (state %s, v%d)",
-				     pause ? "Pause":"Resume",
-				     rktp->rktp_rkt->rkt_topic->str,
-				     rktp->rktp_partition,
-				     rd_kafka_fetch_states[rktp->
-							   rktp_fetch_state],
-				     version);
-	}
-	rd_kafka_toppar_unlock(rktp);
-
-	if (pause && rk->rk_type == RD_KAFKA_CONSUMER) {
-		/* Flush partition's fetch queue */
-		rd_kafka_q_purge_toppar_version(rktp->rktp_fetchq, rktp,
-						rko_orig->rko_version);
-	}
-}
-
-
-
-
-/**
- * Add toppar to fetch list.
- *
- * Locality: broker thread
- * Locks: none
- */
-static RD_INLINE void rd_kafka_broker_fetch_toppar_add (rd_kafka_broker_t *rkb,
-                                                       rd_kafka_toppar_t *rktp){
-        if (rktp->rktp_fetch)
-                return; /* Already added */
-
-        CIRCLEQ_INSERT_TAIL(&rkb->rkb_fetch_toppars, rktp, rktp_fetchlink);
-        rkb->rkb_fetch_toppar_cnt++;
-        rktp->rktp_fetch = 1;
-
-        if (unlikely(rkb->rkb_fetch_toppar_cnt == 1))
-                rd_kafka_broker_fetch_toppar_next(rkb, rktp);
-
-        rd_rkb_dbg(rkb, TOPIC, "FETCHADD",
-                   "Added %.*s [%"PRId32"] to fetch list (%d entries, opv %d)",
-                   RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
-                   rktp->rktp_partition,
-                   rkb->rkb_fetch_toppar_cnt, rktp->rktp_fetch_version);
-}
-
-
-/**
- * Remove toppar from fetch list.
- *
- * Locality: broker thread
- * Locks: none
- */
-static RD_INLINE void rd_kafka_broker_fetch_toppar_del (rd_kafka_broker_t *rkb,
-                                                       rd_kafka_toppar_t *rktp){
-        if (!rktp->rktp_fetch)
-                return; /* Not added */
-
-        CIRCLEQ_REMOVE(&rkb->rkb_fetch_toppars, rktp, rktp_fetchlink);
-        rd_kafka_assert(NULL, rkb->rkb_fetch_toppar_cnt > 0);
-        rkb->rkb_fetch_toppar_cnt--;
-        rktp->rktp_fetch = 0;
-
-        if (rkb->rkb_fetch_toppar_next == rktp) {
-                /* Update next pointer */
-                rd_kafka_broker_fetch_toppar_next(
-			rkb, CIRCLEQ_LOOP_NEXT(&rkb->rkb_fetch_toppars,
-					       rktp, rktp_fetchlink));
-        }
-
-        rd_rkb_dbg(rkb, TOPIC, "FETCHADD",
-                   "Removed %.*s [%"PRId32"] from fetch list "
-                   "(%d entries, opv %d)",
-                   RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
-                   rktp->rktp_partition,
-                   rkb->rkb_fetch_toppar_cnt, rktp->rktp_fetch_version);
-
-}
-
-
-
-/**
- * @brief Decide whether this toppar should be on the fetch list or not.
- *
- * Also:
- *  - update toppar's op version (for broker thread's copy)
- *  - finalize statistics (move rktp_offsets to rktp_offsets_fin)
- *
- * @returns the partition's Fetch backoff timestamp, or 0 if no backoff.
- *
- * @locality broker thread
- */
-rd_ts_t rd_kafka_toppar_fetch_decide (rd_kafka_toppar_t *rktp,
-				   rd_kafka_broker_t *rkb,
-				   int force_remove) {
-        int should_fetch = 1;
-        const char *reason = "";
-        int32_t version;
-        rd_ts_t ts_backoff = 0;
-
-	rd_kafka_toppar_lock(rktp);
-
-	/* Forced removal from fetch list */
-	if (unlikely(force_remove)) {
-		reason = "forced removal";
-		should_fetch = 0;
-		goto done;
-	}
-
-	if (unlikely((rktp->rktp_flags & RD_KAFKA_TOPPAR_F_REMOVE) != 0)) {
-		reason = "partition removed";
-		should_fetch = 0;
-		goto done;
-	}
-
-	/* Skip toppars not in active fetch state */
-	if (rktp->rktp_fetch_state != RD_KAFKA_TOPPAR_FETCH_ACTIVE) {
-                reason = "not in active fetch state";
-		should_fetch = 0;
-		goto done;
-	}
-
-        /* Update broker thread's fetch op version */
-        version = rktp->rktp_op_version;
-        if (version > rktp->rktp_fetch_version ||
-	    rktp->rktp_next_offset != rktp->rktp_last_next_offset) {
-                /* New version barrier, something was modified from the
-                 * control plane. Reset and start over.
-		 * Alternatively only the next_offset changed but not the
-		 * barrier, which is the case when automatically triggering
-		 * offset.reset (such as on PARTITION_EOF). */
-
-                rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "FETCHDEC",
-                             "Topic %s [%"PRId32"]: fetch decide: "
-                             "updating to version %d (was %d) at "
-                             "offset %"PRId64" (was %"PRId64")",
-                             rktp->rktp_rkt->rkt_topic->str,
-                             rktp->rktp_partition,
-                             version, rktp->rktp_fetch_version,
-                             rktp->rktp_next_offset,
-                             rktp->rktp_offsets.fetch_offset);
-
-                rd_kafka_offset_stats_reset(&rktp->rktp_offsets);
-
-                /* New start offset */
-                rktp->rktp_offsets.fetch_offset = rktp->rktp_next_offset;
-		rktp->rktp_last_next_offset = rktp->rktp_next_offset;
-
-                rktp->rktp_fetch_version = version;
-
-                rd_kafka_q_purge_toppar_version(rktp->rktp_fetchq, rktp,
-                                                version);
-        }
-
-
-	if (RD_KAFKA_TOPPAR_IS_PAUSED(rktp)) {
-		should_fetch = 0;
-		reason = "paused";
-
-	} else if (RD_KAFKA_OFFSET_IS_LOGICAL(rktp->rktp_next_offset)) {
-                should_fetch = 0;
-                reason = "no concrete offset";
-
-        } else if (rd_kafka_q_len(rktp->rktp_fetchq) >=
-		   rkb->rkb_rk->rk_conf.queued_min_msgs) {
-		/* Skip toppars whose local message queue is already above
-		 * the lower threshold. */
-                reason = "queued.min.messages exceeded";
-                should_fetch = 0;
-
-        } else if ((int64_t)rd_kafka_q_size(rktp->rktp_fetchq) >=
-            rkb->rkb_rk->rk_conf.queued_max_msg_bytes) {
-                reason = "queued.max.messages.kbytes exceeded";
-                should_fetch = 0;
-
-        } else if (rktp->rktp_ts_fetch_backoff > rd_clock()) {
-                reason = "fetch backed off";
-                ts_backoff = rktp->rktp_ts_fetch_backoff;
-                should_fetch = 0;
-        }
-
- done:
-        /* Copy offset stats to finalized placeholder. */
-        rktp->rktp_offsets_fin = rktp->rktp_offsets;
-
-        if (rktp->rktp_fetch != should_fetch) {
-                rd_rkb_dbg(rkb, FETCH, "FETCH",
-                           "Topic %s [%"PRId32"] in state %s at offset %s "
-                           "(%d/%d msgs, %"PRId64"/%d kb queued, "
-			   "opv %"PRId32") is %sfetchable: %s",
-                           rktp->rktp_rkt->rkt_topic->str,
-                           rktp->rktp_partition,
-			   rd_kafka_fetch_states[rktp->rktp_fetch_state],
-                           rd_kafka_offset2str(rktp->rktp_next_offset),
-                           rd_kafka_q_len(rktp->rktp_fetchq),
-                           rkb->rkb_rk->rk_conf.queued_min_msgs,
-                           rd_kafka_q_size(rktp->rktp_fetchq) / 1024,
-                           rkb->rkb_rk->rk_conf.queued_max_msg_kbytes,
-			   rktp->rktp_fetch_version,
-                           should_fetch ? "" : "not ", reason);
-
-                if (should_fetch) {
-			rd_dassert(rktp->rktp_fetch_version > 0);
-                        rd_kafka_broker_fetch_toppar_add(rkb, rktp);
-                } else {
-                        rd_kafka_broker_fetch_toppar_del(rkb, rktp);
-                        /* Non-fetching partitions will have an
-                         * indefinite backoff, unless explicitly specified. */
-                        if (!ts_backoff)
-                                ts_backoff = RD_TS_MAX;
-                }
-        }
-
-        rd_kafka_toppar_unlock(rktp);
-
-        return ts_backoff;
-}
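
For illustration, the two queue thresholds checked above correspond to the public configuration properties queued.min.messages and queued.max.messages.kbytes. A minimal sketch of tuning them on a consumer configuration (the values are illustrative only, error handling trimmed):

#include <stdio.h>
#include <librdkafka/rdkafka.h>

static rd_kafka_conf_t *make_conf (void) {
        char errstr[512];
        rd_kafka_conf_t *conf = rd_kafka_conf_new();

        /* Fetching for a partition is skipped while at least this many
         * messages are already queued locally (see fetch_decide above)... */
        if (rd_kafka_conf_set(conf, "queued.min.messages", "100000",
                              errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK ||
            /* ...or while the queued data exceeds this many kilobytes. */
            rd_kafka_conf_set(conf, "queued.max.messages.kbytes", "65536",
                              errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK)
                fprintf(stderr, "conf error: %s\n", errstr);

        return conf;
}
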
-
-
-/**
- * @brief Serve a toppar in a consumer broker thread.
- *        This is considered the fast path and should be minimal,
- *        mostly focusing on fetch related mechanisms.
- *
- * @returns the partition's Fetch backoff timestamp, or 0 if no backoff.
- *
- * @locality broker thread
- * @locks none
- */
-rd_ts_t rd_kafka_broker_consumer_toppar_serve (rd_kafka_broker_t *rkb,
-                                               rd_kafka_toppar_t *rktp) {
-        return rd_kafka_toppar_fetch_decide(rktp, rkb, 0);
-}
-
-
-
-/**
- * Serve a toppar op
- * 'rktp' may be NULL for certain ops (OP_RECV_BUF)
- *
- * @locality toppar handler thread
- */
-static rd_kafka_op_res_t
-rd_kafka_toppar_op_serve (rd_kafka_t *rk,
-                          rd_kafka_q_t *rkq, rd_kafka_op_t *rko,
-                          rd_kafka_q_cb_type_t cb_type, void *opaque) {
-	rd_kafka_toppar_t *rktp = NULL;
-	int outdated = 0;
-
-	if (rko->rko_rktp)
-		rktp = rd_kafka_toppar_s2i(rko->rko_rktp);
-
-	if (rktp) {
-		outdated = rd_kafka_op_version_outdated(rko,
-							rktp->rktp_op_version);
-
-		rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OP",
-			     "%.*s [%"PRId32"] received %sop %s "
-			     "(v%"PRId32") in fetch-state %s (opv%d)",
-			     RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
-			     rktp->rktp_partition,
-			     outdated ? "outdated ": "",
-			     rd_kafka_op2str(rko->rko_type),
-			     rko->rko_version,
-			     rd_kafka_fetch_states[rktp->rktp_fetch_state],
-			     rktp->rktp_op_version);
-
-		if (outdated) {
-#if ENABLE_DEVEL
-			rd_kafka_op_print(stdout, "PART_OUTDATED", rko);
-#endif
-                        rd_kafka_op_destroy(rko);
-			return RD_KAFKA_OP_RES_HANDLED;
-		}
-	}
-
-	switch ((int)rko->rko_type)
-	{
-	case RD_KAFKA_OP_FETCH_START:
-		rd_kafka_toppar_fetch_start(rktp,
-					    rko->rko_u.fetch_start.offset, rko);
-		break;
-
-	case RD_KAFKA_OP_FETCH_STOP:
-		rd_kafka_toppar_fetch_stop(rktp, rko);
-		break;
-
-	case RD_KAFKA_OP_SEEK:
-		rd_kafka_toppar_seek(rktp, rko->rko_u.fetch_start.offset, rko);
-		break;
-
-	case RD_KAFKA_OP_PAUSE:
-		rd_kafka_toppar_pause_resume(rktp, rko);
-		break;
-
-        case RD_KAFKA_OP_OFFSET_COMMIT | RD_KAFKA_OP_REPLY:
-                rd_kafka_assert(NULL, rko->rko_u.offset_commit.cb);
-                rko->rko_u.offset_commit.cb(
-                        rk, rko->rko_err,
-                        rko->rko_u.offset_commit.partitions,
-                        rko->rko_u.offset_commit.opaque);
-                break;
-
-	case RD_KAFKA_OP_OFFSET_FETCH | RD_KAFKA_OP_REPLY:
-        {
-                /* OffsetFetch reply */
-                rd_kafka_topic_partition_list_t *offsets =
-			rko->rko_u.offset_fetch.partitions;
-                shptr_rd_kafka_toppar_t *s_rktp;
-		int64_t offset = RD_KAFKA_OFFSET_INVALID;
-
-                s_rktp = offsets->elems[0]._private;
-                if (!rko->rko_err) {
-                        /* Request succeeded but individual partitions may have failed */
-                        rko->rko_err = offsets->elems[0].err;
-			offset       = offsets->elems[0].offset;
-                }
-                offsets->elems[0]._private = NULL;
-                rd_kafka_topic_partition_list_destroy(offsets);
-		rko->rko_u.offset_fetch.partitions = NULL;
-                rktp = rd_kafka_toppar_s2i(s_rktp);
-
-		rd_kafka_timer_stop(&rktp->rktp_rkt->rkt_rk->rk_timers,
-				    &rktp->rktp_offset_query_tmr,
-				    1/*lock*/);
-
-		rd_kafka_toppar_lock(rktp);
-
-		if (rko->rko_err) {
-			rd_kafka_dbg(rktp->rktp_rkt->rkt_rk,
-				     TOPIC, "OFFSET",
-				     "Failed to fetch offset for "
-				     "%.*s [%"PRId32"]: %s",
-				     RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
-				     rktp->rktp_partition,
-				     rd_kafka_err2str(rko->rko_err));
-
-			/* Keep on querying until we succeed. */
-			rd_kafka_toppar_set_fetch_state(rktp, RD_KAFKA_TOPPAR_FETCH_OFFSET_QUERY);
-
-			rd_kafka_toppar_unlock(rktp);
-
-			rd_kafka_timer_start(&rktp->rktp_rkt->rkt_rk->rk_timers,
-					     &rktp->rktp_offset_query_tmr,
-					     500*1000,
-					     rd_kafka_offset_query_tmr_cb,
-					     rktp);
-
-			/* Propagate error to application */
-			if (rko->rko_err != RD_KAFKA_RESP_ERR__WAIT_COORD) {
-				rd_kafka_q_op_err(rktp->rktp_fetchq,
-						  RD_KAFKA_OP_ERR, rko->rko_err,
-						  0, rktp, 0,
-						  "Failed to fetch "
-						  "offsets from brokers: %s",
-						  rd_kafka_err2str(rko->rko_err));
-			}
-
-			rd_kafka_toppar_destroy(s_rktp);
-
-			break;
-		}
-
-		rd_kafka_dbg(rktp->rktp_rkt->rkt_rk,
-			     TOPIC, "OFFSET",
-			     "%.*s [%"PRId32"]: OffsetFetch returned "
-			     "offset %s (%"PRId64")",
-			     RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
-			     rktp->rktp_partition,
-			     rd_kafka_offset2str(offset), offset);
-
-		if (offset > 0)
-			rktp->rktp_committed_offset = offset;
-
-		if (offset >= 0)
-			rd_kafka_toppar_next_offset_handle(rktp, offset);
-		else
-			rd_kafka_offset_reset(rktp, offset,
-					      RD_KAFKA_RESP_ERR__NO_OFFSET,
-					      "no previously committed offset "
-					      "available");
-		rd_kafka_toppar_unlock(rktp);
-
-                rd_kafka_toppar_destroy(s_rktp);
-        }
-        break;
-
-        default:
-                rd_kafka_assert(NULL, !*"unknown type");
-                break;
-        }
-
-        rd_kafka_op_destroy(rko);
-
-        return RD_KAFKA_OP_RES_HANDLED;
-}
-
-
-
-
-
-/**
- * Send command op to toppar (handled by toppar's thread).
- *
- * Locality: any thread
- */
-static void rd_kafka_toppar_op0 (rd_kafka_toppar_t *rktp, rd_kafka_op_t *rko,
-				 rd_kafka_replyq_t replyq) {
-        rko->rko_rktp = rd_kafka_toppar_keep(rktp);
-	rko->rko_replyq = replyq;
-
-        rd_kafka_q_enq(rktp->rktp_ops, rko);
-}
-
-
-/**
- * Send command op to toppar (handled by toppar's thread).
- *
- * Locality: any thread
- */
-static void rd_kafka_toppar_op (rd_kafka_toppar_t *rktp,
-				rd_kafka_op_type_t type, int32_t version,
-				int64_t offset, rd_kafka_cgrp_t *rkcg,
-				rd_kafka_replyq_t replyq) {
-        rd_kafka_op_t *rko;
-
-        rko = rd_kafka_op_new(type);
-	rko->rko_version = version;
-        if (type == RD_KAFKA_OP_FETCH_START ||
-	    type == RD_KAFKA_OP_SEEK) {
-		if (rkcg)
-			rko->rko_u.fetch_start.rkcg = rkcg;
-		rko->rko_u.fetch_start.offset = offset;
-	}
-
-	rd_kafka_toppar_op0(rktp, rko, replyq);
-}
-
-
-
-/**
- * Start consuming partition (async operation).
- *  'offset' is the initial offset
- *  'fwdq' is an optional queue to forward messages to; if this is NULL
- *  then messages will be enqueued on rktp_fetchq.
- *  'replyq' is an optional queue for handling the consume_start ack.
- *
- * This is the thread-safe interface that can be called from any thread.
- */
-rd_kafka_resp_err_t rd_kafka_toppar_op_fetch_start (rd_kafka_toppar_t *rktp,
-                                                    int64_t offset,
-                                                    rd_kafka_q_t *fwdq,
-                                                    rd_kafka_replyq_t replyq) {
-	int32_t version;
-
-        rd_kafka_q_lock(rktp->rktp_fetchq);
-        if (fwdq && !(rktp->rktp_fetchq->rkq_flags & RD_KAFKA_Q_F_FWD_APP))
-                rd_kafka_q_fwd_set0(rktp->rktp_fetchq, fwdq,
-                                    0, /* no do_lock */
-                                    0 /* no fwd_app */);
-        rd_kafka_q_unlock(rktp->rktp_fetchq);
-
-	/* Bump version barrier. */
-	version = rd_kafka_toppar_version_new_barrier(rktp);
-
-	rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "CONSUMER",
-		     "Start consuming %.*s [%"PRId32"] at "
-		     "offset %s (v%"PRId32")",
-		     RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
-		     rktp->rktp_partition, rd_kafka_offset2str(offset),
-		     version);
-
-        rd_kafka_toppar_op(rktp, RD_KAFKA_OP_FETCH_START, version,
-                           offset, rktp->rktp_rkt->rkt_rk->rk_cgrp, replyq);
-
-        return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
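
From the application side, a fetch start like this is typically triggered through the simple (legacy) consumer API. A minimal sketch, assuming an already-created topic handle and hypothetical partition/timeout values, with error handling trimmed:

#include <librdkafka/rdkafka.h>

/* Start fetching partition 0 from the stored offset, consume one message,
 * then stop (sketch). */
static void consume_briefly (rd_kafka_topic_t *rkt) {
        rd_kafka_message_t *rkmessage;

        rd_kafka_consume_start(rkt, 0, RD_KAFKA_OFFSET_STORED);

        if ((rkmessage = rd_kafka_consume(rkt, 0, 1000 /*ms*/))) {
                /* rkmessage->err is set for events such as PARTITION_EOF */
                rd_kafka_message_destroy(rkmessage);
        }

        rd_kafka_consume_stop(rkt, 0);
}
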
-
-
-/**
- * Stop consuming partition (async operation).
- * This is the thread-safe interface that can be called from any thread.
- *
- * Locality: any thread
- */
-rd_kafka_resp_err_t rd_kafka_toppar_op_fetch_stop (rd_kafka_toppar_t *rktp,
-                                                   rd_kafka_replyq_t replyq) {
-	int32_t version;
-
-	/* Bump version barrier. */
-        version = rd_kafka_toppar_version_new_barrier(rktp);
-
-        rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "CONSUMER",
-		     "Stop consuming %.*s [%"PRId32"] (v%"PRId32")",
-		     RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
-		     rktp->rktp_partition, version);
-
-        rd_kafka_toppar_op(rktp, RD_KAFKA_OP_FETCH_STOP, version,
-			   0, NULL, replyq);
-
-        return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-
-/**
- * Set/Seek offset of a consumed partition (async operation).
- *  'offset' is the target offset
- *  'replyq' is an optional queue for handling the ack.
- *
- * This is the thread-safe interface that can be called from any thread.
- */
-rd_kafka_resp_err_t rd_kafka_toppar_op_seek (rd_kafka_toppar_t *rktp,
-                                             int64_t offset,
-                                             rd_kafka_replyq_t replyq) {
-	int32_t version;
-
-	/* Bump version barrier. */
-	version = rd_kafka_toppar_version_new_barrier(rktp);
-
-	rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "CONSUMER",
-		     "Seek %.*s [%"PRId32"] to "
-		     "offset %s (v%"PRId32")",
-		     RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
-		     rktp->rktp_partition, rd_kafka_offset2str(offset),
-		     version);
-
-        rd_kafka_toppar_op(rktp, RD_KAFKA_OP_SEEK, version,
-			   offset, NULL, replyq);
-
-        return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
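
A seek like this is exposed to applications as rd_kafka_seek(). A minimal sketch, with a hypothetical partition, offset and timeout:

#include <librdkafka/rdkafka.h>

/* Seek partition 3 of an already-started topic to offset 1234, waiting up
 * to 2 seconds for the seek to be performed (sketch). */
static rd_kafka_resp_err_t seek_example (rd_kafka_topic_t *rkt) {
        /* A timeout_ms > 0 waits for the toppar op to complete;
         * 0 makes the call fully asynchronous. */
        return rd_kafka_seek(rkt, 3, 1234, 2000);
}
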
-
-
-/**
- * Pause/resume partition (async operation).
- * \p flag is either RD_KAFKA_TOPPAR_F_APP_PAUSE or .._F_LIB_PAUSE
- * depending on whether the pause was requested by the application or by librdkafka.
- * \p pause is 1 for pausing or 0 for resuming.
- *
- * Locality: any
- */
-static rd_kafka_resp_err_t
-rd_kafka_toppar_op_pause_resume (rd_kafka_toppar_t *rktp,
-				 int pause, int flag) {
-	int32_t version;
-	rd_kafka_op_t *rko;
-
-	/* Bump version barrier. */
-	version = rd_kafka_toppar_version_new_barrier(rktp);
-
-	rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, pause ? "PAUSE":"RESUME",
-		     "%s %.*s [%"PRId32"] (v%"PRId32")",
-		     pause ? "Pause" : "Resume",
-		     RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
-		     rktp->rktp_partition, version);
-
-	rko = rd_kafka_op_new(RD_KAFKA_OP_PAUSE);
-	rko->rko_version = version;
-	rko->rko_u.pause.pause = pause;
-	rko->rko_u.pause.flag = flag;
-
-	rd_kafka_toppar_op0(rktp, rko, RD_KAFKA_NO_REPLYQ);
-
-        return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-
-
-
-
-/**
- * Pause or resume a list of partitions.
- * \p flag is either RD_KAFKA_TOPPAR_F_APP_PAUSE or .._F_LIB_PAUSE
- * depending on whether the pause was requested by the application or by librdkafka.
- * \p pause is 1 for pausing or 0 for resuming.
- *
- * Locality: any
- *
- * @remark This is an asynchronous call, the actual pause/resume is performed
- *         by toppar_pause() in the toppar's handler thread.
- */
-rd_kafka_resp_err_t
-rd_kafka_toppars_pause_resume (rd_kafka_t *rk, int pause, int flag,
-			       rd_kafka_topic_partition_list_t *partitions) {
-	int i;
-
-	rd_kafka_dbg(rk, TOPIC, pause ? "PAUSE":"RESUME",
-		     "%s %s %d partition(s)",
-		     flag & RD_KAFKA_TOPPAR_F_APP_PAUSE ? "Application" : "Library",
-		     pause ? "pausing" : "resuming", partitions->cnt);
-
-	for (i = 0 ; i < partitions->cnt ; i++) {
-		rd_kafka_topic_partition_t *rktpar = &partitions->elems[i];
-		shptr_rd_kafka_toppar_t *s_rktp;
-		rd_kafka_toppar_t *rktp;
-
-                s_rktp = rd_kafka_topic_partition_list_get_toppar(rk, rktpar);
-		if (!s_rktp) {
-			rd_kafka_dbg(rk, TOPIC, pause ? "PAUSE":"RESUME",
-				     "%s %s [%"PRId32"]: skipped: "
-				     "unknown partition",
-				     pause ? "Pause":"Resume",
-				     rktpar->topic, rktpar->partition);
-
-			rktpar->err = RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION;
-			continue;
-		}
-
-		rktp = rd_kafka_toppar_s2i(s_rktp);
-
-		rd_kafka_toppar_op_pause_resume(rktp, pause, flag);
-
-		rd_kafka_toppar_destroy(s_rktp);
-
-		rktpar->err = RD_KAFKA_RESP_ERR_NO_ERROR;
-	}
-
-	return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
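
Applications drive this pause/resume path through rd_kafka_pause_partitions() and rd_kafka_resume_partitions(). A minimal sketch with a hypothetical topic name and partition number:

#include <librdkafka/rdkafka.h>

/* Pause and later resume a single partition of a consumer (sketch).
 * Each element's err field is filled in by the pause/resume machinery. */
static void pause_then_resume (rd_kafka_t *rk,
                               const char *topic, int32_t partition) {
        rd_kafka_topic_partition_list_t *parts =
                rd_kafka_topic_partition_list_new(1);
        rd_kafka_topic_partition_list_add(parts, topic, partition);

        rd_kafka_pause_partitions(rk, parts);
        /* ... do other work while the partition is paused ... */
        rd_kafka_resume_partitions(rk, parts);

        rd_kafka_topic_partition_list_destroy(parts);
}
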
-
-
-
-
-
-/**
- * Propagate error for toppar
- */
-void rd_kafka_toppar_enq_error (rd_kafka_toppar_t *rktp,
-                                rd_kafka_resp_err_t err) {
-        rd_kafka_op_t *rko;
-
-        rko = rd_kafka_op_new(RD_KAFKA_OP_ERR);
-        rko->rko_err  = err;
-        rko->rko_rktp = rd_kafka_toppar_keep(rktp);
-        rko->rko_u.err.errstr = rd_strdup(rd_kafka_err2str(rko->rko_err));
-
-        rd_kafka_q_enq(rktp->rktp_fetchq, rko);
-}
-
-
-
-
-
-/**
- * Returns the local leader broker for this toppar.
- * If \p proper_broker is set, NULL will be returned if the current handler
- * is not a proper broker (i.e., the INTERNAL broker).
- *
- * The returned broker has an increased refcount.
- *
- * Locks: none
- */
-rd_kafka_broker_t *rd_kafka_toppar_leader (rd_kafka_toppar_t *rktp,
-                                           int proper_broker) {
-        rd_kafka_broker_t *rkb;
-        rd_kafka_toppar_lock(rktp);
-        rkb = rktp->rktp_leader;
-        if (rkb) {
-                if (proper_broker && rkb->rkb_source == RD_KAFKA_INTERNAL)
-                        rkb = NULL;
-                else
-                        rd_kafka_broker_keep(rkb);
-        }
-        rd_kafka_toppar_unlock(rktp);
-
-        return rkb;
-}
-
-
-/**
- * @brief Take action when partition leader becomes unavailable.
- *        This should be called when leader-specific requests fail with
- *        NOT_LEADER_FOR.. or similar error codes, e.g. ProduceRequest.
- *
- * @locks none
- * @locality any
- */
-void rd_kafka_toppar_leader_unavailable (rd_kafka_toppar_t *rktp,
-                                         const char *reason,
-                                         rd_kafka_resp_err_t err) {
-        rd_kafka_itopic_t *rkt = rktp->rktp_rkt;
-
-        rd_kafka_dbg(rkt->rkt_rk, TOPIC, "LEADERUA",
-                     "%s [%"PRId32"]: leader unavailable: %s: %s",
-                     rkt->rkt_topic->str, rktp->rktp_partition, reason,
-                     rd_kafka_err2str(err));
-
-        rd_kafka_topic_wrlock(rkt);
-        rkt->rkt_flags |= RD_KAFKA_TOPIC_F_LEADER_UNAVAIL;
-        rd_kafka_topic_wrunlock(rkt);
-
-        rd_kafka_topic_fast_leader_query(rkt->rkt_rk);
-}
-
-
-const char *
-rd_kafka_topic_partition_topic (const rd_kafka_topic_partition_t *rktpar) {
-        const rd_kafka_toppar_t *rktp = (const rd_kafka_toppar_t *)rktpar;
-        return rktp->rktp_rkt->rkt_topic->str;
-}
-
-int32_t
-rd_kafka_topic_partition_partition (const rd_kafka_topic_partition_t *rktpar) {
-        const rd_kafka_toppar_t *rktp = (const rd_kafka_toppar_t *)rktpar;
-        return rktp->rktp_partition;
-}
-
-void rd_kafka_topic_partition_get (const rd_kafka_topic_partition_t *rktpar,
-                                   const char **name, int32_t *partition) {
-        const rd_kafka_toppar_t *rktp = (const rd_kafka_toppar_t *)rktpar;
-        *name = rktp->rktp_rkt->rkt_topic->str;
-        *partition = rktp->rktp_partition;
-}
-
-
-
-
-/**
- *
- * rd_kafka_topic_partition_t lists
- * Fixed-size non-growable list of partitions for propagation to application.
- *
- */
-
-
-static void
-rd_kafka_topic_partition_list_grow (rd_kafka_topic_partition_list_t *rktparlist,
-                                    int add_size) {
-        if (add_size < rktparlist->size)
-                add_size = RD_MAX(rktparlist->size, 32);
-
-        rktparlist->size += add_size;
-        rktparlist->elems = rd_realloc(rktparlist->elems,
-                                       sizeof(*rktparlist->elems) *
-                                       rktparlist->size);
-
-}
-/**
- * Create a list for fitting 'size' topic_partitions (rktp).
- */
-rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_new (int size) {
-        rd_kafka_topic_partition_list_t *rktparlist;
-
-        rktparlist = rd_calloc(1, sizeof(*rktparlist));
-
-        rktparlist->size = size;
-        rktparlist->cnt = 0;
-
-        if (size > 0)
-                rd_kafka_topic_partition_list_grow(rktparlist, size);
-
-        return rktparlist;
-}
-
-
-
-rd_kafka_topic_partition_t *rd_kafka_topic_partition_new (const char *topic,
-							  int32_t partition) {
-	rd_kafka_topic_partition_t *rktpar = rd_calloc(1, sizeof(*rktpar));
-
-	rktpar->topic = rd_strdup(topic);
-	rktpar->partition = partition;
-
-	return rktpar;
-}
-
-
-rd_kafka_topic_partition_t *
-rd_kafka_topic_partition_new_from_rktp (rd_kafka_toppar_t *rktp) {
-	rd_kafka_topic_partition_t *rktpar = rd_calloc(1, sizeof(*rktpar));
-
-	rktpar->topic = RD_KAFKAP_STR_DUP(rktp->rktp_rkt->rkt_topic);
-	rktpar->partition = rktp->rktp_partition;
-
-	return rktpar;
-}
-
-
-
-static void
-rd_kafka_topic_partition_destroy0 (rd_kafka_topic_partition_t *rktpar, int do_free) {
-	if (rktpar->topic)
-		rd_free(rktpar->topic);
-	if (rktpar->metadata)
-		rd_free(rktpar->metadata);
-	if (rktpar->_private)
-		rd_kafka_toppar_destroy((shptr_rd_kafka_toppar_t *)
-					rktpar->_private);
-
-	if (do_free)
-		rd_free(rktpar);
-}
-
-void rd_kafka_topic_partition_destroy (rd_kafka_topic_partition_t *rktpar) {
-	rd_kafka_topic_partition_destroy0(rktpar, 1);
-}
-
-
-/**
- * Destroys a list previously created with .._list_new() and drops
- * any references to contained toppars.
- */
-void
-rd_kafka_topic_partition_list_destroy (rd_kafka_topic_partition_list_t *rktparlist) {
-        int i;
-
-        for (i = 0 ; i < rktparlist->cnt ; i++)
-		rd_kafka_topic_partition_destroy0(&rktparlist->elems[i], 0);
-
-        if (rktparlist->elems)
-                rd_free(rktparlist->elems);
-
-        rd_free(rktparlist);
-}
-
-
-/**
- * Add a partition to an rktpar list.
- * The list will be grown if needed to fit the new partition.
- *
- * '_private' must be NULL or a valid 'shptr_rd_kafka_toppar_t *'.
- *
- * Returns a pointer to the added element.
- */
-rd_kafka_topic_partition_t *
-rd_kafka_topic_partition_list_add0 (rd_kafka_topic_partition_list_t *rktparlist,
-                                    const char *topic, int32_t partition,
-				    shptr_rd_kafka_toppar_t *_private) {
-        rd_kafka_topic_partition_t *rktpar;
-        if (rktparlist->cnt == rktparlist->size)
-                rd_kafka_topic_partition_list_grow(rktparlist, 1);
-        rd_kafka_assert(NULL, rktparlist->cnt < rktparlist->size);
-
-        rktpar = &rktparlist->elems[rktparlist->cnt++];
-        memset(rktpar, 0, sizeof(*rktpar));
-        rktpar->topic = rd_strdup(topic);
-        rktpar->partition = partition;
-	rktpar->offset = RD_KAFKA_OFFSET_INVALID;
-        rktpar->_private = _private;
-
-        return rktpar;
-}
-
-
-rd_kafka_topic_partition_t *
-rd_kafka_topic_partition_list_add (rd_kafka_topic_partition_list_t *rktparlist,
-                                   const char *topic, int32_t partition) {
-        return rd_kafka_topic_partition_list_add0(rktparlist,
-                                                  topic, partition, NULL);
-}
-
-
-/**
- * Adds a consecutive list of partitions to a list
- */
-void
-rd_kafka_topic_partition_list_add_range (rd_kafka_topic_partition_list_t
-                                         *rktparlist,
-                                         const char *topic,
-                                         int32_t start, int32_t stop) {
-
-        for (; start <= stop ; start++)
-                rd_kafka_topic_partition_list_add(rktparlist, topic, start);
-}
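
As a usage illustration of the public list API implemented above, the following sketch builds a list, fills a consecutive partition range, and looks one entry up (the topic name and partition numbers are hypothetical):

#include <stdio.h>
#include <librdkafka/rdkafka.h>

/* Build a list covering partitions 0..7 of a topic and set a target
 * offset on one of them (sketch). */
static void build_list_example (void) {
        rd_kafka_topic_partition_list_t *parts;
        rd_kafka_topic_partition_t *p;

        parts = rd_kafka_topic_partition_list_new(8);
        rd_kafka_topic_partition_list_add_range(parts, "my_topic", 0, 7);

        if ((p = rd_kafka_topic_partition_list_find(parts, "my_topic", 3)))
                p->offset = RD_KAFKA_OFFSET_BEGINNING;

        printf("list holds %d partitions\n", parts->cnt);
        rd_kafka_topic_partition_list_destroy(parts);
}
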
-
-
-rd_kafka_topic_partition_t *
-rd_kafka_topic_partition_list_upsert (
-        rd_kafka_topic_partition_list_t *rktparlist,
-        const char *topic, int32_t partition) {
-        rd_kafka_topic_partition_t *rktpar;
-
-        if ((rktpar = rd_kafka_topic_partition_list_find(rktparlist,
-                                                         topic, partition)))
-                return rktpar;
-
-        return rd_kafka_topic_partition_list_add(rktparlist, topic, partition);
-}
-
-/**
- * @brief Creates a copy of \p rktpar and adds it to \p rktparlist
- */
-void
-rd_kafka_topic_partition_copy (rd_kafka_topic_partition_list_t *rktparlist,
-                               const rd_kafka_topic_partition_t *rktpar) {
-        rd_kafka_topic_partition_t *dst;
-
-        dst = rd_kafka_topic_partition_list_add0(
-                rktparlist,
-                rktpar->topic,
-                rktpar->partition,
-                rktpar->_private ?
-                rd_kafka_toppar_keep(
-                        rd_kafka_toppar_s2i((shptr_rd_kafka_toppar_t *)
-                                            rktpar->_private)) : NULL);
-        dst->offset = rktpar->offset;
-        dst->opaque = rktpar->opaque;
-        dst->err    = rktpar->err;
-        if (rktpar->metadata_size > 0) {
-                dst->metadata =
-                        rd_malloc(rktpar->metadata_size);
-                dst->metadata_size = rktpar->metadata_size;
-                memcpy((void *)dst->metadata, rktpar->metadata,
-                       rktpar->metadata_size);
-        }
-}
-
-
-
-/**
- * Create and return a copy of list 'src'
- */
-rd_kafka_topic_partition_list_t *
-rd_kafka_topic_partition_list_copy (const rd_kafka_topic_partition_list_t *src){
-        rd_kafka_topic_partition_list_t *dst;
-        int i;
-
-        dst = rd_kafka_topic_partition_list_new(src->size);
-
-        for (i = 0 ; i < src->cnt ; i++)
-                rd_kafka_topic_partition_copy(dst, &src->elems[i]);
-        return dst;
-}
-
-/**
- * @returns (and sets if necessary) the \p rktpar's _private / toppar.
- * @remark a new reference is returned.
- */
-shptr_rd_kafka_toppar_t *
-rd_kafka_topic_partition_get_toppar (rd_kafka_t *rk,
-                                     rd_kafka_topic_partition_t *rktpar) {
-        shptr_rd_kafka_toppar_t *s_rktp;
-
-        if (!(s_rktp = rktpar->_private))
-                s_rktp = rktpar->_private =
-                        rd_kafka_toppar_get2(rk,
-                                             rktpar->topic,
-                                             rktpar->partition, 0, 0);
-        if (!s_rktp)
-                return NULL;
-
-        return rd_kafka_toppar_keep(rd_kafka_toppar_s2i(s_rktp));
-}
-
-
-static int rd_kafka_topic_partition_cmp (const void *_a, const void *_b,
-                                         void *opaque) {
-        const rd_kafka_topic_partition_t *a = _a;
-        const rd_kafka_topic_partition_t *b = _b;
-        int r = strcmp(a->topic, b->topic);
-        if (r)
-                return r;
-        else
-                return a->partition - b->partition;
-}
-
-
-/**
- * @brief Search 'rktparlist' for 'topic' and 'partition'.
- * @returns the elems[] index or -1 on miss.
- */
-int
-rd_kafka_topic_partition_list_find0 (rd_kafka_topic_partition_list_t *rktparlist,
-				     const char *topic, int32_t partition) {
-        rd_kafka_topic_partition_t skel;
-        int i;
-
-        skel.topic = (char *)topic;
-        skel.partition = partition;
-
-        for (i = 0 ; i < rktparlist->cnt ; i++) {
-                if (!rd_kafka_topic_partition_cmp(&skel,
-                                                  &rktparlist->elems[i],
-                                                  NULL))
-                        return i;
-        }
-
-        return -1;
-}
-
-rd_kafka_topic_partition_t *
-rd_kafka_topic_partition_list_find (rd_kafka_topic_partition_list_t *rktparlist,
-				     const char *topic, int32_t partition) {
-	int i = rd_kafka_topic_partition_list_find0(rktparlist,
-						    topic, partition);
-	if (i == -1)
-		return NULL;
-	else
-		return &rktparlist->elems[i];
-}
-
-
-int
-rd_kafka_topic_partition_list_del_by_idx (rd_kafka_topic_partition_list_t *rktparlist,
-					  int idx) {
-	if (unlikely(idx < 0 || idx >= rktparlist->cnt))
-		return 0;
-
-	rktparlist->cnt--;
-	rd_kafka_topic_partition_destroy0(&rktparlist->elems[idx], 0);
-	memmove(&rktparlist->elems[idx], &rktparlist->elems[idx+1],
-		(rktparlist->cnt - idx) * sizeof(rktparlist->elems[idx]));
-
-	return 1;
-}
-
-
-int
-rd_kafka_topic_partition_list_del (rd_kafka_topic_partition_list_t *rktparlist,
-				   const char *topic, int32_t partition) {
-	int i = rd_kafka_topic_partition_list_find0(rktparlist,
-						    topic, partition);
-	if (i == -1)
-		return 0;
-
-	return rd_kafka_topic_partition_list_del_by_idx(rktparlist, i);
-}
-
-
-
-/**
- * Returns true if 'topic' matches the 'rktpar', else false.
- * On match, if rktpar is a regex pattern then 'matched_by_regex' is set to 1.
- */
-int rd_kafka_topic_partition_match (rd_kafka_t *rk,
-				    const rd_kafka_group_member_t *rkgm,
-				    const rd_kafka_topic_partition_t *rktpar,
-				    const char *topic, int *matched_by_regex) {
-	int ret = 0;
-
-	if (*rktpar->topic == '^') {
-		char errstr[128];
-
-		ret = rd_regex_match(rktpar->topic, topic,
-				     errstr, sizeof(errstr));
-		if (ret == -1) {
-			rd_kafka_dbg(rk, CGRP,
-				     "SUBMATCH",
-				     "Invalid regex for member "
-				     "\"%.*s\" subscription \"%s\": %s",
-				     RD_KAFKAP_STR_PR(rkgm->rkgm_member_id),
-				     rktpar->topic, errstr);
-			return 0;
-		}
-
-		if (ret && matched_by_regex)
-			*matched_by_regex = 1;
-
-	} else if (!strcmp(rktpar->topic, topic)) {
-
-		if (matched_by_regex)
-			*matched_by_regex = 0;
-
-		ret = 1;
-	}
-
-	return ret;
-}
-
-
-
-void rd_kafka_topic_partition_list_sort (
-        rd_kafka_topic_partition_list_t *rktparlist,
-        int (*cmp) (const void *, const void *, void *),
-        void *opaque) {
-
-        if (!cmp)
-                cmp = rd_kafka_topic_partition_cmp;
-
-        rd_qsort_r(rktparlist->elems, rktparlist->cnt,
-                   sizeof(*rktparlist->elems),
-                   cmp, opaque);
-}
-
-
-void rd_kafka_topic_partition_list_sort_by_topic (
-        rd_kafka_topic_partition_list_t *rktparlist) {
-        rd_kafka_topic_partition_list_sort(rktparlist,
-                                           rd_kafka_topic_partition_cmp, NULL);
-}
-
-rd_kafka_resp_err_t rd_kafka_topic_partition_list_set_offset (
-	rd_kafka_topic_partition_list_t *rktparlist,
-	const char *topic, int32_t partition, int64_t offset) {
-	rd_kafka_topic_partition_t *rktpar;
-
-	if (!(rktpar = rd_kafka_topic_partition_list_find(rktparlist,
-							  topic, partition)))
-		return RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION;
-
-	rktpar->offset = offset;
-
-	return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-
-/**
- * @brief Reset all offsets to the provided value.
- */
-void
-rd_kafka_

<TRUNCATED>
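
For reference, a minimal sketch of how the topic+partition list API implemented
above is typically driven from application code (topic name, partition range and
offset value are illustrative; the include path assumes an installed librdkafka):

    #include <librdkafka/rdkafka.h>

    static rd_kafka_topic_partition_list_t *build_assignment (void) {
            /* Pre-size to avoid reallocations while adding elements. */
            rd_kafka_topic_partition_list_t *parts =
                    rd_kafka_topic_partition_list_new(3);

            rd_kafka_topic_partition_list_add(parts, "mytopic", 0);
            rd_kafka_topic_partition_list_add_range(parts, "mytopic", 1, 2);

            /* Returns RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION if the partition
             * is not already in the list. */
            rd_kafka_topic_partition_list_set_offset(parts, "mytopic", 0, 1234);

            return parts; /* free with rd_kafka_topic_partition_list_destroy() */
    }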

[36/51] [partial] nifi-minifi-cpp git commit: MINIFICPP-512 - upgrade to librdkafka 0.11.4

Posted by ph...@apache.org.
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdgz.h
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdgz.h b/thirdparty/librdkafka-0.11.1/src/rdgz.h
deleted file mode 100644
index 8ce9052..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdgz.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * librd - Rapid Development C library
- *
- * Copyright (c) 2012, Magnus Edenhill
- * All rights reserved.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met: 
- * 
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer. 
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution. 
- * 
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#pragma once
-
-/**
- * Simple gzip decompression returning the inflated data
- * in a malloced buffer.
- * '*decompressed_lenp' must be 0 if the length of the uncompressed data
- * is not known in which case it will be calculated.
- * The returned buffer is nul-terminated (the actual allocated length
- * is '*decompressed_lenp'+1).
- *
- * The decompressed length is returned in '*decompressed_lenp'.
- */
-void *rd_gz_decompress (const void *compressed, int compressed_len,
-			uint64_t *decompressed_lenp);
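
For context, a brief sketch of calling rd_gz_decompress() as documented above.
The NULL-on-failure check and freeing the result with free() are assumptions;
the header only states that the returned buffer is malloc-allocated and
nul-terminated:

    #include <stdint.h>
    #include <stdlib.h>
    #include "rdgz.h"

    /* Inflate a gzip buffer whose uncompressed size is not known up front. */
    static char *inflate_payload (const void *buf, int len) {
            uint64_t out_len = 0;   /* must be 0 when the size is unknown */
            void *out = rd_gz_decompress(buf, len, &out_len);

            if (!out)
                    return NULL;    /* assumed failure convention */

            /* 'out' is nul-terminated; out_len now holds the inflated length. */
            return (char *)out;     /* caller frees with free() (assumed) */
    }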

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdinterval.h
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdinterval.h b/thirdparty/librdkafka-0.11.1/src/rdinterval.h
deleted file mode 100644
index b4be600..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdinterval.h
+++ /dev/null
@@ -1,116 +0,0 @@
-#pragma once
-
-
-
-#include "rd.h"
-
-typedef struct rd_interval_s {
-        rd_ts_t    ri_ts_last; /* last interval timestamp */
-        rd_ts_t    ri_fixed;   /* fixed interval if provided interval is 0 */
-        int        ri_backoff; /* back off the next interval by this much */
-} rd_interval_t;
-
-
-static RD_INLINE RD_UNUSED void rd_interval_init (rd_interval_t *ri) {
-        memset(ri, 0, sizeof(*ri));
-}
-
-
-
-/**
- * Returns the number of microseconds the interval has been over-shot.
- * If the return value is >0 (i.e., time for next intervalled something) then
- * the time interval is updated for the next interval.
- *
- * A current time can be provided in 'now', if set to 0 the time will be
- * gathered automatically.
- *
- * If 'interval_us' is set to 0 the fixed interval will be used, see
- * 'rd_interval_fixed()'.
- *
- * If this is the first time rd_interval() is called after an _init() or
- * _reset() and the \p immediate parameter is true, then a positive value
- * will be returned immediately even though the initial interval has not passed.
- */
-#define rd_interval(ri,interval_us,now) rd_interval0(ri,interval_us,now,0)
-#define rd_interval_immediate(ri,interval_us,now) \
-	rd_interval0(ri,interval_us,now,1)
-static RD_INLINE RD_UNUSED rd_ts_t rd_interval0 (rd_interval_t *ri,
-						 rd_ts_t interval_us,
-						 rd_ts_t now,
-						 int immediate) {
-        rd_ts_t diff;
-
-        if (!now)
-                now = rd_clock();
-        if (!interval_us)
-                interval_us = ri->ri_fixed;
-
-        if (ri->ri_ts_last || !immediate) {
-                diff = now - (ri->ri_ts_last + interval_us + ri->ri_backoff);
-        } else
-                diff = 1;
-        if (unlikely(diff > 0)) {
-                ri->ri_ts_last = now;
-                ri->ri_backoff = 0;
-        }
-
-        return diff;
-}
-
-
-/**
- * Reset the interval to zero, i.e., the next call to rd_interval()
- * will be immediate.
- */
-static RD_INLINE RD_UNUSED void rd_interval_reset (rd_interval_t *ri) {
-        ri->ri_ts_last = 0;
-        ri->ri_backoff = 0;
-}
-
-/**
- * Back off the next interval by `backoff_us` microseconds.
- */
-static RD_INLINE RD_UNUSED void rd_interval_backoff (rd_interval_t *ri,
-                                                    int backoff_us) {
-        ri->ri_backoff = backoff_us;
-}
-
-/**
- * Expedite (speed up) the next interval by `expedite_us` microseconds.
- * If `expedite_us` is 0 the interval will be set to trigger
- * immediately on the next rd_interval() call.
- */
-static RD_INLINE RD_UNUSED void rd_interval_expedite (rd_interval_t *ri,
-						     int expedite_us) {
-	if (!expedite_us)
-		ri->ri_ts_last = 0;
-	else
-		ri->ri_backoff = -expedite_us;
-}
-
-/**
- * Specifies a fixed interval to use if rd_interval() is called with
- * `interval_us` set to 0.
- */
-static RD_INLINE RD_UNUSED void rd_interval_fixed (rd_interval_t *ri,
-                                                  rd_ts_t fixed_us) {
-        ri->ri_fixed = fixed_us;
-}
-
-/**
- * Disables the interval (until rd_interval_init()/reset() is called).
- * A disabled interval will never return a positive value from
- * rd_interval().
- */
-static RD_INLINE RD_UNUSED void rd_interval_disable (rd_interval_t *ri) {
-        /* Set last beat to a large value a long time in the future. */
-        ri->ri_ts_last = 6000000000000000000LL; /* in about 190000 years */
-}
-
-/**
- * Returns true if the interval is disabled.
- */
-static RD_INLINE RD_UNUSED int rd_interval_disabled (const rd_interval_t *ri) {
-        return ri->ri_ts_last == 6000000000000000000LL;
-}
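
A short sketch of the rd_interval helper above, used as a simple once-per-second
rate limiter (the 1-second figure and the printf() body are illustrative):

    #include <stdio.h>
    #include "rd.h"
    #include "rdinterval.h"

    /* Call this as often as you like; it prints at most once per second. */
    static void heartbeat (rd_interval_t *ri) {
            /* A return value > 0 means the interval has elapsed (and the
             * timer is re-armed); passing 0 for 'now' lets rd_interval()
             * call rd_clock() itself. */
            if (rd_interval(ri, 1000000 /* 1s in microseconds */, 0) > 0)
                    printf("still alive\n");
    }

The rd_interval_t is armed with rd_interval_init() (or rd_interval_reset()),
while rd_interval_backoff() and rd_interval_expedite() shift only the next
firing.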


[34/51] [partial] nifi-minifi-cpp git commit: MINIFICPP-512 - upgrade to librdkafka 0.11.4

Posted by ph...@apache.org.
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdkafka.h
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdkafka.h b/thirdparty/librdkafka-0.11.1/src/rdkafka.h
deleted file mode 100644
index efb781c..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdkafka.h
+++ /dev/null
@@ -1,3820 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2013 Magnus Edenhill
- * All rights reserved.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met: 
- * 
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer. 
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution. 
- * 
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/**
- * @file rdkafka.h
- * @brief Apache Kafka C/C++ consumer and producer client library.
- *
- * rdkafka.h contains the public API for librdkafka.
- * The API is documented in this file as comments prefixing the function, type,
- * enum, define, etc.
- *
- * @sa For the C++ interface see rdkafkacpp.h
- *
- * @tableofcontents
- */
-
-
-/* @cond NO_DOC */
-#pragma once
-
-#include <stdio.h>
-#include <inttypes.h>
-#include <sys/types.h>
-
-#ifdef __cplusplus
-extern "C" {
-#if 0
-} /* Restore indent */
-#endif
-#endif
-
-#ifdef _MSC_VER
-#include <basetsd.h>
-#ifndef WIN32_MEAN_AND_LEAN
-#define WIN32_MEAN_AND_LEAN
-#endif
-#include <Winsock2.h>  /* for sockaddr, .. */
-typedef SSIZE_T ssize_t;
-#define RD_UNUSED
-#define RD_INLINE __inline
-#define RD_DEPRECATED __declspec(deprecated)
-#undef RD_EXPORT
-#ifdef LIBRDKAFKA_STATICLIB
-#define RD_EXPORT
-#else
-#ifdef LIBRDKAFKA_EXPORTS
-#define RD_EXPORT __declspec(dllexport)
-#else
-#define RD_EXPORT __declspec(dllimport)
-#endif
-#ifndef LIBRDKAFKA_TYPECHECKS
-#define LIBRDKAFKA_TYPECHECKS 0
-#endif
-#endif
-
-#else
-#include <sys/socket.h> /* for sockaddr, .. */
-
-#define RD_UNUSED __attribute__((unused))
-#define RD_INLINE inline
-#define RD_EXPORT
-#define RD_DEPRECATED __attribute__((deprecated))
-
-#ifndef LIBRDKAFKA_TYPECHECKS
-#define LIBRDKAFKA_TYPECHECKS 1
-#endif
-#endif
-
-
-/**
- * @brief Type-checking macros
- * Compile-time checking that \p ARG is of type \p TYPE.
- * @returns \p RET
- */
-#if LIBRDKAFKA_TYPECHECKS
-#define _LRK_TYPECHECK(RET,TYPE,ARG)                    \
-        ({ if (0) { TYPE __t RD_UNUSED = (ARG); } RET; })
-
-#define _LRK_TYPECHECK2(RET,TYPE,ARG,TYPE2,ARG2)        \
-        ({                                              \
-                if (0) {                                \
-                        TYPE __t RD_UNUSED = (ARG);     \
-                        TYPE2 __t2 RD_UNUSED = (ARG2);  \
-                }                                       \
-                RET; })
-#else
-#define _LRK_TYPECHECK(RET,TYPE,ARG)  (RET)
-#define _LRK_TYPECHECK2(RET,TYPE,ARG,TYPE2,ARG2) (RET)
-#endif
-
-/* @endcond */
-
-
-/**
- * @name librdkafka version
- * @{
- *
- *
- */
-
-/**
- * @brief librdkafka version
- *
- * Interpreted as hex \c MM.mm.rr.xx:
- *  - MM = Major
- *  - mm = minor
- *  - rr = revision
- *  - xx = pre-release id (0xff is the final release)
- *
- * E.g.: \c 0x000801ff = 0.8.1
- *
- * @remark This value should only be used during compile time,
- *         for runtime checks of version use rd_kafka_version()
- */
-#define RD_KAFKA_VERSION  0x000b01ff
-
-/**
- * @brief Returns the librdkafka version as integer.
- *
- * @returns Version integer.
- *
- * @sa See RD_KAFKA_VERSION for how to parse the integer format.
- * @sa Use rd_kafka_version_str() to retrieve the version as a string.
- */
-RD_EXPORT
-int rd_kafka_version(void);
-
-/**
- * @brief Returns the librdkafka version as string.
- *
- * @returns Version string
- */
-RD_EXPORT
-const char *rd_kafka_version_str (void);
-
-/**@}*/
-
-
-/**
- * @name Constants, errors, types
- * @{
- *
- *
- */
-
-
-/**
- * @enum rd_kafka_type_t
- *
- * @brief rd_kafka_t handle type.
- *
- * @sa rd_kafka_new()
- */
-typedef enum rd_kafka_type_t {
-	RD_KAFKA_PRODUCER, /**< Producer client */
-	RD_KAFKA_CONSUMER  /**< Consumer client */
-} rd_kafka_type_t;
-
-
-/**
- * @enum Timestamp types
- *
- * @sa rd_kafka_message_timestamp()
- */
-typedef enum rd_kafka_timestamp_type_t {
-	RD_KAFKA_TIMESTAMP_NOT_AVAILABLE,   /**< Timestamp not available */
-	RD_KAFKA_TIMESTAMP_CREATE_TIME,     /**< Message creation time */
-	RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME  /**< Log append time */
-} rd_kafka_timestamp_type_t;
-
-
-
-/**
- * @brief Retrieve supported debug contexts for use with the \c \"debug\"
- *        configuration property. (runtime)
- *
- * @returns Comma-separated list of available debugging contexts.
- */
-RD_EXPORT
-const char *rd_kafka_get_debug_contexts(void);
-
-/**
- * @brief Supported debug contexts. (compile time)
- *
- * @deprecated This compile time value may be outdated at runtime due to
- *             linking another version of the library.
- *             Use rd_kafka_get_debug_contexts() instead.
- */
-#define RD_KAFKA_DEBUG_CONTEXTS \
-	"all,generic,broker,topic,metadata,queue,msg,protocol,cgrp,security,fetch,feature"
-
-
-/* @cond NO_DOC */
-/* Private types to provide ABI compatibility */
-typedef struct rd_kafka_s rd_kafka_t;
-typedef struct rd_kafka_topic_s rd_kafka_topic_t;
-typedef struct rd_kafka_conf_s rd_kafka_conf_t;
-typedef struct rd_kafka_topic_conf_s rd_kafka_topic_conf_t;
-typedef struct rd_kafka_queue_s rd_kafka_queue_t;
-/* @endcond */
-
-
-/**
- * @enum rd_kafka_resp_err_t
- * @brief Error codes.
- *
- * The negative error codes delimited by two underscores
- * (\c RD_KAFKA_RESP_ERR__..) denotes errors internal to librdkafka and are
- * displayed as \c \"Local: \<error string..\>\", while the error codes
- * delimited by a single underscore (\c RD_KAFKA_RESP_ERR_..) denote broker
- * errors and are displayed as \c \"Broker: \<error string..\>\".
- *
- * @sa Use rd_kafka_err2str() to translate an error code to a human readable string
- */
-typedef enum {
-	/* Internal errors to rdkafka: */
-	/** Begin internal error codes */
-	RD_KAFKA_RESP_ERR__BEGIN = -200,
-	/** Received message is incorrect */
-	RD_KAFKA_RESP_ERR__BAD_MSG = -199,
-	/** Bad/unknown compression */
-	RD_KAFKA_RESP_ERR__BAD_COMPRESSION = -198,
-	/** Broker is going away */
-	RD_KAFKA_RESP_ERR__DESTROY = -197,
-	/** Generic failure */
-	RD_KAFKA_RESP_ERR__FAIL = -196,
-	/** Broker transport failure */
-	RD_KAFKA_RESP_ERR__TRANSPORT = -195,
-	/** Critical system resource */
-	RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE = -194,
-	/** Failed to resolve broker */
-	RD_KAFKA_RESP_ERR__RESOLVE = -193,
-	/** Produced message timed out*/
-	RD_KAFKA_RESP_ERR__MSG_TIMED_OUT = -192,
-	/** Reached the end of the topic+partition queue on
-	 * the broker. Not really an error. */
-	RD_KAFKA_RESP_ERR__PARTITION_EOF = -191,
-	/** Permanent: Partition does not exist in cluster. */
-	RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION = -190,
-	/** File or filesystem error */
-	RD_KAFKA_RESP_ERR__FS = -189,
-	 /** Permanent: Topic does not exist in cluster. */
-	RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC = -188,
-	/** All broker connections are down. */
-	RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN = -187,
-	/** Invalid argument, or invalid configuration */
-	RD_KAFKA_RESP_ERR__INVALID_ARG = -186,
-	/** Operation timed out */
-	RD_KAFKA_RESP_ERR__TIMED_OUT = -185,
-	/** Queue is full */
-	RD_KAFKA_RESP_ERR__QUEUE_FULL = -184,
-	/** ISR count < required.acks */
-        RD_KAFKA_RESP_ERR__ISR_INSUFF = -183,
-	/** Broker node update */
-        RD_KAFKA_RESP_ERR__NODE_UPDATE = -182,
-	/** SSL error */
-	RD_KAFKA_RESP_ERR__SSL = -181,
-	/** Waiting for coordinator to become available. */
-        RD_KAFKA_RESP_ERR__WAIT_COORD = -180,
-	/** Unknown client group */
-        RD_KAFKA_RESP_ERR__UNKNOWN_GROUP = -179,
-	/** Operation in progress */
-        RD_KAFKA_RESP_ERR__IN_PROGRESS = -178,
-	 /** Previous operation in progress, wait for it to finish. */
-        RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS = -177,
-	 /** This operation would interfere with an existing subscription */
-        RD_KAFKA_RESP_ERR__EXISTING_SUBSCRIPTION = -176,
-	/** Assigned partitions (rebalance_cb) */
-        RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS = -175,
-	/** Revoked partitions (rebalance_cb) */
-        RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS = -174,
-	/** Conflicting use */
-        RD_KAFKA_RESP_ERR__CONFLICT = -173,
-	/** Wrong state */
-        RD_KAFKA_RESP_ERR__STATE = -172,
-	/** Unknown protocol */
-        RD_KAFKA_RESP_ERR__UNKNOWN_PROTOCOL = -171,
-	/** Not implemented */
-        RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED = -170,
-	/** Authentication failure*/
-	RD_KAFKA_RESP_ERR__AUTHENTICATION = -169,
-	/** No stored offset */
-	RD_KAFKA_RESP_ERR__NO_OFFSET = -168,
-	/** Outdated */
-	RD_KAFKA_RESP_ERR__OUTDATED = -167,
-	/** Timed out in queue */
-	RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE = -166,
-        /** Feature not supported by broker */
-        RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE = -165,
-        /** Awaiting cache update */
-        RD_KAFKA_RESP_ERR__WAIT_CACHE = -164,
-        /** Operation interrupted (e.g., due to yield) */
-        RD_KAFKA_RESP_ERR__INTR = -163,
-        /** Key serialization error */
-        RD_KAFKA_RESP_ERR__KEY_SERIALIZATION = -162,
-        /** Value serialization error */
-        RD_KAFKA_RESP_ERR__VALUE_SERIALIZATION = -161,
-        /** Key deserialization error */
-        RD_KAFKA_RESP_ERR__KEY_DESERIALIZATION = -160,
-        /** Value deserialization error */
-        RD_KAFKA_RESP_ERR__VALUE_DESERIALIZATION = -159,
-
-	/** End internal error codes */
-	RD_KAFKA_RESP_ERR__END = -100,
-
-	/* Kafka broker errors: */
-	/** Unknown broker error */
-	RD_KAFKA_RESP_ERR_UNKNOWN = -1,
-	/** Success */
-	RD_KAFKA_RESP_ERR_NO_ERROR = 0,
-	/** Offset out of range */
-	RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE = 1,
-	/** Invalid message */
-	RD_KAFKA_RESP_ERR_INVALID_MSG = 2,
-	/** Unknown topic or partition */
-	RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART = 3,
-	/** Invalid message size */
-	RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE = 4,
-	/** Leader not available */
-	RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE = 5,
-	/** Not leader for partition */
-	RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION = 6,
-	/** Request timed out */
-	RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT = 7,
-	/** Broker not available */
-	RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE = 8,
-	/** Replica not available */
-	RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE = 9,
-	/** Message size too large */
-	RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE = 10,
-	/** StaleControllerEpochCode */
-	RD_KAFKA_RESP_ERR_STALE_CTRL_EPOCH = 11,
-	/** Offset metadata string too large */
-	RD_KAFKA_RESP_ERR_OFFSET_METADATA_TOO_LARGE = 12,
-	/** Broker disconnected before response received */
-	RD_KAFKA_RESP_ERR_NETWORK_EXCEPTION = 13,
-	/** Group coordinator load in progress */
-        RD_KAFKA_RESP_ERR_GROUP_LOAD_IN_PROGRESS = 14,
-	 /** Group coordinator not available */
-        RD_KAFKA_RESP_ERR_GROUP_COORDINATOR_NOT_AVAILABLE = 15,
-	/** Not coordinator for group */
-        RD_KAFKA_RESP_ERR_NOT_COORDINATOR_FOR_GROUP = 16,
-	/** Invalid topic */
-        RD_KAFKA_RESP_ERR_TOPIC_EXCEPTION = 17,
-	/** Message batch larger than configured server segment size */
-        RD_KAFKA_RESP_ERR_RECORD_LIST_TOO_LARGE = 18,
-	/** Not enough in-sync replicas */
-        RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS = 19,
-	/** Message(s) written to insufficient number of in-sync replicas */
-        RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND = 20,
-	/** Invalid required acks value */
-        RD_KAFKA_RESP_ERR_INVALID_REQUIRED_ACKS = 21,
-	/** Specified group generation id is not valid */
-        RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION = 22,
-	/** Inconsistent group protocol */
-        RD_KAFKA_RESP_ERR_INCONSISTENT_GROUP_PROTOCOL = 23,
-	/** Invalid group.id */
-	RD_KAFKA_RESP_ERR_INVALID_GROUP_ID = 24,
-	/** Unknown member */
-        RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID = 25,
-	/** Invalid session timeout */
-        RD_KAFKA_RESP_ERR_INVALID_SESSION_TIMEOUT = 26,
-	/** Group rebalance in progress */
-	RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS = 27,
-	/** Commit offset data size is not valid */
-        RD_KAFKA_RESP_ERR_INVALID_COMMIT_OFFSET_SIZE = 28,
-	/** Topic authorization failed */
-        RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED = 29,
-	/** Group authorization failed */
-	RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED = 30,
-	/** Cluster authorization failed */
-	RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED = 31,
-	/** Invalid timestamp */
-	RD_KAFKA_RESP_ERR_INVALID_TIMESTAMP = 32,
-	/** Unsupported SASL mechanism */
-	RD_KAFKA_RESP_ERR_UNSUPPORTED_SASL_MECHANISM = 33,
-	/** Illegal SASL state */
-	RD_KAFKA_RESP_ERR_ILLEGAL_SASL_STATE = 34,
-	/** Unsupported version */
-	RD_KAFKA_RESP_ERR_UNSUPPORTED_VERSION = 35,
-	/** Topic already exists */
-	RD_KAFKA_RESP_ERR_TOPIC_ALREADY_EXISTS = 36,
-	/** Invalid number of partitions */
-	RD_KAFKA_RESP_ERR_INVALID_PARTITIONS = 37,
-	/** Invalid replication factor */
-	RD_KAFKA_RESP_ERR_INVALID_REPLICATION_FACTOR = 38,
-	/** Invalid replica assignment */
-	RD_KAFKA_RESP_ERR_INVALID_REPLICA_ASSIGNMENT = 39,
-	/** Invalid config */
-	RD_KAFKA_RESP_ERR_INVALID_CONFIG = 40,
-	/** Not controller for cluster */
-	RD_KAFKA_RESP_ERR_NOT_CONTROLLER = 41,
-	/** Invalid request */
-	RD_KAFKA_RESP_ERR_INVALID_REQUEST = 42,
-	/** Message format on broker does not support request */
-	RD_KAFKA_RESP_ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT = 43,
-        /** Isolation policy violation */
-        RD_KAFKA_RESP_ERR_POLICY_VIOLATION = 44,
-        /** Broker received an out of order sequence number */
-        RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER = 45,
-        /** Broker received a duplicate sequence number */
-        RD_KAFKA_RESP_ERR_DUPLICATE_SEQUENCE_NUMBER = 46,
-        /** Producer attempted an operation with an old epoch */
-        RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH = 47,
-        /** Producer attempted a transactional operation in an invalid state */
-        RD_KAFKA_RESP_ERR_INVALID_TXN_STATE = 48,
-        /** Producer attempted to use a producer id which is not
-         *  currently assigned to its transactional id */
-        RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING = 49,
-        /** Transaction timeout is larger than the maximum
-         *  value allowed by the broker's max.transaction.timeout.ms */
-        RD_KAFKA_RESP_ERR_INVALID_TRANSACTION_TIMEOUT = 50,
-        /** Producer attempted to update a transaction while another
-         *  concurrent operation on the same transaction was ongoing */
-        RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS = 51,
-        /** Indicates that the transaction coordinator sending a
-         *  WriteTxnMarker is no longer the current coordinator for a
-         *  given producer */
-        RD_KAFKA_RESP_ERR_TRANSACTION_COORDINATOR_FENCED = 52,
-        /** Transactional Id authorization failed */
-        RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED = 53,
-        /** Security features are disabled */
-        RD_KAFKA_RESP_ERR_SECURITY_DISABLED = 54,
-        /** Operation not attempted */
-        RD_KAFKA_RESP_ERR_OPERATION_NOT_ATTEMPTED = 55,
-
-	RD_KAFKA_RESP_ERR_END_ALL,
-} rd_kafka_resp_err_t;
-
-
-/**
- * @brief Error code value, name and description.
- *        Typically for use with language bindings to automatically expose
- *        the full set of librdkafka error codes.
- */
-struct rd_kafka_err_desc {
-	rd_kafka_resp_err_t code;/**< Error code */
-	const char *name;      /**< Error name, same as code enum sans prefix */
-	const char *desc;      /**< Human readable error description. */
-};
-
-
-/**
- * @brief Returns the full list of error codes.
- */
-RD_EXPORT
-void rd_kafka_get_err_descs (const struct rd_kafka_err_desc **errdescs,
-			     size_t *cntp);
-
-
-
-
-/**
- * @brief Returns a human readable representation of a kafka error.
- *
- * @param err Error code to translate
- */
-RD_EXPORT
-const char *rd_kafka_err2str (rd_kafka_resp_err_t err);
-
-
-
-/**
- * @brief Returns the error code name (enum name).
- *
- * @param err Error code to translate
- */
-RD_EXPORT
-const char *rd_kafka_err2name (rd_kafka_resp_err_t err);
-
-
-/**
- * @brief Returns the last error code generated by a legacy API call
- *        in the current thread.
- *
- * The legacy APIs are the ones using errno to propagate error value, namely:
- *  - rd_kafka_topic_new()
- *  - rd_kafka_consume_start()
- *  - rd_kafka_consume_stop()
- *  - rd_kafka_consume()
- *  - rd_kafka_consume_batch()
- *  - rd_kafka_consume_callback()
- *  - rd_kafka_consume_queue()
- *  - rd_kafka_produce()
- *
- * The main use for this function is to avoid converting system \p errno
- * values to rd_kafka_resp_err_t codes for legacy APIs.
- *
- * @remark The last error is stored per-thread, if multiple rd_kafka_t handles
- *         are used in the same application thread the developer needs to
- *         make sure rd_kafka_last_error() is called immediately after
- *         a failed API call.
- *
- * @remark errno propagation from librdkafka is not safe on Windows
- *         and should not be used; use rd_kafka_last_error() instead.
- */
-RD_EXPORT
-rd_kafka_resp_err_t rd_kafka_last_error (void);
-
-
-/**
- * @brief Converts the system errno value \p errnox to a rd_kafka_resp_err_t
- *        error code upon failure from the following functions:
- *  - rd_kafka_topic_new()
- *  - rd_kafka_consume_start()
- *  - rd_kafka_consume_stop()
- *  - rd_kafka_consume()
- *  - rd_kafka_consume_batch()
- *  - rd_kafka_consume_callback()
- *  - rd_kafka_consume_queue()
- *  - rd_kafka_produce()
- *
- * @param errnox  System errno value to convert
- *
- * @returns Appropriate error code for \p errnox
- *
- * @remark A better alternative is to call rd_kafka_last_error() immediately
- *         after any of the above functions return -1 or NULL.
- *
- * @deprecated Use rd_kafka_last_error() to retrieve the last error code
- *             set by the legacy librdkafka APIs.
- *
- * @sa rd_kafka_last_error()
- */
-RD_EXPORT RD_DEPRECATED
-rd_kafka_resp_err_t rd_kafka_errno2err(int errnox);
-
-
-/**
- * @brief Returns the thread-local system errno
- *
- * On most platforms this is the same as \p errno but in case of different
- * runtimes between library and application (e.g., Windows static DLLs)
- * this provides a means for exposing the errno librdkafka uses.
- *
- * @remark The value is local to the current calling thread.
- *
- * @deprecated Use rd_kafka_last_error() to retrieve the last error code
- *             set by the legacy librdkafka APIs.
- */
-RD_EXPORT RD_DEPRECATED
-int rd_kafka_errno (void);
-
-
-
-/**
- * @brief Topic+Partition place holder
- *
- * Generic place holder for a Topic+Partition and its related information
- * used for multiple purposes:
- *   - consumer offset (see rd_kafka_commit(), et.al.)
- *   - group rebalancing callback (rd_kafka_conf_set_rebalance_cb())
- *   - offset commit result callback (rd_kafka_conf_set_offset_commit_cb())
- */
-
-/**
- * @brief Generic place holder for a specific Topic+Partition.
- *
- * @sa rd_kafka_topic_partition_list_new()
- */
-typedef struct rd_kafka_topic_partition_s {
-        char        *topic;             /**< Topic name */
-        int32_t      partition;         /**< Partition */
-	int64_t      offset;            /**< Offset */
-        void        *metadata;          /**< Metadata */
-        size_t       metadata_size;     /**< Metadata size */
-        void        *opaque;            /**< Application opaque */
-        rd_kafka_resp_err_t err;        /**< Error code, depending on use. */
-        void       *_private;           /**< INTERNAL USE ONLY,
-                                         *   INITIALIZE TO ZERO, DO NOT TOUCH */
-} rd_kafka_topic_partition_t;
-
-
-/**
- * @brief Destroy a rd_kafka_topic_partition_t.
- * @remark This must not be called for elements in a topic partition list.
- */
-RD_EXPORT
-void rd_kafka_topic_partition_destroy (rd_kafka_topic_partition_t *rktpar);
-
-
-/**
- * @brief A growable list of Topic+Partitions.
- *
- */
-typedef struct rd_kafka_topic_partition_list_s {
-        int cnt;               /**< Current number of elements */
-        int size;              /**< Current allocated size */
-        rd_kafka_topic_partition_t *elems; /**< Element array[] */
-} rd_kafka_topic_partition_list_t;
-
-
-/**
- * @brief Create a new list/vector Topic+Partition container.
- *
- * @param size  Initial allocated size used when the expected number of
- *              elements is known or can be estimated.
- *              Avoids reallocation and possibly relocation of the
- *              elems array.
- *
- * @returns A newly allocated Topic+Partition list.
- *
- * @remark Use rd_kafka_topic_partition_list_destroy() to free all resources
- *         in use by a list and the list itself.
- * @sa     rd_kafka_topic_partition_list_add()
- */
-RD_EXPORT
-rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_new (int size);
-
-
-/**
- * @brief Free all resources used by the list and the list itself.
- */
-RD_EXPORT
-void
-rd_kafka_topic_partition_list_destroy (rd_kafka_topic_partition_list_t *rkparlist);
-
-/**
- * @brief Add topic+partition to list
- *
- * @param rktparlist List to extend
- * @param topic      Topic name (copied)
- * @param partition  Partition id
- *
- * @returns The object which can be used to fill in additional fields.
- */
-RD_EXPORT
-rd_kafka_topic_partition_t *
-rd_kafka_topic_partition_list_add (rd_kafka_topic_partition_list_t *rktparlist,
-                                   const char *topic, int32_t partition);
-
-
-/**
- * @brief Add range of partitions from \p start to \p stop inclusive.
- *
- * @param rktparlist List to extend
- * @param topic      Topic name (copied)
- * @param start      Start partition of range
- * @param stop       Last partition of range (inclusive)
- */
-RD_EXPORT
-void
-rd_kafka_topic_partition_list_add_range (rd_kafka_topic_partition_list_t
-                                         *rktparlist,
-                                         const char *topic,
-                                         int32_t start, int32_t stop);
-
-
-
-/**
- * @brief Delete partition from list.
- *
- * @param rktparlist List to modify
- * @param topic      Topic name to match
- * @param partition  Partition to match
- *
- * @returns 1 if partition was found (and removed), else 0.
- *
- * @remark Any held indices to elems[] are unusable after this call returns 1.
- */
-RD_EXPORT
-int
-rd_kafka_topic_partition_list_del (rd_kafka_topic_partition_list_t *rktparlist,
-				   const char *topic, int32_t partition);
-
-
-/**
- * @brief Delete partition from list by elems[] index.
- *
- * @returns 1 if partition was found (and removed), else 0.
- *
- * @sa rd_kafka_topic_partition_list_del()
- */
-RD_EXPORT
-int
-rd_kafka_topic_partition_list_del_by_idx (
-	rd_kafka_topic_partition_list_t *rktparlist,
-	int idx);
-
-
-/**
- * @brief Make a copy of an existing list.
- *
- * @param src   The existing list to copy.
- *
- * @returns A new list fully populated to be identical to \p src
- */
-RD_EXPORT
-rd_kafka_topic_partition_list_t *
-rd_kafka_topic_partition_list_copy (const rd_kafka_topic_partition_list_t *src);
-
-
-
-
-/**
- * @brief Set offset to \p offset for \p topic and \p partition
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or
- *          RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION if \p partition was not found
- *          in the list.
- */
-RD_EXPORT
-rd_kafka_resp_err_t rd_kafka_topic_partition_list_set_offset (
-	rd_kafka_topic_partition_list_t *rktparlist,
-	const char *topic, int32_t partition, int64_t offset);
-
-
-
-/**
- * @brief Find element by \p topic and \p partition.
- *
- * @returns a pointer to the first matching element, or NULL if not found.
- */
-RD_EXPORT
-rd_kafka_topic_partition_t *
-rd_kafka_topic_partition_list_find (rd_kafka_topic_partition_list_t *rktparlist,
-				    const char *topic, int32_t partition);
-
-
-/**
- * @brief Sort list using comparator \p cmp.
- *
- * If \p cmp is NULL the default comparator will be used that
- * sorts by ascending topic name and partition.
- *
- */
-RD_EXPORT void
-rd_kafka_topic_partition_list_sort (rd_kafka_topic_partition_list_t *rktparlist,
-                                    int (*cmp) (const void *a, const void *b,
-                                                void *opaque),
-                                    void *opaque);
-
-
-/**@}*/
-
-
-
-/**
- * @name Var-arg tag types
- * @{
- *
- */
-
-/**
- * @enum rd_kafka_vtype_t
- *
- * @brief Var-arg tag types
- *
- * @sa rd_kafka_producev()
- */
-typedef enum rd_kafka_vtype_t {
-        RD_KAFKA_VTYPE_END,       /**< va-arg sentinel */
-        RD_KAFKA_VTYPE_TOPIC,     /**< (const char *) Topic name */
-        RD_KAFKA_VTYPE_RKT,       /**< (rd_kafka_topic_t *) Topic handle */
-        RD_KAFKA_VTYPE_PARTITION, /**< (int32_t) Partition */
-        RD_KAFKA_VTYPE_VALUE,     /**< (void *, size_t) Message value (payload)*/
-        RD_KAFKA_VTYPE_KEY,       /**< (void *, size_t) Message key */
-        RD_KAFKA_VTYPE_OPAQUE,    /**< (void *) Application opaque */
-        RD_KAFKA_VTYPE_MSGFLAGS,  /**< (int) RD_KAFKA_MSG_F_.. flags */
-        RD_KAFKA_VTYPE_TIMESTAMP, /**< (int64_t) Milliseconds since epoch UTC */
-} rd_kafka_vtype_t;
-
-
-/**
- * @brief Convenience macros for rd_kafka_vtype_t that takes the
- *        correct arguments for each vtype.
- */
-
-/*!
- * va-arg end sentinel used to terminate the variable argument list
- */
-#define RD_KAFKA_V_END RD_KAFKA_VTYPE_END
-
-/*!
- * Topic name (const char *)
- */
-#define RD_KAFKA_V_TOPIC(topic)                                         \
-        _LRK_TYPECHECK(RD_KAFKA_VTYPE_TOPIC, const char *, topic),      \
-        (const char *)topic
-/*!
- * Topic object (rd_kafka_topic_t *)
- */
-#define RD_KAFKA_V_RKT(rkt)                                             \
-        _LRK_TYPECHECK(RD_KAFKA_VTYPE_RKT, rd_kafka_topic_t *, rkt),    \
-        (rd_kafka_topic_t *)rkt
-/*!
- * Partition (int32_t)
- */
-#define RD_KAFKA_V_PARTITION(partition)                                 \
-        _LRK_TYPECHECK(RD_KAFKA_VTYPE_PARTITION, int32_t, partition),   \
-        (int32_t)partition
-/*!
- * Message value/payload pointer and length (void *, size_t)
- */
-#define RD_KAFKA_V_VALUE(VALUE,LEN)                                     \
-        _LRK_TYPECHECK2(RD_KAFKA_VTYPE_VALUE, void *, VALUE, size_t, LEN), \
-        (void *)VALUE, (size_t)LEN
-/*!
- * Message key pointer and length (const void *, size_t)
- */
-#define RD_KAFKA_V_KEY(KEY,LEN)                                         \
-        _LRK_TYPECHECK2(RD_KAFKA_VTYPE_KEY, const void *, KEY, size_t, LEN), \
-        (void *)KEY, (size_t)LEN
-/*!
- * Opaque pointer (void *)
- */
-#define RD_KAFKA_V_OPAQUE(opaque)                                 \
-        _LRK_TYPECHECK(RD_KAFKA_VTYPE_OPAQUE, void *, opaque),    \
-        (void *)opaque
-/*!
- * Message flags (int)
- * @sa RD_KAFKA_MSG_F_COPY, et.al.
- */
-#define RD_KAFKA_V_MSGFLAGS(msgflags)                                 \
-        _LRK_TYPECHECK(RD_KAFKA_VTYPE_MSGFLAGS, int, msgflags),       \
-        (int)msgflags
-/*!
- * Timestamp (int64_t)
- */
-#define RD_KAFKA_V_TIMESTAMP(timestamp)                                 \
-        _LRK_TYPECHECK(RD_KAFKA_VTYPE_TIMESTAMP, int64_t, timestamp),   \
-        (int64_t)timestamp
-
-/**@}*/
-
-
-/**
- * @name Kafka messages
- * @{
- *
- */
-
-
-
-// FIXME: This doesn't show up in docs for some reason
-// "Compound rd_kafka_message_t is not documented."
-
-/**
- * @brief A Kafka message as returned by the \c rd_kafka_consume*() family
- *        of functions as well as provided to the Producer \c dr_msg_cb().
- *
- * For the consumer this object has two purposes:
- *  - provide the application with a consumed message. (\c err == 0)
- *  - report per-topic+partition consumer errors (\c err != 0)
- *
- * The application must check \c err to decide what action to take.
- *
- * When the application is finished with a message it must call
- * rd_kafka_message_destroy() unless otherwise noted.
- */
-typedef struct rd_kafka_message_s {
-	rd_kafka_resp_err_t err;   /**< Non-zero for error signaling. */
-	rd_kafka_topic_t *rkt;     /**< Topic */
-	int32_t partition;         /**< Partition */
-	void   *payload;           /**< Producer: original message payload.
-				    * Consumer: Depends on the value of \c err :
-				    * - \c err==0: Message payload.
-				    * - \c err!=0: Error string */
-	size_t  len;               /**< Depends on the value of \c err :
-				    * - \c err==0: Message payload length
-				    * - \c err!=0: Error string length */
-	void   *key;               /**< Depends on the value of \c err :
-				    * - \c err==0: Optional message key */
-	size_t  key_len;           /**< Depends on the value of \c err :
-				    * - \c err==0: Optional message key length*/
-	int64_t offset;            /**< Consume:
-                                    * - Message offset (or offset for error
-				    *   if \c err!=0 if applicable).
-                                    * - dr_msg_cb:
-                                    *   Message offset assigned by broker.
-                                    *   If \c produce.offset.report is set then
-                                    *   each message will have this field set,
-                                    *   otherwise only the last message in
-                                    *   each produced internal batch will
-                                    *   have this field set, otherwise 0. */
-	void  *_private;           /**< Consume:
-				    *  - rdkafka private pointer: DO NOT MODIFY
-				    *  - dr_msg_cb:
-                                    *    msg_opaque from produce() call */
-} rd_kafka_message_t;
-
-
-/**
- * @brief Frees resources for \p rkmessage and hands ownership back to rdkafka.
- */
-RD_EXPORT
-void rd_kafka_message_destroy(rd_kafka_message_t *rkmessage);
-
-
-
-
-/**
- * @brief Returns the error string for an errored rd_kafka_message_t or NULL if
- *        there was no error.
- *
- * @remark This function MUST NOT be used with the producer.
- */
-static RD_INLINE const char *
-RD_UNUSED 
-rd_kafka_message_errstr(const rd_kafka_message_t *rkmessage) {
-	if (!rkmessage->err)
-		return NULL;
-
-	if (rkmessage->payload)
-		return (const char *)rkmessage->payload;
-
-	return rd_kafka_err2str(rkmessage->err);
-}
-
-
-
-/**
- * @brief Returns the message timestamp for a consumed message.
- *
- * The timestamp is the number of milliseconds since the epoch (UTC).
- *
- * \p tstype (if not NULL) is updated to indicate the type of timestamp.
- *
- * @returns message timestamp, or -1 if not available.
- *
- * @remark Message timestamps require broker version 0.10.0 or later.
- */
-RD_EXPORT
-int64_t rd_kafka_message_timestamp (const rd_kafka_message_t *rkmessage,
-				    rd_kafka_timestamp_type_t *tstype);
-
-
-
-/**
- * @brief Returns the latency for a produced message measured from
- *        the produce() call.
- *
- * @returns the latency in microseconds, or -1 if not available.
- */
-RD_EXPORT
-int64_t rd_kafka_message_latency (const rd_kafka_message_t *rkmessage);
-
-
-/**@}*/
-
-
-/**
- * @name Configuration interface
- * @{
- *
- * @brief Main/global configuration property interface
- *
- */
-
-/**
- * @enum rd_kafka_conf_res_t
- * @brief Configuration result type
- */
-typedef enum {
-	RD_KAFKA_CONF_UNKNOWN = -2, /**< Unknown configuration name. */
-	RD_KAFKA_CONF_INVALID = -1, /**< Invalid configuration value. */
-	RD_KAFKA_CONF_OK = 0        /**< Configuration okay */
-} rd_kafka_conf_res_t;
-
-
-/**
- * @brief Create configuration object.
- *
- * When providing your own configuration to the \c rd_kafka_*_new_*() calls
- * the rd_kafka_conf_t objects needs to be created with this function
- * which will set up the defaults.
- * I.e.:
- * @code
- *   rd_kafka_conf_t *myconf;
- *   rd_kafka_conf_res_t res;
- *
- *   myconf = rd_kafka_conf_new();
- *   res = rd_kafka_conf_set(myconf, "socket.timeout.ms", "600",
- *                           errstr, sizeof(errstr));
- *   if (res != RD_KAFKA_CONF_OK)
- *      die("%s\n", errstr);
- *   
- *   rk = rd_kafka_new(..., myconf);
- * @endcode
- *
- * Please see CONFIGURATION.md for the default settings or use
- * rd_kafka_conf_properties_show() to provide the information at runtime.
- *
- * The properties are identical to the Apache Kafka configuration properties
- * whenever possible.
- *
- * @returns A new rd_kafka_conf_t object with defaults set.
- *
- * @sa rd_kafka_conf_set(), rd_kafka_conf_destroy()
- */
-RD_EXPORT
-rd_kafka_conf_t *rd_kafka_conf_new(void);
-
-
-/**
- * @brief Destroys a conf object.
- */
-RD_EXPORT
-void rd_kafka_conf_destroy(rd_kafka_conf_t *conf);
-
-
-/**
- * @brief Creates a copy/duplicate of configuration object \p conf
- *
- * @remark Interceptors are NOT copied to the new configuration object.
- * @sa rd_kafka_interceptor_f_on_conf_dup
- */
-RD_EXPORT
-rd_kafka_conf_t *rd_kafka_conf_dup(const rd_kafka_conf_t *conf);
-
-
-/**
- * @brief Same as rd_kafka_conf_dup() but with an array of property name
- *        prefixes to filter out (ignore) when copying.
- */
-RD_EXPORT
-rd_kafka_conf_t *rd_kafka_conf_dup_filter (const rd_kafka_conf_t *conf,
-                                           size_t filter_cnt,
-                                           const char **filter);
-
-
-
-/**
- * @brief Sets a configuration property.
- *
- * \p conf must have been previously created with rd_kafka_conf_new().
- *
- * Fallthrough:
- * Topic-level configuration properties may be set using this interface
- * in which case they are applied on the \c default_topic_conf.
- * If no \c default_topic_conf has been set one will be created.
- * Any subsequent rd_kafka_conf_set_default_topic_conf() calls will
- * replace the current default topic configuration.
- *
- * @returns \c rd_kafka_conf_res_t to indicate success or failure.
- * In case of failure \p errstr is updated to contain a human readable
- * error string.
- */
-RD_EXPORT
-rd_kafka_conf_res_t rd_kafka_conf_set(rd_kafka_conf_t *conf,
-				       const char *name,
-				       const char *value,
-				       char *errstr, size_t errstr_size);
-
-
-/**
- * @brief Enable event sourcing.
- * \p events is a bitmask of \c RD_KAFKA_EVENT_* of events to enable
- * for consumption by `rd_kafka_queue_poll()`.
- */
-RD_EXPORT
-void rd_kafka_conf_set_events(rd_kafka_conf_t *conf, int events);
-
-
-/**
- @deprecated See rd_kafka_conf_set_dr_msg_cb()
-*/
-RD_EXPORT
-void rd_kafka_conf_set_dr_cb(rd_kafka_conf_t *conf,
-			      void (*dr_cb) (rd_kafka_t *rk,
-					     void *payload, size_t len,
-					     rd_kafka_resp_err_t err,
-					     void *opaque, void *msg_opaque));
-
-/**
- * @brief \b Producer: Set delivery report callback in provided \p conf object.
- *
- * The delivery report callback will be called once for each message
- * accepted by rd_kafka_produce() (et.al) with \p err set to indicate
- * the result of the produce request.
- * 
- * The callback is called when a message is successfully produced or
- * if librdkafka encountered a permanent failure, or the retry counter for
- * temporary errors has been exhausted.
- *
- * An application must call rd_kafka_poll() at regular intervals to
- * serve queued delivery report callbacks.
- */
-RD_EXPORT
-void rd_kafka_conf_set_dr_msg_cb(rd_kafka_conf_t *conf,
-                                  void (*dr_msg_cb) (rd_kafka_t *rk,
-                                                     const rd_kafka_message_t *
-                                                     rkmessage,
-                                                     void *opaque));
-
-
-/**
- * @brief \b Consumer: Set consume callback for use with rd_kafka_consumer_poll()
- *
- */
-RD_EXPORT
-void rd_kafka_conf_set_consume_cb (rd_kafka_conf_t *conf,
-                                   void (*consume_cb) (rd_kafka_message_t *
-                                                       rkmessage,
-                                                       void *opaque));
-
-/**
- * @brief \b Consumer: Set rebalance callback for use with
- *                     coordinated consumer group balancing.
- *
- * The \p err field is set to either RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS
- * or RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS and 'partitions'
- * contains the full partition set that was either assigned or revoked.
- *
- * Registering a \p rebalance_cb turns off librdkafka's automatic
- * partition assignment/revocation and instead delegates that responsibility
- * to the application's \p rebalance_cb.
- *
- * The rebalance callback is responsible for updating librdkafka's
- * assignment set based on the two events: RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS
- * and RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS but should also be able to handle
- * arbitrary rebalancing failures where \p err is neither of those.
- * @remark In this latter case (arbitrary error), the application must
- *         call rd_kafka_assign(rk, NULL) to synchronize state.
- *
- * Without a rebalance callback this is done automatically by librdkafka
- * but registering a rebalance callback gives the application flexibility
- * in performing other operations along with the assigning/revocation,
- * such as fetching offsets from an alternate location (on assign)
- * or manually committing offsets (on revoke).
- *
- * @remark The \p partitions list is destroyed by librdkafka on return
- *         from the rebalance_cb and must not be freed or
- *         saved by the application.
- * 
- * The following example shows the application's responsibilities:
- * @code
- *    static void rebalance_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err,
- *                              rd_kafka_topic_partition_list_t *partitions,
- *                              void *opaque) {
- *
- *        switch (err)
- *        {
- *          case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS:
- *             // application may load offsets from arbitrary external
- *             // storage here and update \p partitions
- *
- *             rd_kafka_assign(rk, partitions);
- *             break;
- *
- *          case RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS:
- *             if (manual_commits) // Optional explicit manual commit
- *                 rd_kafka_commit(rk, partitions, 0); // sync commit
- *
- *             rd_kafka_assign(rk, NULL);
- *             break;
- *
- *          default:
- *             handle_unlikely_error(err);
- *             rd_kafka_assign(rk, NULL); // sync state
- *             break;
- *         }
- *    }
- * @endcode
- */
-RD_EXPORT
-void rd_kafka_conf_set_rebalance_cb (
-        rd_kafka_conf_t *conf,
-        void (*rebalance_cb) (rd_kafka_t *rk,
-                              rd_kafka_resp_err_t err,
-                              rd_kafka_topic_partition_list_t *partitions,
-                              void *opaque));
-
-
-
-/**
- * @brief \b Consumer: Set offset commit callback for use with consumer groups.
- *
- * The results of automatic or manual offset commits will be scheduled
- * for this callback and are served by rd_kafka_consumer_poll().
- *
- * If no partitions had valid offsets to commit this callback will be called
- * with \p err == RD_KAFKA_RESP_ERR__NO_OFFSET which is not to be considered
- * an error.
- *
- * The \p offsets list contains per-partition information:
- *   - \c offset: committed offset (attempted)
- *   - \c err:    commit error
- */
-RD_EXPORT
-void rd_kafka_conf_set_offset_commit_cb (
-        rd_kafka_conf_t *conf,
-        void (*offset_commit_cb) (rd_kafka_t *rk,
-                                  rd_kafka_resp_err_t err,
-                                  rd_kafka_topic_partition_list_t *offsets,
-                                  void *opaque));
-
-
-/**
- * @brief Set error callback in provided conf object.
- *
- * The error callback is used by librdkafka to signal critical errors
- * back to the application.
- *
- * If no \p error_cb is registered then the errors will be logged instead.
- */
-RD_EXPORT
-void rd_kafka_conf_set_error_cb(rd_kafka_conf_t *conf,
-				 void  (*error_cb) (rd_kafka_t *rk, int err,
-						    const char *reason,
-						    void *opaque));
-
-/**
- * @brief Set throttle callback.
- *
- * The throttle callback is used to forward broker throttle times to the
- * application for Produce and Fetch (consume) requests.
- *
- * Callbacks are triggered whenever a non-zero throttle time is returned by
- * the broker, or when the throttle time drops back to zero.
- *
- * An application must call rd_kafka_poll() or rd_kafka_consumer_poll() at
- * regular intervals to serve queued callbacks.
- *
- * @remark Requires broker version 0.9.0 or later.
- */
-RD_EXPORT
-void rd_kafka_conf_set_throttle_cb (rd_kafka_conf_t *conf,
-				    void (*throttle_cb) (
-					    rd_kafka_t *rk,
-					    const char *broker_name,
-					    int32_t broker_id,
-					    int throttle_time_ms,
-					    void *opaque));
-
-
-/**
- * @brief Set logger callback.
- *
- * The default is to print to stderr, but a syslog logger is also available,
- * see rd_kafka_log_print and rd_kafka_log_syslog for the builtin alternatives.
- * Alternatively the application may provide its own logger callback.
- * Or pass \p func as NULL to disable logging.
- *
- * This is the configuration alternative to the deprecated rd_kafka_set_logger()
- *
- * @remark The log_cb will be called spontaneously from librdkafka's internal
- *         threads unless logs have been forwarded to a poll queue through
- *         \c rd_kafka_set_log_queue().
- *         An application MUST NOT call any librdkafka APIs or do any prolonged
- *         work in a non-forwarded \c log_cb.
- */
-RD_EXPORT
-void rd_kafka_conf_set_log_cb(rd_kafka_conf_t *conf,
-			  void (*log_cb) (const rd_kafka_t *rk, int level,
-                                          const char *fac, const char *buf));
-
-
-/**
- * @brief Set statistics callback in provided conf object.
- *
- * The statistics callback is triggered from rd_kafka_poll() every
- * \c statistics.interval.ms (needs to be configured separately).
- * Function arguments:
- *   - \p rk - Kafka handle
- *   - \p json - String containing the statistics data in JSON format
- *   - \p json_len - Length of \p json string.
- *   - \p opaque - Application-provided opaque.
- *
- * If the application wishes to hold on to the \p json pointer and free
- * it at a later time it must return 1 from the \p stats_cb.
- * If the application returns 0 from the \p stats_cb then librdkafka
- * will immediately free the \p json pointer.
- */
-RD_EXPORT
-void rd_kafka_conf_set_stats_cb(rd_kafka_conf_t *conf,
-				 int (*stats_cb) (rd_kafka_t *rk,
-						  char *json,
-						  size_t json_len,
-						  void *opaque));
-
-
-
-/**
- * @brief Set socket callback.
- *
- * The socket callback is responsible for opening a socket
- * according to the supplied \p domain, \p type and \p protocol.
- * The socket shall be created with \c CLOEXEC set in a racefree fashion, if
- * possible.
- *
- * Default:
- *  - on linux: racefree CLOEXEC
- *  - others  : non-racefree CLOEXEC
- *
- * @remark The callback will be called from an internal librdkafka thread.
- */
-RD_EXPORT
-void rd_kafka_conf_set_socket_cb(rd_kafka_conf_t *conf,
-                                  int (*socket_cb) (int domain, int type,
-                                                    int protocol,
-                                                    void *opaque));
-
-
-
-/**
- * @brief Set connect callback.
- *
- * The connect callback is responsible for connecting socket \p sockfd
- * to peer address \p addr.
- * The \p id field contains the broker identifier.
- *
- * \p connect_cb shall return 0 on success (socket connected) or an error
- * number (errno) on error.
- *
- * @remark The callback will be called from an internal librdkafka thread.
- */
-RD_EXPORT void
-rd_kafka_conf_set_connect_cb (rd_kafka_conf_t *conf,
-                              int (*connect_cb) (int sockfd,
-                                                 const struct sockaddr *addr,
-                                                 int addrlen,
-                                                 const char *id,
-                                                 void *opaque));
-
-/**
- * @brief Set close socket callback.
- *
- * Close a socket (optionally opened with socket_cb()).
- *
- * @remark The callback will be called from an internal librdkafka thread.
- */
-RD_EXPORT void
-rd_kafka_conf_set_closesocket_cb (rd_kafka_conf_t *conf,
-                                  int (*closesocket_cb) (int sockfd,
-                                                         void *opaque));
-
-
-
-#ifndef _MSC_VER
-/**
- * @brief Set open callback.
- *
- * The open callback is responsible for opening the file specified by
- * pathname, flags and mode.
- * The file shall be opened with \c CLOEXEC set in a race-free fashion, if
- * possible.
- *
- * Default:
- *  - on Linux: race-free CLOEXEC
- *  - others  : non-race-free CLOEXEC
- *
- * @remark The callback will be called from an internal librdkafka thread.
- */
-RD_EXPORT
-void rd_kafka_conf_set_open_cb (rd_kafka_conf_t *conf,
-                                int (*open_cb) (const char *pathname,
-                                                int flags, mode_t mode,
-                                                void *opaque));
-#endif
-
-/**
- * @brief Sets the application's opaque pointer that will be passed to callbacks
- */
-RD_EXPORT
-void rd_kafka_conf_set_opaque(rd_kafka_conf_t *conf, void *opaque);
-
-/**
- * @brief Retrieves the opaque pointer previously set with rd_kafka_conf_set_opaque()
- */
-RD_EXPORT
-void *rd_kafka_opaque(const rd_kafka_t *rk);
-
-
-
-/**
- * Sets the default topic configuration to use for automatically
- * subscribed topics (e.g., through pattern-matched topics).
- * The topic config object is not usable after this call.
- */
-RD_EXPORT
-void rd_kafka_conf_set_default_topic_conf (rd_kafka_conf_t *conf,
-                                           rd_kafka_topic_conf_t *tconf);
-
-
-
-/**
- * @brief Retrieve configuration value for property \p name.
- *
- * If \p dest is non-NULL the value will be written to \p dest with at
- * most \p dest_size.
- *
- * \p *dest_size is updated to the full length of the value, thus if
- * \p *dest_size initially is smaller than the full length the application
- * may reallocate \p dest to fit the returned \p *dest_size and try again.
- *
- * If \p dest is NULL only the full length of the value is returned.
- *
- * Fallthrough:
- * Topic-level configuration properties from the \c default_topic_conf
- * may be retrieved using this interface.
- *
- * @returns \p RD_KAFKA_CONF_OK if the property name matched, else
- * \p RD_KAFKA_CONF_UNKNOWN.
- */
-RD_EXPORT
-rd_kafka_conf_res_t rd_kafka_conf_get (const rd_kafka_conf_t *conf,
-                                       const char *name,
-                                       char *dest, size_t *dest_size);
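
A sketch of the two-pass retrieval pattern described above; get_conf_value is
an illustrative helper, and whether the reported size already includes the
terminating NUL is treated as an assumption (an extra byte is allocated to be
safe):

    #include <stdlib.h>
    #include <librdkafka/rdkafka.h>

    /* Returns a malloc()ed copy of the value, or NULL if the property is
     * unknown.  The caller must free() the result. */
    static char *get_conf_value (const rd_kafka_conf_t *conf, const char *name) {
            size_t size = 0;
            char *val;

            if (rd_kafka_conf_get(conf, name, NULL, &size) != RD_KAFKA_CONF_OK)
                    return NULL;

            val = malloc(size + 1);   /* +1 in case size excludes the NUL */
            size += 1;
            if (rd_kafka_conf_get(conf, name, val, &size) != RD_KAFKA_CONF_OK) {
                    free(val);
                    return NULL;
            }
            return val;
    }
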
-
-
-/**
- * @brief Retrieve topic configuration value for property \p name.
- *
- * @sa rd_kafka_conf_get()
- */
-RD_EXPORT
-rd_kafka_conf_res_t rd_kafka_topic_conf_get (const rd_kafka_topic_conf_t *conf,
-                                             const char *name,
-                                             char *dest, size_t *dest_size);
-
-
-/**
- * @brief Dump the configuration properties and values of \p conf to an array
- *        with \"key\", \"value\" pairs.
- *
- * The number of entries in the array is returned in \p *cntp.
- *
- * The dump must be freed with `rd_kafka_conf_dump_free()`.
- */
-RD_EXPORT
-const char **rd_kafka_conf_dump(rd_kafka_conf_t *conf, size_t *cntp);
-
-
-/**
- * @brief Dump the topic configuration properties and values of \p conf
- *        to an array with \"key\", \"value\" pairs.
- *
- * The number of entries in the array is returned in \p *cntp.
- *
- * The dump must be freed with `rd_kafka_conf_dump_free()`.
- */
-RD_EXPORT
-const char **rd_kafka_topic_conf_dump(rd_kafka_topic_conf_t *conf,
-				       size_t *cntp);
-
-/**
- * @brief Frees a configuration dump returned from `rd_kafka_conf_dump()` or
- *        `rd_kafka_topic_conf_dump()`.
- */
-RD_EXPORT
-void rd_kafka_conf_dump_free(const char **arr, size_t cnt);
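
A sketch of dumping a configuration as "key = value" lines and freeing the
dump; dump_conf is an illustrative name, and the array is assumed to hold
\p cnt strings laid out as consecutive key,value pairs as described above:

    #include <stdio.h>
    #include <librdkafka/rdkafka.h>

    static void dump_conf (rd_kafka_conf_t *conf) {
            size_t cnt, i;
            const char **arr = rd_kafka_conf_dump(conf, &cnt);

            for (i = 0; i + 1 < cnt; i += 2)
                    printf("%s = %s\n", arr[i], arr[i + 1]);

            rd_kafka_conf_dump_free(arr, cnt);
    }
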
-
-/**
- * @brief Prints a table to \p fp of all supported configuration properties,
- *        their default values as well as a description.
- */
-RD_EXPORT
-void rd_kafka_conf_properties_show(FILE *fp);
-
-/**@}*/
-
-
-/**
- * @name Topic configuration
- * @{
- *
- * @brief Topic configuration property interface
- *
- */
-
-
-/**
- * @brief Create topic configuration object
- *
- * @sa Same semantics as for rd_kafka_conf_new().
- */
-RD_EXPORT
-rd_kafka_topic_conf_t *rd_kafka_topic_conf_new(void);
-
-
-/**
- * @brief Creates a copy/duplicate of topic configuration object \p conf.
- */
-RD_EXPORT
-rd_kafka_topic_conf_t *rd_kafka_topic_conf_dup(const rd_kafka_topic_conf_t
-						*conf);
-
-
-/**
- * @brief Destroys a topic conf object.
- */
-RD_EXPORT
-void rd_kafka_topic_conf_destroy(rd_kafka_topic_conf_t *topic_conf);
-
-
-/**
- * @brief Sets a single rd_kafka_topic_conf_t value by property name.
- *
- * \p topic_conf should have been previously set up
- * with `rd_kafka_topic_conf_new()`.
- *
- * @returns rd_kafka_conf_res_t to indicate success or failure.
- */
-RD_EXPORT
-rd_kafka_conf_res_t rd_kafka_topic_conf_set(rd_kafka_topic_conf_t *conf,
-					     const char *name,
-					     const char *value,
-					     char *errstr, size_t errstr_size);
-
-/**
- * @brief Sets the application's opaque pointer that will be passed to all topic
- * callbacks as the \c rkt_opaque argument.
- */
-RD_EXPORT
-void rd_kafka_topic_conf_set_opaque(rd_kafka_topic_conf_t *conf, void *opaque);
-
-
-/**
- * @brief \b Producer: Set partitioner callback in provided topic conf object.
- *
- * The partitioner may be called in any thread at any time,
- * and may be called multiple times for the same message/key.
- *
- * Partitioner function constraints:
- *   - MUST NOT call any rd_kafka_*() functions except:
- *       rd_kafka_topic_partition_available()
- *   - MUST NOT block or execute for prolonged periods of time.
- *   - MUST return a value between 0 and partition_cnt-1, or the
- *     special \c RD_KAFKA_PARTITION_UA value if partitioning
- *     could not be performed.
- */
-RD_EXPORT
-void
-rd_kafka_topic_conf_set_partitioner_cb (rd_kafka_topic_conf_t *topic_conf,
-					int32_t (*partitioner) (
-						const rd_kafka_topic_t *rkt,
-						const void *keydata,
-						size_t keylen,
-						int32_t partition_cnt,
-						void *rkt_opaque,
-						void *msg_opaque));
-
-/**
- * @brief Check if partition is available (has a leader broker).
- *
- * @returns 1 if the partition is available, else 0.
- *
- * @warning This function must only be called from inside a partitioner function
- */
-RD_EXPORT
-int rd_kafka_topic_partition_available(const rd_kafka_topic_t *rkt,
-					int32_t partition);
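
A sketch of a partitioner that respects the constraints above: it only calls
rd_kafka_topic_partition_available(), does not block, and falls back to
RD_KAFKA_PARTITION_UA; my_partitioner and the simple key hash are illustrative:

    #include <librdkafka/rdkafka.h>

    static int32_t my_partitioner (const rd_kafka_topic_t *rkt,
                                   const void *keydata, size_t keylen,
                                   int32_t partition_cnt,
                                   void *rkt_opaque, void *msg_opaque) {
            const unsigned char *key = keydata;
            unsigned int hash = 0;
            int32_t partition;
            size_t i;

            if (partition_cnt <= 0)
                    return RD_KAFKA_PARTITION_UA;

            for (i = 0; i < keylen; i++)
                    hash = hash * 31 + key[i];
            partition = (int32_t)(hash % (unsigned int)partition_cnt);

            if (!rd_kafka_topic_partition_available(rkt, partition))
                    return RD_KAFKA_PARTITION_UA;
            return partition;
    }

It would be registered with rd_kafka_topic_conf_set_partitioner_cb(topic_conf,
my_partitioner) before the topic handle is created.
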
-
-
-/*******************************************************************
- *								   *
- * Partitioners provided by rdkafka                                *
- *								   *
- *******************************************************************/
-
-/**
- * @brief Random partitioner.
- *
- * Will try not to return unavailable partitions.
- *
- * @returns a random partition between 0 and \p partition_cnt - 1.
- *
- */
-RD_EXPORT
-int32_t rd_kafka_msg_partitioner_random(const rd_kafka_topic_t *rkt,
-					 const void *key, size_t keylen,
-					 int32_t partition_cnt,
-					 void *opaque, void *msg_opaque);
-
-/**
- * @brief Consistent partitioner.
- *
- * Uses consistent hashing to map identical keys onto identical partitions.
- *
- * @returns a \"random\" partition between 0 and \p partition_cnt - 1 based on
- *          the CRC value of the key
- */
-RD_EXPORT
-int32_t rd_kafka_msg_partitioner_consistent (const rd_kafka_topic_t *rkt,
-					 const void *key, size_t keylen,
-					 int32_t partition_cnt,
-					 void *opaque, void *msg_opaque);
-
-/**
- * @brief Consistent-Random partitioner.
- *
- * This is the default partitioner.
- * Uses consistent hashing to map identical keys onto identical partitions, and
- * messages without keys will be assigned via the random partitioner.
- *
- * @returns a \"random\" partition between 0 and \p partition_cnt - 1 based on
- *          the CRC value of the key (if provided)
- */
-RD_EXPORT
-int32_t rd_kafka_msg_partitioner_consistent_random (const rd_kafka_topic_t *rkt,
-           const void *key, size_t keylen,
-           int32_t partition_cnt,
-           void *opaque, void *msg_opaque);
-
-
-/**@}*/
-
-
-
-/**
- * @name Main Kafka and Topic object handles
- * @{
- *
- *
- */
-
-
-
-
-/**
- * @brief Creates a new Kafka handle and starts its operation according to the
- *        specified \p type (\p RD_KAFKA_CONSUMER or \p RD_KAFKA_PRODUCER).
- *
- * \p conf is an optional struct created with `rd_kafka_conf_new()` that will
- * be used instead of the default configuration.
- * The \p conf object is freed by this function on success and must not be used
- * or destroyed by the application subsequently.
- * See `rd_kafka_conf_set()` et al. for more information.
- *
- * \p errstr must be a pointer to memory of at least size \p errstr_size where
- * `rd_kafka_new()` may write a human readable error message in case the
- * creation of a new handle fails, in which case the function returns NULL.
- *
- * @remark \b RD_KAFKA_CONSUMER: When a new \p RD_KAFKA_CONSUMER
- *           rd_kafka_t handle is created it may either operate in the
- *           legacy simple consumer mode using the rd_kafka_consume_start()
- *           interface, or the High-level KafkaConsumer API.
- * @remark An application must only use one of these groups of APIs on a given
- *         rd_kafka_t RD_KAFKA_CONSUMER handle.
- *
- *
- * @returns The Kafka handle on success or NULL on error (see \p errstr)
- *
- * @sa To destroy the Kafka handle, use rd_kafka_destroy().
- */
-RD_EXPORT
-rd_kafka_t *rd_kafka_new(rd_kafka_type_t type, rd_kafka_conf_t *conf,
-			  char *errstr, size_t errstr_size);
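
A sketch of creating a producer handle; create_producer is an illustrative
wrapper.  On failure the \p conf object is still owned by the application:

    #include <stdio.h>
    #include <librdkafka/rdkafka.h>

    static rd_kafka_t *create_producer (rd_kafka_conf_t *conf) {
            char errstr[512];
            rd_kafka_t *rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf,
                                          errstr, sizeof(errstr));
            if (!rk)
                    fprintf(stderr, "rd_kafka_new failed: %s\n", errstr);
            return rk;   /* on success conf is now owned by librdkafka */
    }
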
-
-
-/**
- * @brief Destroy Kafka handle.
- *
- * @remark This is a blocking operation.
- */
-RD_EXPORT
-void        rd_kafka_destroy(rd_kafka_t *rk);
-
-
-
-/**
- * @brief Returns Kafka handle name.
- */
-RD_EXPORT
-const char *rd_kafka_name(const rd_kafka_t *rk);
-
-
-/**
- * @brief Returns Kafka handle type.
- */
-RD_EXPORT
-rd_kafka_type_t rd_kafka_type(const rd_kafka_t *rk);
-
-
-/**
- * @brief Returns this client's broker-assigned group member id 
- *
- * @remark This currently requires the high-level KafkaConsumer
- *
- * @returns An allocated string containing the current broker-assigned group
- *          member id, or NULL if not available.
- *          The application must free the string with \p free() or
- *          rd_kafka_mem_free()
- */
-RD_EXPORT
-char *rd_kafka_memberid (const rd_kafka_t *rk);
-
-
-
-/**
- * @brief Returns the ClusterId as reported in broker metadata.
- *
- * @param timeout_ms If there is no cached value from metadata retrieval
- *                   then this specifies the maximum amount of time
- *                   (in milliseconds) the call will block waiting
- *                   for metadata to be retrieved.
- *                   Use 0 for non-blocking calls.
- *
- * @remark Requires broker version >=0.10.0 and api.version.request=true.
- *
- * @remark The application must free the returned pointer
- *         using rd_kafka_mem_free().
- *
- * @returns a newly allocated string containing the ClusterId, or NULL
- *          if no ClusterId could be retrieved in the allotted timespan.
- */
-RD_EXPORT
-char *rd_kafka_clusterid (rd_kafka_t *rk, int timeout_ms);
-
-
-/**
- * @brief Creates a new topic handle for topic named \p topic.
- *
- * \p conf is an optional configuration for the topic created with
- * `rd_kafka_topic_conf_new()` that will be used instead of the default
- * topic configuration.
- * The \p conf object is freed by this function and must not be used or
- * destroyed by the application subsequently.
- * See `rd_kafka_topic_conf_set()` et al. for more information.
- *
- * Topic handles are refcounted internally and calling rd_kafka_topic_new()
- * again with the same topic name will return the previous topic handle
- * without updating the original handle's configuration.
- * Applications must eventually call rd_kafka_topic_destroy() for each
- * successful call to rd_kafka_topic_new() to clear up resources.
- *
- * @returns the new topic handle or NULL on error (use rd_kafka_errno2err()
- *          to convert system \p errno to an rd_kafka_resp_err_t error code).
- *
- * @sa rd_kafka_topic_destroy()
- */
-RD_EXPORT
-rd_kafka_topic_t *rd_kafka_topic_new(rd_kafka_t *rk, const char *topic,
-				      rd_kafka_topic_conf_t *conf);
-
-
-
-/**
- * @brief Lose the application's topic handle refcount as previously created
- *        with `rd_kafka_topic_new()`.
- *
- * @remark Since topic objects are refcounted (both internally and for the app)
- *         the topic object might not actually be destroyed by this call,
- *         but the application must consider the object destroyed.
- */
-RD_EXPORT
-void rd_kafka_topic_destroy(rd_kafka_topic_t *rkt);
-
-
-/**
- * @brief Returns the topic name.
- */
-RD_EXPORT
-const char *rd_kafka_topic_name(const rd_kafka_topic_t *rkt);
-
-
-/**
- * @brief Get the \p rkt_opaque pointer that was set in the topic configuration.
- */
-RD_EXPORT
-void *rd_kafka_topic_opaque (const rd_kafka_topic_t *rkt);
-
-
-/**
- * @brief Unassigned partition.
- *
- * The unassigned partition is used by the producer API for messages
- * that should be partitioned using the configured or default partitioner.
- */
-#define RD_KAFKA_PARTITION_UA  ((int32_t)-1)
-
-
-/**
- * @brief Polls the provided kafka handle for events.
- *
- * Events will cause application provided callbacks to be called.
- *
- * The \p timeout_ms argument specifies the maximum amount of time
- * (in milliseconds) that the call will block waiting for events.
- * For non-blocking calls, provide 0 as \p timeout_ms.
- * To wait indefinitely for an event, provide -1.
- *
- * @remark  An application should make sure to call poll() at regular
- *          intervals to serve any queued callbacks waiting to be called.
- *
- * Events:
- *   - delivery report callbacks  (if dr_cb/dr_msg_cb is configured) [producer]
- *   - error callbacks (rd_kafka_conf_set_error_cb()) [all]
- *   - stats callbacks (rd_kafka_conf_set_stats_cb()) [all]
- *   - throttle callbacks (rd_kafka_conf_set_throttle_cb()) [all]
- *
- * @returns the number of events served.
- */
-RD_EXPORT
-int rd_kafka_poll(rd_kafka_t *rk, int timeout_ms);
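
A sketch of an event-serving loop; serve_events, the run flag and the 100 ms
upper bound per iteration are illustrative:

    #include <librdkafka/rdkafka.h>

    /* Serves delivery reports, error, stats and throttle callbacks. */
    static void serve_events (rd_kafka_t *rk, volatile int *run) {
            while (*run)
                    rd_kafka_poll(rk, 100);
    }
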
-
-
-/**
- * @brief Cancels the current callback dispatcher (rd_kafka_poll(),
- *        rd_kafka_consume_callback(), etc).
- *
- * A callback may use this to force an immediate return to the calling
- * code (caller of e.g. rd_kafka_poll()) without processing any further
- * events.
- *
- * @remark This function MUST ONLY be called from within a librdkafka callback.
- */
-RD_EXPORT
-void rd_kafka_yield (rd_kafka_t *rk);
-
-
-
-
-/**
- * @brief Pause producing or consumption for the provided list of partitions.
- *
- * Success or error is returned per partition in the \p err field of the \p partitions list.
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR
- */
-RD_EXPORT rd_kafka_resp_err_t
-rd_kafka_pause_partitions (rd_kafka_t *rk,
-			   rd_kafka_topic_partition_list_t *partitions);
-
-
-
-/**
- * @brief Resume producing or consumption for the provided list of partitions.
- *
- * Success or error is returned per partition in the \p err field of the \p partitions list.
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR
- */
-RD_EXPORT rd_kafka_resp_err_t
-rd_kafka_resume_partitions (rd_kafka_t *rk,
-			    rd_kafka_topic_partition_list_t *partitions);
-
-
-
-
-/**
- * @brief Query broker for low (oldest/beginning) and high (newest/end) offsets
- *        for partition.
- *
- * Offsets are returned in \p *low and \p *high respectively.
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or an error code on failure.
- */
-RD_EXPORT rd_kafka_resp_err_t
-rd_kafka_query_watermark_offsets (rd_kafka_t *rk,
-		      const char *topic, int32_t partition,
-		      int64_t *low, int64_t *high, int timeout_ms);
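
A sketch of querying the watermarks with a 5 second timeout; print_watermarks
is an illustrative helper:

    #include <stdio.h>
    #include <librdkafka/rdkafka.h>

    static void print_watermarks (rd_kafka_t *rk, const char *topic,
                                  int32_t partition) {
            int64_t low = 0, high = 0;
            rd_kafka_resp_err_t err =
                    rd_kafka_query_watermark_offsets(rk, topic, partition,
                                                     &low, &high, 5000);
            if (err)
                    fprintf(stderr, "watermarks: %s\n", rd_kafka_err2str(err));
            else
                    printf("%s [%d]: low %lld, high %lld\n", topic,
                           (int)partition, (long long)low, (long long)high);
    }
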
-
-
-/**
- * @brief Get last known low (oldest/beginning) and high (newest/end) offsets
- *        for partition.
- *
- * The low offset is updated periodically (if statistics.interval.ms is set)
- * while the high offset is updated on each fetched message set from the broker.
- *
- * If there is no cached offset (either low or high, or both) then
- * RD_KAFKA_OFFSET_INVALID will be returned for the respective offset.
- *
- * Offsets are returned in \p *low and \p *high respectively.
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or an error code on failure.
- *
- * @remark Shall only be used with an active consumer instance.
- */
-RD_EXPORT rd_kafka_resp_err_t
-rd_kafka_get_watermark_offsets (rd_kafka_t *rk,
-				const char *topic, int32_t partition,
-				int64_t *low, int64_t *high);
-
-
-
-/**
- * @brief Look up the offsets for the given partitions by timestamp.
- *
- * The returned offset for each partition is the earliest offset whose
- * timestamp is greater than or equal to the given timestamp in the
- * corresponding partition.
- *
- * The timestamps to query are represented as \c offset in \p offsets
- * on input, and \c offset will contain the offset on output.
- *
- * The function will block for at most \p timeout_ms milliseconds.
- *
- * @remark Duplicate Topic+Partitions are not supported.
- * @remark Per-partition errors may be returned in \c rd_kafka_topic_partition_t.err
- *
- * @returns an error code for general errors, else RD_KAFKA_RESP_ERR_NO_ERROR
- *          in which case per-partition errors might be set.
- */
-RD_EXPORT rd_kafka_resp_err_t
-rd_kafka_offsets_for_times (rd_kafka_t *rk,
-                            rd_kafka_topic_partition_list_t *offsets,
-                            int timeout_ms);
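
A sketch of looking up the offset for a single partition by timestamp (in
milliseconds); offset_for_time and the 10 second timeout are illustrative:

    #include <librdkafka/rdkafka.h>

    static int64_t offset_for_time (rd_kafka_t *rk, const char *topic,
                                    int32_t partition, int64_t ts_ms) {
            rd_kafka_topic_partition_list_t *offs =
                    rd_kafka_topic_partition_list_new(1);
            rd_kafka_topic_partition_t *rktpar =
                    rd_kafka_topic_partition_list_add(offs, topic, partition);
            int64_t result = RD_KAFKA_OFFSET_INVALID;

            rktpar->offset = ts_ms;   /* timestamp in, offset out */
            if (!rd_kafka_offsets_for_times(rk, offs, 10000) && !rktpar->err)
                    result = rktpar->offset;

            rd_kafka_topic_partition_list_destroy(offs);
            return result;
    }
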
-
-
-/**
- * @brief Free pointer returned by librdkafka
- *
- * This is typically an abstraction for the free(3) call and makes sure
- * the application can use the same memory allocator as librdkafka for
- * freeing pointers returned by librdkafka.
- *
- * In standard setups it is usually not necessary to use this interface
- * rather than the free(3) function.
- *
- * @remark rd_kafka_mem_free() must only be used for pointers returned by APIs
- *         that explicitly mention using this function for freeing.
- */
-RD_EXPORT
-void rd_kafka_mem_free (rd_kafka_t *rk, void *ptr);
-
-
-/**@}*/
-
-
-
-
-
-/**
- * @name Queue API
- * @{
- *
- * Message queues allow the application to re-route consumed messages
- * from multiple topic+partitions into one single queue point.
- * This queue point containing messages from a number of topic+partitions
- * may then be served by a single rd_kafka_consume*_queue() call,
- * rather than one call per topic+partition combination.
- */
-
-
-/**
- * @brief Create a new message queue.
- *
- * See rd_kafka_consume_start_queue(), rd_kafka_consume_queue(), et.al.
- */
-RD_EXPORT
-rd_kafka_queue_t *rd_kafka_queue_new(rd_kafka_t *rk);
-
-/**
- * Destroy a queue, purging all of its enqueued messages.
- */
-RD_EXPORT
-void rd_kafka_queue_destroy(rd_kafka_queue_t *rkqu);
-
-
-/**
- * @returns a reference to the main librdkafka event queue.
- * This is the queue served by rd_kafka_poll().
- *
- * Use rd_kafka_queue_destroy() to lose the reference.
- */
-RD_EXPORT
-rd_kafka_queue_t *rd_kafka_queue_get_main (rd_kafka_t *rk);
-
-
-/**
- * @returns a reference to the librdkafka consumer queue.
- * This is the queue served by rd_kafka_consumer_poll().
- *
- * Use rd_kafka_queue_destroy() to lose the reference.
- *
- * @remark rd_kafka_queue_destroy() MUST be called on this queue
- *         prior to calling rd_kafka_consumer_close().
- */
-RD_EXPORT
-rd_kafka_queue_t *rd_kafka_queue_get_consumer (rd_kafka_t *rk);
-
-/**
- * @returns a reference to the partition's queue, or NULL if
- *          partition is invalid.
- *
- * Use rd_kafka_queue_destroy() to lose the reference.
- *
- * @remark rd_kafka_queue_destroy() MUST be called on this queue
- * 
- * @remark This function only works on consumers.
- */
-RD_EXPORT
-rd_kafka_queue_t *rd_kafka_queue_get_partition (rd_kafka_t *rk,
-                                                const char *topic,
-                                                int32_t partition);
-
-/**
- * @brief Forward/re-route queue \p src to \p dst.
- * If \p dst is \c NULL the forwarding is removed.
- *
- * The internal refcounts for both queues are increased.
- * 
- * @remark Regardless of whether \p dst is NULL or not, after calling this
- *         function, \p src will not forward its fetch queue to the consumer
- *         queue.
- */
-RD_EXPORT
-void rd_kafka_queue_forward (rd_kafka_queue_t *src, rd_kafka_queue_t *dst);
-
-/**
- * @brief Forward librdkafka logs (and debug) to the specified queue
- *        for serving with one of the ..poll() calls.
- *
- *        This allows an application to serve log callbacks (\c log_cb)
- *        in its thread of choice.
- *
- * @param rkqu Queue to forward logs to. If the value is NULL the logs
- *        are forwarded to the main queue.
- *
- * @remark The configuration property \c log.queue MUST also be set to true.
- *
- * @remark librdkafka maintains its own reference to the provided queue.
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or an error code on error.
- */
-RD_EXPORT
-rd_kafka_resp_err_t rd_kafka_set_log_queue (rd_kafka_t *rk,
-                                            rd_kafka_queue_t *rkqu);
-
-
-/**
- * @returns the current number of elements in queue.
- */
-RD_EXPORT
-size_t rd_kafka_queue_length (rd_kafka_queue_t *rkqu);
-
-
-/**
- * @brief Enable IO event triggering for queue.
- *
- * To ease integration with IO based polling loops this API
- * allows an application to create a separate file-descriptor
- * that librdkafka will write \p payload (of size \p size) to
- * whenever a new element is enqueued on a previously empty queue.
- *
- * To remove event triggering call with \p fd = -1.
- *
- * librdkafka will maintain a copy of the \p payload.
- *
- * @remark When using forwarded queues the IO event must only be enabled
- *         on the final forwarded-to (destination) queue.
- */
-RD_EXPORT
-void rd_kafka_queue_io_event_enable (rd_kafka_queue_t *rkqu, int fd,
-				     const void *payload, size_t size);
-
-/**@}*/
-
-/**
- *
- * @name Simple Consumer API (legacy)
- * @{
- *
- */
-
-
-#define RD_KAFKA_OFFSET_BEGINNING -2  /**< Start consuming from beginning of
-				       *   kafka partition queue: oldest msg */
-#define RD_KAFKA_OFFSET_END       -1  /**< Start consuming from end of kafka
-				       *   partition queue: next msg */
-#define RD_KAFKA_OFFSET_STORED -1000  /**< Start consuming from offset retrieved
-				       *   from offset store */
-#define RD_KAFKA_OFFSET_INVALID -1001 /**< Invalid offset */
-
-
-/** @cond NO_DOC */
-#define RD_KAFKA_OFFSET_TAIL_BASE -2000 /* internal: do not use */
-/** @endcond */
-
-/**
- * @brief Start consuming \p CNT messages from topic's current end offset.
- *
- * That is, if current end offset is 12345 and \p CNT is 200, it will start
- * consuming from offset \c 12345-200 = \c 12145. */
-#define RD_KAFKA_OFFSET_TAIL(CNT)  (RD_KAFKA_OFFSET_TAIL_BASE - (CNT))
-
-/**
- * @brief Start consuming messages for topic \p rkt and \p partition
- * at offset \p offset which may either be an absolute \c (0..N)
- * or one of the logical offsets:
- *  - RD_KAFKA_OFFSET_BEGINNING
- *  - RD_KAFKA_OFFSET_END
- *  - RD_KAFKA_OFFSET_STORED
- *  - RD_KAFKA_OFFSET_TAIL
- *
- * rdkafka will attempt to keep \c queued.min.messages (config property)
- * messages in the local queue by repeatedly fetching batches of messages
- * from the broker until the threshold is reached.
- *
- * The application shall use one of the `rd_kafka_consume*()` functions
- * to consume messages from the local queue, each kafka message being
- * represented as a `rd_kafka_message_t *` object.
- *
- * `rd_kafka_consume_start()` must not be called multiple times for the same
- * topic and partition without stopping consumption first with
- * `rd_kafka_consume_stop()`.
- *
- * @returns 0 on success or -1 on error in which case errno is set accordingly:
- *  - EBUSY    - Conflicts with an existing or previous subscription
- *               (RD_KAFKA_RESP_ERR__CONFLICT)
- *  - EINVAL   - Invalid offset, or incomplete configuration (lacking group.id)
- *               (RD_KAFKA_RESP_ERR__INVALID_ARG)
- *  - ESRCH    - requested \p partition is invalid.
- *               (RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION)
- *  - ENOENT   - topic is unknown in the Kafka cluster.
- *               (RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC)
- *
- * Use `rd_kafka_errno2err()` to convert system \c errno to `rd_kafka_resp_err_t`
- */
-RD_EXPORT
-int rd_kafka_consume_start(rd_kafka_topic_t *rkt, int32_t partition,
-			    int64_t offset);
-
-/**
- * @brief Same as rd_kafka_consume_start() but re-routes incoming messages to
- * the provided queue \p rkqu (which must have been previously allocated
- * with `rd_kafka_queue_new()`).
- *
- * The application must use one of the `rd_kafka_consume_*_queue()` functions
- * to receive fetched messages.
- *
- * `rd_kafka_consume_start_queue()` must not be called multiple times for the
- * same topic and partition without stopping consumption first with
- * `rd_kafka_consume_stop()`.
- * `rd_kafka_consume_start()` and `rd_kafka_consume_start_queue()` must not
- * be combined for the same topic and partition.
- */
-RD_EXPORT
-int rd_kafka_consume_start_queue(rd_kafka_topic_t *rkt, int32_t partition,
-				  int64_t offset, rd_kafka_queue_t *rkqu);
-
-/**
- * @brief Stop consuming messages for topic \p rkt and \p partition, purging
- * all messages currently in the local queue.
- *
- * NOTE: To enforce synchronisation this call will block until the internal
- *       fetcher has terminated and offsets are committed to configured
- *       storage method.
- *
- * The application needs to stop all consumers before calling
- * `rd_kafka_destroy()` on the main object handle.
- *
- * @returns 0 on success or -1 on error (see `errno`).
- */
-RD_EXPORT
-int rd_kafka_consume_stop(rd_kafka_topic_t *rkt, int32_t partition);
-
-
-
-/**
- * @brief Seek consumer for topic+partition to \p offset which is either an
- *        absolute or logical offset.
- *
- * If \p timeout_ms is not 0 the call will wait this long for the
- * seek to be performed. If the timeout is reached the internal state
- * will be unknown and this function returns `RD_KAFKA_RESP_ERR__TIMED_OUT`.
- * If \p timeout_ms is 0 it will initiate the seek but return
- * immediately without any error reporting (e.g., async).
- *
- * This call triggers a fetch queue barrier flush.
- *
- * @returns `RD_KAFKA_RESP_ERR__NO_ERROR` on success else an error code.
- */
-RD_EXPORT
-rd_kafka_resp_err_t rd_kafka_seek (rd_kafka_topic_t *rkt,
-                                   int32_t partition,
-                                   int64_t offset,
-                                   int timeout_ms);
-
-
-/**
- * @brief Consume a single message from topic \p rkt and \p partition
- *
- * \p timeout_ms is the maximum amount of time to wait for a message to be received.
- * Consumer must have been previously started with `rd_kafka_consume_start()`.
- *
- * @returns a message object on success or \c NULL on error.
- * The message object must be destroyed with `rd_kafka_message_destroy()`
- * when the application is done with it.
- *
- * Errors (when returning NULL):
- *  - ETIMEDOUT - \p timeout_ms was reached with no new messages fetched.
- *  - ENOENT    - \p rkt + \p partition is unknown.
- *                 (no prior `rd_kafka_consume_start()` call)
- *
- * NOTE: The returned message's \c ..->err must be checked for errors.
- * NOTE: \c ..->err \c == \c RD_KAFKA_RESP_ERR__PARTITION_EOF signals that the
- *       end of the partition has been reached, which should typically not be
- *       considered an error. The application should handle this case
- *       (e.g., ignore).
- *
- * @remark on_consume() interceptors may be called from this function prior to
- *         passing message to application.
- */
-RD_EXPORT
-rd_kafka_message_t *rd_kafka_consume(rd_kafka_topic_t *rkt, int32_t partition,
-				      int timeout_ms);
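
A sketch of the start/consume/stop cycle for one partition; consume_partition,
the run flag and the 1 second timeout are illustrative:

    #include <errno.h>
    #include <stdio.h>
    #include <librdkafka/rdkafka.h>

    static void consume_partition (rd_kafka_topic_t *rkt, int32_t partition,
                                   volatile int *run) {
            if (rd_kafka_consume_start(rkt, partition,
                                       RD_KAFKA_OFFSET_BEGINNING) == -1) {
                    fprintf(stderr, "consume_start failed: %s\n",
                            rd_kafka_err2str(rd_kafka_errno2err(errno)));
                    return;
            }

            while (*run) {
                    rd_kafka_message_t *msg =
                            rd_kafka_consume(rkt, partition, 1000);
                    if (!msg)
                            continue;   /* timed out */
                    if (msg->err == RD_KAFKA_RESP_ERR_NO_ERROR)
                            printf("offset %lld: %.*s\n",
                                   (long long)msg->offset,
                                   (int)msg->len, (const char *)msg->payload);
                    /* msg->err such as RD_KAFKA_RESP_ERR__PARTITION_EOF is
                     * informational here and not treated as fatal. */
                    rd_kafka_message_destroy(msg);
            }

            rd_kafka_consume_stop(rkt, partition);
    }
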
-
-
-
-/**
- * @brief Consume up to \p rkmessages_size from topic \p rkt and \p partition
- *        putting a pointer to each message in the application provided
- *        array \p rkmessages (of size \p rkmessages_size entries).
- *
- * `rd_kafka_consume_batch()` provides higher throughput performance
- * than `rd_kafka_consume()`.
- *
- * \p timeout_ms is the maximum amount of time to wait for all of
- * \p rkmessages_size messages to be put into \p rkmessages.
- * If no messages were available within the timeout period this function
- * returns 0 and \p rkmessages remains untouched.
- * This differs somewhat from `rd_kafka_consume()`.
- *
- * The message objects must be destroyed with `rd_kafka_message_destroy()`
- * when the application is done with them.
- *
- * @returns the number of rkmessages added in \p rkmessages,
- * or -1 on error (same error codes as for `rd_kafka_consume()`).
- *
- * @sa rd_kafka_consume()
- *
- * @remark on_consume() interceptors may be called from this function prior to
- *         passing message to application.
- */
-RD_EXPORT
-ssize_t rd_kafka_consume_batch(rd_kafka_topic_t *rkt, int32_t partition,
-				int timeout_ms,
-				rd_kafka_message_t **rkmessages,
-				size_t rkmessages_size);
-
-
-
-/**
- * @brief Consumes messages from topic \p rkt and \p partition, calling
- * the provided callback for each consumed message.
- *
- * `rd_kafka_consume_callback()` provides higher throughput performance
- * than both `rd_kafka_consume()` and `rd_kafka_consume_batch()`.
- *
- * \p timeout_ms is the maximum amount of time to wait for one or more messages
- * to arrive.
- *
- * The provided \p consume_cb function is called for each message,
- * the application \b MUST \b NOT call `rd_kafka_message_destroy()` on the
- * provided \p rkmessage.
- *
- * The \p opaque argument is passed to the 'consume_cb' as \p opaque.
- *
- * @returns the number of messages processed or -1 on error.
- *
- * @sa rd_kafka_consume()
- *
- * @remark on_consume() interceptors may be called from this function prior to
- *         passing message to application.
- */
-RD_EXPORT
-int rd_kafka_consume_callback(rd_kafka_topic_t *rkt, int32_t partition,
-			       int timeout_ms,
-			       void (*consume_cb) (rd_kafka_message_t
-						   *rkmessage,
-						   void *opaque),
-			       void *opaque);
-
-
-/**
- * @name Simple Consumer API (legacy): Queue consumers
- * @{
- *
- * The following `..._queue()` functions are analogous to the functions above
- * but read messages from the provided queue \p rkqu instead.
- * \p rkqu must have been previously created with `rd_kafka_queue_new()`
- * and the topic consumer must have been started with
- * `rd_kafka_consume_start_queue()` utilising the same queue.
- */
-
-/**
- * @brief Consume from queue
- *
- * @sa rd_kafka_consume()
- */
-RD_EXPORT
-rd_kafka_message_t *rd_kafka_consume_queue(rd_kafka_queue_t *rkqu,
-					    int timeout_ms);
-
-/**
- * @brief Consume batch of messages from queue
- *
- * @sa rd_kafka_consume_batch()
- */
-RD_EXPORT
-ssize_t rd_kafka_consume_batch_queue(rd_kafka_queue_t *rkqu,
-				      int timeout_ms,
-				      rd_kafka_message_t **rkmessages,
-				      size_t rkmessages_size);
-
-/**
- * @brief Consume multiple messages from queue with callback
- *
- * @sa rd_kafka_consume_callback()
- */
-RD_EXPORT
-int rd_kafka_consume_callback_queue(rd_kafka_queue_t *rkqu,
-				     int timeout_ms,
-				     void (*consume_cb) (rd_kafka_message_t
-							 *rkmessage,
-							 void *opaque),
-				     void *opaque);
-
-
-/**@}*/
-
-
-
-
-/**
- * @name Simple Consumer API (legacy): Topic+partition offset store.
- * @{
- *
- * If \c auto.commit.enable is true the offset is stored automatically prior to
- * returning the message(s) in each of the rd_kafka_consume*() functions
- * above.
- */
-
-
-/**
- * @brief Store offset \p offset for topic \p rkt partition \p partition.
- *
- * The offset will be committed (written) to the offset store according
- * to \c `auto.commit.interval.ms` or manual offset-less commit()
- *
- * @remark \c `enable.auto.offset.store` must be set to "false" when using this API.
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or an error code on error.
- */
-RD_EXPORT
-rd_kafka_resp_err_t rd_kafka_offset_store(rd_kafka_topic_t *rkt,
-					   int32_t partition, int64_t offset);
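
A sketch of manual offset storing (requires \c enable.auto.offset.store set to
"false"); mark_processed is an illustrative helper that stores the offset of a
message only after it has actually been processed:

    #include <librdkafka/rdkafka.h>

    static rd_kafka_resp_err_t mark_processed (const rd_kafka_message_t *msg) {
            return rd_kafka_offset_store(msg->rkt, msg->partition, msg->offset);
    }
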
-
-
-/**
- * @brief Store offsets for one or more partitions.
- *
- * The offset will be committed (written) to the offset store according
- * to \c `auto.commit.interval.ms` or manual offset-less commit().
- *
- * Per-partition success/error status is propagated through each partition's
- * \c .err field.
- *
- * @remark \c `enable.auto.offset.store` must be set to "false" when using this API.
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or an error code if
- *          none of the offsets could be stored.
- */
-RD_EXPORT rd_kafka_resp_err_t
-rd_kafka_offsets_store(rd_kafka_t *rk,
-                       rd_kafka_topic_partition_list_t *offsets);
-/**@}*/
-
-
-
-
-/**
- * @name KafkaConsumer (C)
- * @{
- * @brief High-level KafkaConsumer C API
- *
- *
- *
- */
-
-/**
- * @brief Subscribe to topic set using balanced consumer groups.
- *
- * Wildcard (regex) topics are supported by the librdkafka assignor:
- * any topic name in the \p topics list that is prefixed with \c \"^\" will
- * be regex-matched to the full list of topics in the cluster and matching
- * topics will be added to the subscription list.
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or
- *          RD_KAFKA_RESP_ERR__INVALID_ARG if list is empty, contains invalid
- *          topics or regexes.
- */
-RD_EXPORT rd_kafka_resp_err_t
-rd_kafka_subscribe (rd_kafka_t *rk,
-                    const rd_kafka_topic_partition_list_t *topics);
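
A sketch of subscribing to one literal topic and one regex pattern; the helper
name subscribe_topics, the topic name "events" and the "^metrics\." pattern
are illustrative:

    #include <librdkafka/rdkafka.h>

    static rd_kafka_resp_err_t subscribe_topics (rd_kafka_t *rk) {
            rd_kafka_topic_partition_list_t *topics =
                    rd_kafka_topic_partition_list_new(2);
            rd_kafka_resp_err_t err;

            /* Partitions are assigned by the group rebalance, hence UA. */
            rd_kafka_topic_partition_list_add(topics, "events",
                                              RD_KAFKA_PARTITION_UA);
            rd_kafka_topic_partition_list_add(topics, "^metrics\\..*",
                                              RD_KAFKA_PARTITION_UA);

            err = rd_kafka_subscribe(rk, topics);
            rd_kafka_topic_partition_list_destroy(topics);
            return err;
    }
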
-
-
-/**
- * @brief Unsubscribe from the current subscription set.
- */
-RD_EXPORT
-rd_kafka_resp_err_t rd_kafka_unsubscribe (rd_kafka_t *rk);
-
-
-/**
- * @brief Returns the current topic subscription
- *
- * @returns An error code on failure, otherwise \p topic is updated
- *          to point to a newly allocated topic list (possibly empty).
- *
- * @remark The application is responsible for calling
- *         rd_kafka_topic_partition_list_destroy on the returned list.
- */
-RD_EXPORT rd_kafka_resp_err_t
-rd_kafka_subscription (rd_kafka_t *rk,
-                       rd_kafka_topic_partition_list_t **topics);
-
-
-
-/**
- * @brief Poll the consumer for messages or events.
- *
- * Will block for at most \p timeout_ms milliseconds.
- *
- * @remark  An application should make sure to call consumer_poll() at regular
- *          intervals, even if no messages are expected, to serve any
- *          queued callbacks waiting to be called. This is especially
- *          important when a rebalance_cb has been registered as it needs
- *          to be called and handled properly to synchronize internal
- *          consumer state.
- *
- * @returns A message object which is a proper message if \p ->err is
- *          RD_KAFKA_RESP_ERR_NO_ERROR, or an event or error for any other
- *          value.
- *
- * @remark on_consume() interceptors may be called from this function prior to
- *         passing message to application.
- *
- * @sa rd_kafka_message_t
- */
-RD_EXPORT
-rd_kafka_message_t *rd_kafka_consumer_poll (rd_kafka_t *rk, int timeout_ms);
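
A sketch of a consumer loop built on this call; run_consumer and the 100 ms
poll interval are illustrative:

    #include <stdio.h>
    #include <librdkafka/rdkafka.h>

    static void run_consumer (rd_kafka_t *rk, volatile int *run) {
            while (*run) {
                    rd_kafka_message_t *msg = rd_kafka_consumer_poll(rk, 100);
                    if (!msg)
                            continue;
                    if (msg->err == RD_KAFKA_RESP_ERR_NO_ERROR)
                            printf("%.*s\n", (int)msg->len,
                                   (const char *)msg->payload);
                    else if (msg->err != RD_KAFKA_RESP_ERR__PARTITION_EOF)
                            fprintf(stderr, "consume error: %s\n",
                                    rd_kafka_message_errstr(msg));
                    rd_kafka_message_destroy(msg);
            }
            rd_kafka_consumer_close(rk);
    }
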
-
-/**
- * @brief Close down the KafkaConsumer.
- *
- * @remark This call will block until the consumer has revoked its assignment,
- *         calling the \c rebalance_cb if it is configured, committed offsets
- *         to broker, and left the consumer group.
- *         The maximum blocking time is roughly limited to session.timeout.ms.
- *
- * @returns An error code indicating if the consumer close was successful
- *          or not.
- *
- * @remark The application still needs to call rd_kafka_destroy() after
- *         this call finishes to clean up the underlying handle resources.
- *
- */
-RD_EXPORT
-rd_kafka_resp_err_t rd_kafka_consumer_close (rd_kafka_t *rk);
-
-
-
-/**
- * @brief Atomic assignment of partitions to consume.
- *
- * The new \p partitions will replace the existing assignment.
- *
- * When used from a rebalance callback the application shall pass the
- * partition list passed to the callback (or a copy of it) (even if the list
- * is empty) rather than NULL to maintain internal join state.
- *
- * A zero-length \p partitions will treat the partitions as a valid,
- * albeit empty, assignment, and maintain internal state, while a \c NULL
- * value for \p partitions will reset and clear the internal state.
- */
-RD_EXPORT rd_kafka_resp_err_t
-rd_kafka_assign (rd_kafka_t *rk,
-                 const rd_kafka_topic_partition_list_t *partitions);
-
-/**
- * @brief Returns the current partition assignment
- *
- * @returns An error code on failure, otherwise \p partitions is updated
- *          to point to a newly allocated partition list (possibly empty).
- *
- * @remark The application is responsible for calling
- *         rd_kafka_topic_partition_list_destroy on the returned list.
- */
-RD_EXPORT rd_kafka_resp_err_t
-rd_kafka_assignment (rd_kafka_t *rk,
-                     rd_kafka_topic_partition_list_t **partitions);
-
-
-
-
-/**
- * @brief Commit offsets on broker for the provided list of partitions.
- *
- * \p offsets should contain \c topic, \c partition, \c offset and possibly
- * \c metadata.
- * If \p offsets is NULL the current partition assignment will be used instead.
- *
- * If \p async is false this operation will block until the broker offset commit
- * is done, returning the resulting success or error code.
- *
- * If a rd_kafka_conf_set_offset_commit_cb() offset commit callback has been
- * configured the callback will be enqueued for a future call to
- * rd_kafka_poll(), rd_kafka_consumer_poll() or similar.
- */
-RD_EXPORT rd_kafka_resp_err_t
-rd_kafka_commit (rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets,
-                 int async);
-
-
-/**
- * @brief Commit message's offset on broker for the message's partition.
- *
- * @sa rd_kafka_commit
- */
-RD_EXPORT rd_kafka_resp_err_t
-rd_kafka_commit_message (rd_kafka_t *rk, const rd_kafka_message_t *rkmessage,
-                         int async);
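
A sketch of a synchronous per-message commit once the message has been
handled; commit_handled is an illustrative wrapper:

    #include <librdkafka/rdkafka.h>

    static rd_kafka_resp_err_t commit_handled (rd_kafka_t *rk,
                                               const rd_kafka_message_t *msg) {
            return rd_kafka_commit_message(rk, msg, 0 /* async=0: block */);
    }
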
-
-
-/**
- * @brief Commit offsets on broker for the provided list of partitions.
- *
- * See rd_kafka_commit for \p offsets semantics.
- *
- * The result of the offset commit will be posted on the provided \p rkqu queue.
- *
- * If the application uses one of the poll APIs (rd_kafka_poll(),
- * rd_kafka_consumer_poll(), rd_kafka_queue_poll(), ..) to serve the queue
- * the \p cb callback is required. \p opaque is passed to the callback.
- *
- * If using the event API the callback is ignored and the offset commit result
- * will be returned as an RD_KAFKA_EVENT_COMMIT event. The \p opaque
- * value will be available with rd_kafka_event_opaque()
- *
- * If \p rkqu is NULL a temporary queue will be created and the callback will
- * be served by this call.
- *
- * @sa rd_kafka_commit()
- * @sa rd_kafka_conf_set_offset_commit_cb()
- */
-RD_EXPORT rd_kafka_resp_err_t
-rd_kafka_commit_queue (rd_kafka_t *rk,
-		       const rd_kafka_topic_partition_list_t *offsets,
-		       rd_kafka_queue_t *rkqu,
-		       void (*cb) (rd_kafka_t *rk,
-				   rd_kafka_resp_err_t err,
-				   rd_kafka_topic_partition_list_t *offsets,
-				   void *opaque),
-		       void *opaque);
-
-
-/**
- * @brief Retrieve committed offsets for topics+partitions.
- *
- * The \p offset field of each requested partition will either be set to the
- * stored offset or to RD_KAFKA_OFFSET_INVALID in case there was no stored
- * offset for that partition.
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success in which case the
- *          \p offset or \p err field of each \p partitions' element is filled
- *          in with the stored offset, or a partition specific error.
- *          Else returns an error code.
- */
-RD_EXPORT rd_kafka_resp_err_t
-rd_kafka_committed (rd_kafka_t *rk,
-		    rd_kafka_topic_partition_list_t *partitions,
-		    int timeout_ms);
-
-
-
-/**
- * @brief Retrieve current positions (offsets) for topics+partitions.
- *
- * The \p offset field of each requested partition will be set to the offset
- * of the last consumed message + 1, or RD_KAFKA_OFFSET_INVALID in case there was
- * no previous message.
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success in which case the
- *          \p offset or \p err field of each \p partitions' element is filled
- *          in with the stored offset, or a partition specific error.
- *          Else returns an error code.
- */
-RD_EXPORT rd_kafka_resp_err_t
-rd_kafka_position (rd_kafka_t *rk,
-		   rd_kafka_topic_partition_list_t *partitions);
-
-
-/**@}*/
-
-
-
-/**
- * @name Producer API
- * @{
- *
- *
- */
-
-
-/**
- * @brief Producer message flags
- */
-#define RD_KAFKA_MSG_F_FREE  0x1 /**< Delegate freeing of payload to rdkafka. */
-#define RD_KAFKA_MSG_F_COPY  0x2 /**< rdkafka will make a copy of the payload. */
-#define RD_KAFKA_MSG_F_BLOCK 0x4 /**< Block produce*() on message queue full.
-				  *   WARNING: If a delivery report callback
-				  *            is used the application MUST
-				  *            call rd_kafka_poll() (or equiv.)
-				  *            to make sure delivered messages
-				  *            are drained from the internal
-				  *            delivery report queue.
-				  *            Failure to do so will result
-				  *            in indefinitely blocking on
-				  *            the produce() call when the
-				  *            message queue is full.
-				  */
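
A sketch showing \c RD_KAFKA_MSG_F_COPY in use with rd_kafka_produce()
(documented below); produce_copy is an illustrative helper:

    #include <stdio.h>
    #include <string.h>
    #include <librdkafka/rdkafka.h>

    static int produce_copy (rd_kafka_topic_t *rkt, const char *buf) {
            if (rd_kafka_produce(rkt, RD_KAFKA_PARTITION_UA,
                                 RD_KAFKA_MSG_F_COPY,
                                 (void *)buf, strlen(buf),
                                 NULL, 0,       /* no key */
                                 NULL) == -1) { /* no per-message opaque */
                    fprintf(stderr, "produce failed: %s\n",
                            rd_kafka_err2str(rd_kafka_last_error()));
                    return -1;
            }
            return 0;   /* queued; rd_kafka_poll() serves the delivery report */
    }
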
-
-
-
-/**
- * @brief Produce and send a single message to broker.
- *
- * \p rkt is the target topic which must have been previously created with
- * `rd_kafka_topic_new()`.
- *
- * `rd_kafka_produce()` is an asynchronous, non-blocking API.
- *
- * \p partition is the target partition, either:
- *   - RD_KAFKA_PARTITION_UA (unassigned) for
- *     automatic partitioning using the topic's partitioner function, or
- *   - a fixed partition (0..N)
- *
- * \p msgflags is zero or more of the following flags OR:ed together:
- *    RD_KAFKA_MSG_F_BLOCK - block \p produce*() call if
- *                           \p queue.buffering.max.messages or
- *                           \p queue.buffering.max.kbytes are exceeded.
- *                           Messages are considered in-queue from the point they
- *                           are accepted by produce() until their corresponding
- *                           delivery report callback/event returns.
- *                           It is thus a requirement to call 
- *                           rd_kafka_poll() (or equiv.) from a separate
- *                           thread when F_BLOCK is used.
- *                           See WARNING on \c RD_KAFKA_MSG_F_BLOCK above.
- *
- *    RD_KAFKA_MSG_F_FREE - rdkafka will free(3) \p payload when it is done
- *                          with it.
- *    RD_KAFKA_MSG_F_COPY - the \p payload data will be copied and the 
- *                          \p payload pointer will not be used by rdkafka
- *                          after the call returns.
- *
- *    .._F_FREE and .._F_COPY are mutually exclusive.
- *
- *    If the function returns -1 and RD_KAFKA_MSG_F_FREE was specified, then
- *    the memory associated with the payload is still the caller's
- *    responsibility.
- *
- * \p payload is the message payload of size \p len bytes.
- *
- * \p key is an optional message key of size \p keylen bytes, if non-NULL it
- * will be passed to the topic partitioner as well as be s

<TRUNCATED>

[33/51] [partial] nifi-minifi-cpp git commit: MINIFICPP-512 - upgrade to librdkafka 0.11.4

Posted by ph...@apache.org.
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdkafka_assignor.c
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdkafka_assignor.c b/thirdparty/librdkafka-0.11.1/src/rdkafka_assignor.c
deleted file mode 100644
index 45946db..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdkafka_assignor.c
+++ /dev/null
@@ -1,551 +0,0 @@
-/*
- * librdkafka - The Apache Kafka C/C++ library
- *
- * Copyright (c) 2015 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-#include "rdkafka_int.h"
-#include "rdkafka_assignor.h"
-
-#include <ctype.h>
-
-/**
- * Clear out and free any memory used by the member, but not the rkgm itself.
- */
-void rd_kafka_group_member_clear (rd_kafka_group_member_t *rkgm) {
-        if (rkgm->rkgm_subscription)
-                rd_kafka_topic_partition_list_destroy(rkgm->rkgm_subscription);
-
-        if (rkgm->rkgm_assignment)
-                rd_kafka_topic_partition_list_destroy(rkgm->rkgm_assignment);
-
-        rd_list_destroy(&rkgm->rkgm_eligible);
-
-        if (rkgm->rkgm_member_id)
-                rd_kafkap_str_destroy(rkgm->rkgm_member_id);
-
-        if (rkgm->rkgm_userdata)
-                rd_kafkap_bytes_destroy(rkgm->rkgm_userdata);
-
-        if (rkgm->rkgm_member_metadata)
-                rd_kafkap_bytes_destroy(rkgm->rkgm_member_metadata);
-
-        memset(rkgm, 0, sizeof(*rkgm));
-}
-
-
-/**
- * Member id string comparator (takes rd_kafka_group_member_t *)
- */
-int rd_kafka_group_member_cmp (const void *_a, const void *_b) {
-        const rd_kafka_group_member_t *a =
-                (const rd_kafka_group_member_t *)_a;
-        const rd_kafka_group_member_t *b =
-                (const rd_kafka_group_member_t *)_b;
-
-        return rd_kafkap_str_cmp(a->rkgm_member_id, b->rkgm_member_id);
-}
-
-
-/**
- * Returns true if member subscribes to topic, else false.
- */
-int
-rd_kafka_group_member_find_subscription (rd_kafka_t *rk,
-					 const rd_kafka_group_member_t *rkgm,
-					 const char *topic) {
-	int i;
-
-	/* Match against member's subscription. */
-        for (i = 0 ; i < rkgm->rkgm_subscription->cnt ; i++) {
-                const rd_kafka_topic_partition_t *rktpar =
-                        &rkgm->rkgm_subscription->elems[i];
-
-		if (rd_kafka_topic_partition_match(rk, rkgm, rktpar,
-						   topic, NULL))
-			return 1;
-	}
-
-	return 0;
-}
-
-
-
-static rd_kafkap_bytes_t *
-rd_kafka_consumer_protocol_member_metadata_new (
-	const rd_list_t *topics,
-        const void *userdata, size_t userdata_size) {
-        rd_kafka_buf_t *rkbuf;
-        rd_kafkap_bytes_t *kbytes;
-        int i;
-	int topic_cnt = rd_list_cnt(topics);
-	const rd_kafka_topic_info_t *tinfo;
-        size_t len;
-
-        /*
-         * MemberMetadata => Version Subscription AssignmentStrategies
-         *   Version      => int16
-         *   Subscription => Topics UserData
-         *     Topics     => [String]
-         *     UserData     => Bytes
-         */
-
-        rkbuf = rd_kafka_buf_new(1, 100 + (topic_cnt * 100) + userdata_size);
-
-        rd_kafka_buf_write_i16(rkbuf, 0);
-        rd_kafka_buf_write_i32(rkbuf, topic_cnt);
-	RD_LIST_FOREACH(tinfo, topics, i)
-                rd_kafka_buf_write_str(rkbuf, tinfo->topic, -1);
-	if (userdata)
-		rd_kafka_buf_write_bytes(rkbuf, userdata, userdata_size);
-	else /* Kafka 0.9.0.0 can't parse NULL bytes, so we provide empty. */
-		rd_kafka_buf_write_bytes(rkbuf, "", 0);
-
-        /* Get binary buffer and allocate a new Kafka Bytes with a copy. */
-        rd_slice_init_full(&rkbuf->rkbuf_reader, &rkbuf->rkbuf_buf);
-        len = rd_slice_remains(&rkbuf->rkbuf_reader);
-        kbytes = rd_kafkap_bytes_new(NULL, (int32_t)len);
-        rd_slice_read(&rkbuf->rkbuf_reader, (void *)kbytes->data, len);
-        rd_kafka_buf_destroy(rkbuf);
-
-        return kbytes;
-
-}
-
-
-
-
-rd_kafkap_bytes_t *
-rd_kafka_assignor_get_metadata (rd_kafka_assignor_t *rkas,
-				const rd_list_t *topics) {
-        return rd_kafka_consumer_protocol_member_metadata_new(
-                topics, rkas->rkas_userdata,
-                rkas->rkas_userdata_size);
-}
-
-
-
-
-
-/**
- * Returns 1 if all subscriptions are satisfied for this member, else 0.
- */
-static int rd_kafka_member_subscription_match (
-        rd_kafka_cgrp_t *rkcg,
-        rd_kafka_group_member_t *rkgm,
-        const rd_kafka_metadata_topic_t *topic_metadata,
-        rd_kafka_assignor_topic_t *eligible_topic) {
-        int i;
-        int has_regex = 0;
-        int matched = 0;
-
-        /* Match against member's subscription. */
-        for (i = 0 ; i < rkgm->rkgm_subscription->cnt ; i++) {
-                const rd_kafka_topic_partition_t *rktpar =
-                        &rkgm->rkgm_subscription->elems[i];
-		int matched_by_regex = 0;
-
-		if (rd_kafka_topic_partition_match(rkcg->rkcg_rk, rkgm, rktpar,
-						   topic_metadata->topic,
-						   &matched_by_regex)) {
-			rd_list_add(&rkgm->rkgm_eligible,
-				    (void *)topic_metadata);
-			matched++;
-			has_regex += matched_by_regex;
-		}
-	}
-
-        if (matched)
-                rd_list_add(&eligible_topic->members, rkgm);
-
-        if (!has_regex &&
-            rd_list_cnt(&rkgm->rkgm_eligible) == rkgm->rkgm_subscription->cnt)
-                return 1; /* All subscriptions matched */
-        else
-                return 0;
-}
-
-
-static void
-rd_kafka_assignor_topic_destroy (rd_kafka_assignor_topic_t *at) {
-        rd_list_destroy(&at->members);
-        rd_free(at);
-}
-
-int rd_kafka_assignor_topic_cmp (const void *_a, const void *_b) {
-        const rd_kafka_assignor_topic_t *a =
-                *(const rd_kafka_assignor_topic_t * const *)_a;
-        const rd_kafka_assignor_topic_t *b =
-                *(const rd_kafka_assignor_topic_t * const *)_b;
-
-        return !strcmp(a->metadata->topic, b->metadata->topic);
-}
-
-/**
- * Maps the available topics to the group members' subscriptions
- * and updates the `member` map with the proper list of eligible topics;
- * the latter are returned in `eligible_topics`.
- */
-static void
-rd_kafka_member_subscriptions_map (rd_kafka_cgrp_t *rkcg,
-                                   rd_list_t *eligible_topics,
-                                   const rd_kafka_metadata_t *metadata,
-                                   rd_kafka_group_member_t *members,
-                                   int member_cnt) {
-        int ti;
-        rd_kafka_assignor_topic_t *eligible_topic = NULL;
-
-        rd_list_init(eligible_topics, RD_MIN(metadata->topic_cnt, 10),
-                     (void *)rd_kafka_assignor_topic_destroy);
-
-        /* For each topic in the cluster, scan through the member list
-         * to find matching subscriptions. */
-        for (ti = 0 ; ti < metadata->topic_cnt ; ti++) {
-                int complete_cnt = 0;
-                int i;
-
-                /* Ignore topics in blacklist */
-                if (rkcg->rkcg_rk->rk_conf.topic_blacklist &&
-		    rd_kafka_pattern_match(rkcg->rkcg_rk->rk_conf.
-                                           topic_blacklist,
-                                           metadata->topics[ti].topic)) {
-                        rd_kafka_dbg(rkcg->rkcg_rk, TOPIC, "BLACKLIST",
-                                   "Assignor ignoring blacklisted "
-                                     "topic \"%s\"",
-                                     metadata->topics[ti].topic);
-                        continue;
-                }
-
-                if (!eligible_topic)
-                        eligible_topic = rd_calloc(1, sizeof(*eligible_topic));
-
-                rd_list_init(&eligible_topic->members, member_cnt, NULL);
-
-                /* For each member: scan through its topic subscription */
-                for (i = 0 ; i < member_cnt ; i++) {
-                        /* Match topic against existing metadata,
-                           incl regex matching. */
-                        if (rd_kafka_member_subscription_match(
-                                    rkcg, &members[i], &metadata->topics[ti],
-                                    eligible_topic))
-                                complete_cnt++;
-                }
-
-                if (rd_list_empty(&eligible_topic->members)) {
-                        rd_list_destroy(&eligible_topic->members);
-                        continue;
-                }
-
-                eligible_topic->metadata = &metadata->topics[ti];
-                rd_list_add(eligible_topics, eligible_topic);
-                eligible_topic = NULL;
-
-                if (complete_cnt == (int)member_cnt)
-                        break;
-        }
-
-        if (eligible_topic)
-                rd_free(eligible_topic);
-}
-
-
-rd_kafka_resp_err_t
-rd_kafka_assignor_run (rd_kafka_cgrp_t *rkcg,
-                       const char *protocol_name,
-                       rd_kafka_metadata_t *metadata,
-                       rd_kafka_group_member_t *members,
-                       int member_cnt,
-                       char *errstr, size_t errstr_size) {
-        rd_kafka_resp_err_t err;
-        rd_kafka_assignor_t *rkas;
-        rd_ts_t ts_start = rd_clock();
-        int i;
-        rd_list_t eligible_topics;
-        int j;
-
-	if (!(rkas = rd_kafka_assignor_find(rkcg->rkcg_rk, protocol_name)) ||
-	    !rkas->rkas_enabled) {
-		rd_snprintf(errstr, errstr_size,
-			    "Unsupported assignor \"%s\"", protocol_name);
-		return RD_KAFKA_RESP_ERR__UNKNOWN_PROTOCOL;
-	}
-
-
-        /* Map available topics to subscribing members */
-        rd_kafka_member_subscriptions_map(rkcg, &eligible_topics, metadata,
-                                          members, member_cnt);
-
-
-        if (rkcg->rkcg_rk->rk_conf.debug & RD_KAFKA_DBG_CGRP) {
-                rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "ASSIGN",
-                             "Group \"%s\" running %s assignment for "
-                             "%d member(s):",
-                             rkcg->rkcg_group_id->str, protocol_name,
-                             member_cnt);
-
-                for (i = 0 ; i < member_cnt ; i++) {
-                        const rd_kafka_group_member_t *member = &members[i];
-
-                        rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "ASSIGN",
-                                     " Member \"%.*s\"%s with "
-                                     "%d subscription(s):",
-                                     RD_KAFKAP_STR_PR(member->rkgm_member_id),
-                                     !rd_kafkap_str_cmp(member->rkgm_member_id,
-                                                        rkcg->rkcg_member_id) ?
-                                     " (me)":"",
-                                     member->rkgm_subscription->cnt);
-                        for (j = 0 ; j < member->rkgm_subscription->cnt ; j++) {
-                                const rd_kafka_topic_partition_t *p =
-                                        &member->rkgm_subscription->elems[j];
-                                rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "ASSIGN",
-                                             "  %s [%"PRId32"]",
-                                             p->topic, p->partition);
-                        }
-                }
-
-
-        }
-
-        /* Call assignors assign callback */
-        err = rkas->rkas_assign_cb(rkcg->rkcg_rk,
-                                    rkcg->rkcg_member_id->str,
-                                    protocol_name, metadata,
-                                    members, member_cnt,
-                                    (rd_kafka_assignor_topic_t **)
-                                    eligible_topics.rl_elems,
-                                    eligible_topics.rl_cnt,
-                                    errstr, sizeof(errstr),
-                                    rkas->rkas_opaque);
-
-        if (err) {
-                rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "ASSIGN",
-                             "Group \"%s\" %s assignment failed "
-                             "for %d member(s): %s",
-                             rkcg->rkcg_group_id->str, protocol_name,
-                             (int)member_cnt, errstr);
-        } else if (rkcg->rkcg_rk->rk_conf.debug & RD_KAFKA_DBG_CGRP) {
-                rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "ASSIGN",
-                             "Group \"%s\" %s assignment for %d member(s) "
-                             "finished in %.3fms:",
-                             rkcg->rkcg_group_id->str, protocol_name,
-                             (int)member_cnt,
-                             (float)(rd_clock() - ts_start)/1000.0f);
-                for (i = 0 ; i < member_cnt ; i++) {
-                        const rd_kafka_group_member_t *member = &members[i];
-
-                        rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "ASSIGN",
-                                     " Member \"%.*s\"%s assigned "
-                                     "%d partition(s):",
-                                     RD_KAFKAP_STR_PR(member->rkgm_member_id),
-                                     !rd_kafkap_str_cmp(member->rkgm_member_id,
-                                                        rkcg->rkcg_member_id) ?
-                                     " (me)":"",
-                                     member->rkgm_assignment->cnt);
-                        for (j = 0 ; j < member->rkgm_assignment->cnt ; j++) {
-                                const rd_kafka_topic_partition_t *p =
-                                        &member->rkgm_assignment->elems[j];
-                                rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "ASSIGN",
-                                             "  %s [%"PRId32"]",
-                                             p->topic, p->partition);
-                        }
-                }
-        }
-
-        rd_list_destroy(&eligible_topics);
-
-        return err;
-}
-
-
-/**
- * Assignor protocol string comparator
- */
-static int rd_kafka_assignor_cmp_str (const void *_a, const void *_b) {
-        const char *a = _a;
-        const rd_kafka_assignor_t *b = _b;
-
-        return rd_kafkap_str_cmp_str2(a, b->rkas_protocol_name);
-}
-
-/**
- * Find assignor by protocol name.
- *
- * Locality: any
- * Locks: none
- */
-rd_kafka_assignor_t *
-rd_kafka_assignor_find (rd_kafka_t *rk, const char *protocol) {
-        return (rd_kafka_assignor_t *)
-                rd_list_find(&rk->rk_conf.partition_assignors, protocol,
-                             rd_kafka_assignor_cmp_str);
-}
-
-
-/**
- * Destroys an assignor (but does not unlink).
- */
-static void rd_kafka_assignor_destroy (rd_kafka_assignor_t *rkas) {
-        rd_kafkap_str_destroy(rkas->rkas_protocol_type);
-        rd_kafkap_str_destroy(rkas->rkas_protocol_name);
-        rd_free(rkas);
-}
-
-
-
-/**
- * Add an assignor, overwriting any previous one with the same protocol_name.
- */
-static rd_kafka_resp_err_t
-rd_kafka_assignor_add (rd_kafka_t *rk,
-		       rd_kafka_assignor_t **rkasp,
-                       const char *protocol_type,
-                       const char *protocol_name,
-                       rd_kafka_resp_err_t (*assign_cb) (
-                               rd_kafka_t *rk,
-                               const char *member_id,
-                               const char *protocol_name,
-                               const rd_kafka_metadata_t *metadata,
-                               rd_kafka_group_member_t *members,
-                               size_t member_cnt,
-                               rd_kafka_assignor_topic_t **eligible_topics,
-                               size_t eligible_topic_cnt,
-                               char *errstr, size_t errstr_size, void *opaque),
-                       void *opaque) {
-        rd_kafka_assignor_t *rkas;
-
-	if (rkasp)
-		*rkasp = NULL;
-
-        if (rd_kafkap_str_cmp_str(rk->rk_conf.group_protocol_type,
-                                  protocol_type))
-                return RD_KAFKA_RESP_ERR__UNKNOWN_PROTOCOL;
-
-        /* Dont overwrite application assignors */
-        if ((rkas = rd_kafka_assignor_find(rk, protocol_name))) {
-		if (rkasp)
-			*rkasp = rkas;
-		return RD_KAFKA_RESP_ERR__CONFLICT;
-	}
-
-        rkas = rd_calloc(1, sizeof(*rkas));
-
-        rkas->rkas_protocol_name    = rd_kafkap_str_new(protocol_name, -1);
-        rkas->rkas_protocol_type    = rd_kafkap_str_new(protocol_type, -1);
-        rkas->rkas_assign_cb        = assign_cb;
-        rkas->rkas_get_metadata_cb  = rd_kafka_assignor_get_metadata;
-        rkas->rkas_opaque = opaque;
-
-        rd_list_add(&rk->rk_conf.partition_assignors, rkas);
-
-	if (rkasp)
-		*rkasp = rkas;
-
-        return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-
-/* Right trim string of whitespaces */
-static void rtrim (char *s) {
-	char *e = s + strlen(s);
-
-	if (e == s)
-		return;
-
-	while (e >= s && isspace(*e))
-		e--;
-
-	*e = '\0';
-}
-
-
-/**
- * Initialize assignor list based on configuration.
- */
-int rd_kafka_assignors_init (rd_kafka_t *rk, char *errstr, size_t errstr_size) {
-	char *wanted;
-	char *s;
-
-        rd_list_init(&rk->rk_conf.partition_assignors, 2,
-                     (void *)rd_kafka_assignor_destroy);
-
-	rd_strdupa(&wanted, rk->rk_conf.partition_assignment_strategy);
-
-	s = wanted;
-	while (*s) {
-		rd_kafka_assignor_t *rkas = NULL;
-		char *t;
-
-		/* Left trim */
-		while (*s == ' ' || *s == ',')
-			s++;
-
-		if ((t = strchr(s, ','))) {
-			*t = '\0';
-			t++;
-		} else {
-			t = s + strlen(s);
-		}
-
-		/* Right trim */
-		rtrim(s);
-
-		/* Match builtin consumer assignors */
-		if (!strcmp(s, "range"))
-			rd_kafka_assignor_add(
-				rk, &rkas, "consumer", "range",
-				rd_kafka_range_assignor_assign_cb,
-				NULL);
-		else if (!strcmp(s, "roundrobin"))
-			rd_kafka_assignor_add(
-				rk, &rkas, "consumer", "roundrobin",
-				rd_kafka_roundrobin_assignor_assign_cb,
-				NULL);
-		else {
-			rd_snprintf(errstr, errstr_size,
-				    "Unsupported partition.assignment.strategy:"
-				    " %s", s);
-			return -1;
-		}
-
-		if (rkas) {
-			if (!rkas->rkas_enabled) {
-				rkas->rkas_enabled = 1;
-				rk->rk_conf.enabled_assignor_cnt++;
-			}
-		}
-
-		s = t;
-	}
-
-	return 0;
-}
-
-
-
-/**
- * Free assignors
- */
-void rd_kafka_assignors_term (rd_kafka_t *rk) {
-        rd_list_destroy(&rk->rk_conf.partition_assignors);
-}
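
For context, the strategy list that rd_kafka_assignors_init() above splits on
commas is fed straight from the public "partition.assignment.strategy"
configuration property. A minimal consumer-side sketch using only public API
calls that also appear in this tree (broker-less; the group id and property
values are made up for illustration):

    #include <stdio.h>
    #include <librdkafka/rdkafka.h>

    int main (void) {
            char errstr[512];
            rd_kafka_conf_t *conf = rd_kafka_conf_new();

            /* Both builtin assignors are requested; the comma-separated list
             * is parsed and each matching assignor enabled when the handle
             * is created. */
            if (rd_kafka_conf_set(conf, "partition.assignment.strategy",
                                  "range,roundrobin",
                                  errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK ||
                rd_kafka_conf_set(conf, "group.id", "example-group",
                                  errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) {
                    fprintf(stderr, "conf: %s\n", errstr);
                    return 1;
            }

            rd_kafka_t *rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf,
                                          errstr, sizeof(errstr));
            if (!rk) {
                    fprintf(stderr, "rd_kafka_new: %s\n", errstr);
                    return 1;
            }
            rd_kafka_destroy(rk);
            return 0;
    }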

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdkafka_assignor.h
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdkafka_assignor.h b/thirdparty/librdkafka-0.11.1/src/rdkafka_assignor.h
deleted file mode 100644
index 75a2dd8..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdkafka_assignor.h
+++ /dev/null
@@ -1,159 +0,0 @@
-/*
- * librdkafka - The Apache Kafka C/C++ library
- *
- * Copyright (c) 2015 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-#pragma once
-
-
-
-
-
-typedef struct rd_kafka_group_member_s {
-        rd_kafka_topic_partition_list_t *rkgm_subscription;
-        rd_kafka_topic_partition_list_t *rkgm_assignment;
-        rd_list_t                        rkgm_eligible;
-        rd_kafkap_str_t                 *rkgm_member_id;
-        rd_kafkap_bytes_t               *rkgm_userdata;
-        rd_kafkap_bytes_t               *rkgm_member_metadata;
-} rd_kafka_group_member_t;
-
-
-int rd_kafka_group_member_cmp (const void *_a, const void *_b);
-
-int
-rd_kafka_group_member_find_subscription (rd_kafka_t *rk,
-					 const rd_kafka_group_member_t *rkgm,
-					 const char *topic);
-
-
-/**
- * Structure to hold metadata for a single topic and all its
- * subscribing members.
- */
-typedef struct rd_kafka_assignor_topic_s {
-        const rd_kafka_metadata_topic_t *metadata;
-        rd_list_t members;     /* rd_kafka_group_member_t * */
-} rd_kafka_assignor_topic_t;
-
-
-int rd_kafka_assignor_topic_cmp (const void *_a, const void *_b);
-
-
-typedef struct rd_kafka_assignor_s {
-        rd_kafkap_str_t   *rkas_protocol_type;
-        rd_kafkap_str_t   *rkas_protocol_name;
-
-        const void        *rkas_userdata;
-        size_t             rkas_userdata_size;
-
-	int                rkas_enabled;
-
-        rd_kafka_resp_err_t (*rkas_assign_cb) (
-                rd_kafka_t *rk,
-                const char *member_id,
-                const char *protocol_name,
-                const rd_kafka_metadata_t *metadata,
-                rd_kafka_group_member_t *members,
-                size_t member_cnt,
-                rd_kafka_assignor_topic_t **eligible_topics,
-                size_t eligible_topic_cnt,
-                char *errstr,
-                size_t errstr_size,
-                void *opaque);
-
-        rd_kafkap_bytes_t *(*rkas_get_metadata_cb) (
-                struct rd_kafka_assignor_s *rkpas,
-		const rd_list_t *topics);
-
-
-        void (*rkas_on_assignment_cb) (const char *member_id,
-                                        rd_kafka_group_member_t
-                                        *assignment, void *opaque);
-
-        void *rkas_opaque;
-} rd_kafka_assignor_t;
-
-
-rd_kafkap_bytes_t *
-rd_kafka_assignor_get_metadata (rd_kafka_assignor_t *rkpas,
-				const rd_list_t *topics);
-
-
-void rd_kafka_assignor_update_subscription (rd_kafka_assignor_t *rkpas,
-                                            const rd_kafka_topic_partition_list_t
-                                            *subscription);
-
-
-rd_kafka_resp_err_t
-rd_kafka_assignor_run (struct rd_kafka_cgrp_s *rkcg,
-                       const char *protocol_name,
-                       rd_kafka_metadata_t *metadata,
-                       rd_kafka_group_member_t *members, int member_cnt,
-                       char *errstr, size_t errstr_size);
-
-rd_kafka_assignor_t *
-rd_kafka_assignor_find (rd_kafka_t *rk, const char *protocol);
-
-int rd_kafka_assignors_init (rd_kafka_t *rk, char *errstr, size_t errstr_size);
-void rd_kafka_assignors_term (rd_kafka_t *rk);
-
-
-
-void rd_kafka_group_member_clear (rd_kafka_group_member_t *rkgm);
-
-
-/**
- * rd_kafka_range_assignor.c
- */
-rd_kafka_resp_err_t
-rd_kafka_range_assignor_assign_cb (rd_kafka_t *rk,
-                                   const char *member_id,
-                                   const char *protocol_name,
-                                   const rd_kafka_metadata_t *metadata,
-                                   rd_kafka_group_member_t *members,
-                                   size_t member_cnt,
-                                   rd_kafka_assignor_topic_t **eligible_topics,
-                                   size_t eligible_topic_cnt,
-                                   char *errstr, size_t errstr_size,
-                                   void *opaque);
-
-
-/**
- * rd_kafka_roundrobin_assignor.c
- */
-rd_kafka_resp_err_t
-rd_kafka_roundrobin_assignor_assign_cb (rd_kafka_t *rk,
-					const char *member_id,
-					const char *protocol_name,
-					const rd_kafka_metadata_t *metadata,
-					rd_kafka_group_member_t *members,
-					size_t member_cnt,
-					rd_kafka_assignor_topic_t
-					**eligible_topics,
-					size_t eligible_topic_cnt,
-					char *errstr, size_t errstr_size,
-					void *opaque);
-


http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/packaging/cmake/README.md
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/packaging/cmake/README.md b/thirdparty/librdkafka-0.11.4/packaging/cmake/README.md
new file mode 100644
index 0000000..47ad2cb
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/packaging/cmake/README.md
@@ -0,0 +1,38 @@
+# Build librdkafka with cmake
+
+The cmake build mode is experimental and not officially supported;
+the community is asked to maintain and support this mode through PRs.
+
+Set up build environment (from top-level librdkafka directory):
+
+    $ cmake -H. -B_cmake_build
+
+On MacOSX with OpenSSL from Homebrew you might need to do:
+
+    $ cmake -H. -B_cmake_build -DOPENSSL_ROOT_DIR=/usr/local/opt/openssl
+
+
+Build the library:
+
+    $ cmake --build _cmake_build
+
+If you want to build a static library, enable it when configuring (then build as above):
+
+    $ cmake -H. -B_cmake_build -DRDKAFKA_BUILD_STATIC=1
+
+
+Run (local) tests:
+
+    $ (cd _cmake_build && ctest -VV -R RdKafkaTestBrokerLess)
+
+
+Install library:
+
+    $ cmake --build _cmake_build --target install
+
+
+If you use librdkafka as a submodule in a cmake project and want to link it statically:
+
+      set(RDKAFKA_BUILD_STATIC ON CACHE BOOL "")
+      add_subdirectory(librdkafka)
+      target_link_libraries(your_library_or_executable rdkafka)
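
A quick way to sanity-check whatever was built and installed above is a tiny
C program linked against the library (a sketch; it assumes the headers and
library ended up somewhere the compiler already searches, e.g. built with
"cc check.c -lrdkafka"):

    #include <stdio.h>
    #include <librdkafka/rdkafka.h>

    int main (void) {
            /* Compile-time version from rdkafka.h vs. runtime version of the
             * library actually linked in. */
            printf("built against 0x%08x, running %s\n",
                   (unsigned int)RD_KAFKA_VERSION, rd_kafka_version_str());
            return 0;
    }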

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/packaging/cmake/config.h.in
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/packaging/cmake/config.h.in b/thirdparty/librdkafka-0.11.4/packaging/cmake/config.h.in
new file mode 100644
index 0000000..5c03b4d
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/packaging/cmake/config.h.in
@@ -0,0 +1,40 @@
+#cmakedefine01 WITHOUT_OPTIMIZATION
+#cmakedefine01 ENABLE_DEVEL
+#cmakedefine01 ENABLE_REFCNT_DEBUG
+#cmakedefine01 ENABLE_SHAREDPTR_DEBUG
+
+#cmakedefine01 HAVE_ATOMICS_32
+#cmakedefine01 HAVE_ATOMICS_32_SYNC
+
+#if (HAVE_ATOMICS_32)
+# if (HAVE_ATOMICS_32_SYNC)
+#  define ATOMIC_OP32(OP1,OP2,PTR,VAL) __sync_ ## OP1 ## _and_ ## OP2(PTR, VAL)
+# else
+#  define ATOMIC_OP32(OP1,OP2,PTR,VAL) __atomic_ ## OP1 ## _ ## OP2(PTR, VAL, __ATOMIC_SEQ_CST)
+# endif
+#endif
+
+#cmakedefine01 HAVE_ATOMICS_64
+#cmakedefine01 HAVE_ATOMICS_64_SYNC
+
+#if (HAVE_ATOMICS_64)
+# if (HAVE_ATOMICS_64_SYNC)
+#  define ATOMIC_OP64(OP1,OP2,PTR,VAL) __sync_ ## OP1 ## _and_ ## OP2(PTR, VAL)
+# else
+#  define ATOMIC_OP64(OP1,OP2,PTR,VAL) __atomic_ ## OP1 ## _ ## OP2(PTR, VAL, __ATOMIC_SEQ_CST)
+# endif
+#endif
+
+
+#cmakedefine01 WITH_ZLIB
+#cmakedefine01 WITH_LIBDL
+#cmakedefine01 WITH_PLUGINS
+#define WITH_SNAPPY 1
+#define WITH_SOCKEM 1
+#cmakedefine01 WITH_SSL
+#cmakedefine01 WITH_SASL
+#cmakedefine01 WITH_SASL_SCRAM
+#cmakedefine01 WITH_SASL_CYRUS
+#cmakedefine01 HAVE_REGEX
+#cmakedefine01 HAVE_STRNDUP
+#define SOLIB_EXT "${CMAKE_SHARED_LIBRARY_SUFFIX}"
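
The generated config.h is what maps ATOMIC_OP32()/ATOMIC_OP64() onto either
the __atomic or the legacy __sync builtins, depending on which of the probes
below succeeds. A sketch of how such a macro is typically invoked (assuming a
generated config.h with HAVE_ATOMICS_32 set to 1):

    #include <inttypes.h>
    #include "config.h"   /* the file generated from config.h.in above */

    static int32_t refcnt;

    int32_t ref_get (void) {
            /* Expands to __atomic_add_fetch(&refcnt, 1, __ATOMIC_SEQ_CST)
             * or __sync_add_and_fetch(&refcnt, 1). */
            return ATOMIC_OP32(add, fetch, &refcnt, 1);
    }

    int32_t ref_put (void) {
            return ATOMIC_OP32(sub, fetch, &refcnt, 1);
    }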

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/packaging/cmake/try_compile/atomic_32_test.c
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/packaging/cmake/try_compile/atomic_32_test.c b/thirdparty/librdkafka-0.11.4/packaging/cmake/try_compile/atomic_32_test.c
new file mode 100644
index 0000000..de9738a
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/packaging/cmake/try_compile/atomic_32_test.c
@@ -0,0 +1,8 @@
+#include <inttypes.h>
+
+int32_t foo (int32_t i) {
+  return __atomic_add_fetch(&i, 1, __ATOMIC_SEQ_CST);
+}
+
+int main() {
+}

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/packaging/cmake/try_compile/atomic_64_test.c
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/packaging/cmake/try_compile/atomic_64_test.c b/thirdparty/librdkafka-0.11.4/packaging/cmake/try_compile/atomic_64_test.c
new file mode 100644
index 0000000..a713c74
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/packaging/cmake/try_compile/atomic_64_test.c
@@ -0,0 +1,8 @@
+#include <inttypes.h>
+
+int64_t foo (int64_t i) {
+  return __atomic_add_fetch(&i, 1, __ATOMIC_SEQ_CST);
+}
+
+int main() {
+}

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/packaging/cmake/try_compile/dlopen_test.c
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/packaging/cmake/try_compile/dlopen_test.c b/thirdparty/librdkafka-0.11.4/packaging/cmake/try_compile/dlopen_test.c
new file mode 100644
index 0000000..61c2504
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/packaging/cmake/try_compile/dlopen_test.c
@@ -0,0 +1,11 @@
+#include <string.h>
+#include <dlfcn.h>
+
+int main() {
+        void *h;
+        /* Try loading anything, we don't care if it works */
+        h = dlopen("__nothing_rdkafka.so", RTLD_NOW|RTLD_LOCAL);
+        if (h)
+                dlclose(h);
+        return 0;
+}

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/packaging/cmake/try_compile/libsasl2_test.c
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/packaging/cmake/try_compile/libsasl2_test.c b/thirdparty/librdkafka-0.11.4/packaging/cmake/try_compile/libsasl2_test.c
new file mode 100644
index 0000000..3f3ab34
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/packaging/cmake/try_compile/libsasl2_test.c
@@ -0,0 +1,7 @@
+#include <string.h>
+#include <sasl/sasl.h>
+
+int main() {
+        sasl_done();
+        return 0;
+}

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/packaging/cmake/try_compile/rdkafka_setup.cmake
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/packaging/cmake/try_compile/rdkafka_setup.cmake b/thirdparty/librdkafka-0.11.4/packaging/cmake/try_compile/rdkafka_setup.cmake
new file mode 100644
index 0000000..b5a3535
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/packaging/cmake/try_compile/rdkafka_setup.cmake
@@ -0,0 +1,76 @@
+try_compile(
+    HAVE_REGEX
+    "${CMAKE_CURRENT_BINARY_DIR}/try_compile"
+    "${TRYCOMPILE_SRC_DIR}/regex_test.c"
+)
+
+try_compile(
+    HAVE_STRNDUP
+    "${CMAKE_CURRENT_BINARY_DIR}/try_compile"
+    "${TRYCOMPILE_SRC_DIR}/strndup_test.c"
+)
+
+# Atomic 32 tests {
+set(LINK_ATOMIC NO)
+set(HAVE_ATOMICS_32 NO)
+set(HAVE_ATOMICS_32_SYNC NO)
+
+try_compile(
+    _atomics_32
+    "${CMAKE_CURRENT_BINARY_DIR}/try_compile"
+    "${TRYCOMPILE_SRC_DIR}/atomic_32_test.c"
+)
+
+if(_atomics_32)
+  set(HAVE_ATOMICS_32 YES)
+else()
+  try_compile(
+      _atomics_32_lib
+      "${CMAKE_CURRENT_BINARY_DIR}/try_compile"
+      "${TRYCOMPILE_SRC_DIR}/atomic_32_test.c"
+      LINK_LIBRARIES "-latomic"
+  )
+  if(_atomics_32_lib)
+    set(HAVE_ATOMICS_32 YES)
+    set(LINK_ATOMIC YES)
+  else()
+    try_compile(
+        HAVE_ATOMICS_32_SYNC
+        "${CMAKE_CURRENT_BINARY_DIR}/try_compile"
+        "${TRYCOMPILE_SRC_DIR}/sync_32_test.c"
+    )
+  endif()
+endif()
+# }
+
+# Atomic 64 tests {
+set(HAVE_ATOMICS_64 NO)
+set(HAVE_ATOMICS_64_SYNC NO)
+
+try_compile(
+    _atomics_64
+    "${CMAKE_CURRENT_BINARY_DIR}/try_compile"
+    "${TRYCOMPILE_SRC_DIR}/atomic_64_test.c"
+)
+
+if(_atomics_64)
+  set(HAVE_ATOMICS_64 YES)
+else()
+  try_compile(
+      _atomics_64_lib
+      "${CMAKE_CURRENT_BINARY_DIR}/try_compile"
+      "${TRYCOMPILE_SRC_DIR}/atomic_64_test.c"
+      LINK_LIBRARIES "-latomic"
+  )
+  if(_atomics_64_lib)
+    set(HAVE_ATOMICS_64 YES)
+    set(LINK_ATOMIC YES)
+  else()
+    try_compile(
+        HAVE_ATOMICS_64_SYNC
+        "${CMAKE_CURRENT_BINARY_DIR}/try_compile"
+        "${TRYCOMPILE_SRC_DIR}/sync_64_test.c"
+    )
+  endif()
+endif()
+# }

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/packaging/cmake/try_compile/regex_test.c
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/packaging/cmake/try_compile/regex_test.c b/thirdparty/librdkafka-0.11.4/packaging/cmake/try_compile/regex_test.c
new file mode 100644
index 0000000..1d6eeb3
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/packaging/cmake/try_compile/regex_test.c
@@ -0,0 +1,10 @@
+#include <stddef.h>
+#include <regex.h>
+
+int main() {
+   regcomp(NULL, NULL, 0);
+   regexec(NULL, NULL, 0, NULL, 0);
+   regerror(0, NULL, NULL, 0);
+   regfree(NULL);
+   return 0;
+}

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/packaging/cmake/try_compile/strndup_test.c
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/packaging/cmake/try_compile/strndup_test.c b/thirdparty/librdkafka-0.11.4/packaging/cmake/try_compile/strndup_test.c
new file mode 100644
index 0000000..9b62043
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/packaging/cmake/try_compile/strndup_test.c
@@ -0,0 +1,5 @@
+#include <string.h>
+
+int main() {
+   return strndup("hi", 2) ? 0 : 1;
+}

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/packaging/cmake/try_compile/sync_32_test.c
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/packaging/cmake/try_compile/sync_32_test.c b/thirdparty/librdkafka-0.11.4/packaging/cmake/try_compile/sync_32_test.c
new file mode 100644
index 0000000..44ba120
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/packaging/cmake/try_compile/sync_32_test.c
@@ -0,0 +1,8 @@
+#include <inttypes.h>
+
+int32_t foo (int32_t i) {
+  return __sync_add_and_fetch(&i, 1);
+}
+
+int main() {
+}

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/packaging/cmake/try_compile/sync_64_test.c
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/packaging/cmake/try_compile/sync_64_test.c b/thirdparty/librdkafka-0.11.4/packaging/cmake/try_compile/sync_64_test.c
new file mode 100644
index 0000000..ad06204
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/packaging/cmake/try_compile/sync_64_test.c
@@ -0,0 +1,8 @@
+#include <inttypes.h>
+
+int64_t foo (int64_t i) {
+  return __sync_add_and_fetch(&i, 1);
+}
+
+int main() {
+}

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/packaging/debian/.gitignore
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/packaging/debian/.gitignore b/thirdparty/librdkafka-0.11.4/packaging/debian/.gitignore
new file mode 100644
index 0000000..eb66d4d
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/packaging/debian/.gitignore
@@ -0,0 +1,6 @@
+*.log
+files
+librdkafka-dev
+librdkafka1-dbg
+librdkafka1
+tmp

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/packaging/debian/changelog
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/packaging/debian/changelog b/thirdparty/librdkafka-0.11.4/packaging/debian/changelog
new file mode 100644
index 0000000..c50cb5a
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/packaging/debian/changelog
@@ -0,0 +1,66 @@
+librdkafka (0.8.6-1) unstable; urgency=medium
+
+  * New upstream release.
+  * Backport upstream commit f6fd0da, adding --disable-silent-rules
+    compatibility support to mklove. (Closes: #788742)
+
+ -- Faidon Liambotis <pa...@debian.org>  Sun, 19 Jul 2015 01:36:18 +0300
+
+librdkafka (0.8.5-2) unstable; urgency=medium
+
+  * Install rdkafka.pc in the right, multiarch location. (Closes: #766759)
+
+ -- Faidon Liambotis <pa...@debian.org>  Sun, 26 Oct 2014 06:47:07 +0200
+
+librdkafka (0.8.5-1) unstable; urgency=medium
+
+  * New upstream release.
+    - Fixes kFreeBSD FTBFS.
+  * Ship rdkafka.pc pkg-config in librdkafka-dev.
+
+ -- Faidon Liambotis <pa...@debian.org>  Fri, 24 Oct 2014 18:03:22 +0300
+
+librdkafka (0.8.4-1) unstable; urgency=medium
+
+  * New upstream release, including a new build system.
+    - Add Build-Depends on perl, required by configure.
+    - Support multiarch library paths.
+    - Better detection of architecture atomic builtins, supporting more
+      architectures. (Closes: #739930)
+    - Various portability bugs fixed. (Closes: #730506)
+    - Update debian/librdkafka1.symbols.
+  * Convert to a multiarch package.
+  * Switch to Architecture: any, because of renewed upstream portability.
+  * Update debian/copyright to add src/ before Files: paths.
+  * Update Standards-Version to 3.9.6, no changes needed.
+  * Ship only the C library for now, not the new C++ library; the latter is
+    still in flux in some ways and will probably be shipped in a separate
+    package in a future release.
+
+ -- Faidon Liambotis <pa...@debian.org>  Wed, 22 Oct 2014 23:57:24 +0300
+
+librdkafka (0.8.3-1) unstable; urgency=medium
+
+  * New upstream release.
+    - Multiple internal symbols hidden; breaks ABI without a SONAME bump, but
+      these were internal and should not break any applications, packaged or
+      not.
+  * Update Standards-Version to 3.9.5, no changes needed.
+
+ -- Faidon Liambotis <pa...@debian.org>  Tue, 18 Feb 2014 02:21:43 +0200
+
+librdkafka (0.8.1-1) unstable; urgency=medium
+
+  * New upstream release.
+    - Multiple fixes to FTBFS on various architectures. (Closes: #730506)
+    - Remove dh_auto_clean override, fixed upstream.
+  * Limit the set of architectures: upstream currently relies on 64-bit atomic
+    operations that several Debian architectures do not support.
+
+ -- Faidon Liambotis <pa...@debian.org>  Thu, 05 Dec 2013 16:53:28 +0200
+
+librdkafka (0.8.0-1) unstable; urgency=low
+
+  * Initial release. (Closes: #710271)
+
+ -- Faidon Liambotis <pa...@debian.org>  Mon, 04 Nov 2013 16:50:07 +0200

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/packaging/debian/compat
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/packaging/debian/compat b/thirdparty/librdkafka-0.11.4/packaging/debian/compat
new file mode 100644
index 0000000..ec63514
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/packaging/debian/compat
@@ -0,0 +1 @@
+9

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/packaging/debian/control
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/packaging/debian/control b/thirdparty/librdkafka-0.11.4/packaging/debian/control
new file mode 100644
index 0000000..8274798
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/packaging/debian/control
@@ -0,0 +1,49 @@
+Source: librdkafka
+Priority: optional
+Maintainer: Faidon Liambotis <pa...@debian.org>
+Build-Depends: debhelper (>= 9), zlib1g-dev, libssl-dev, libsasl2-dev, python
+Standards-Version: 3.9.6
+Section: libs
+Homepage: https://github.com/edenhill/librdkafka
+Vcs-Git: git://github.com/edenhill/librdkafka.git -b debian
+Vcs-Browser: https://github.com/edenhill/librdkafka/tree/debian
+
+Package: librdkafka1
+Architecture: any
+Multi-Arch: same
+Depends: ${shlibs:Depends}, ${misc:Depends}
+Description: library implementing the Apache Kafka protocol
+ librdkafka is a C implementation of the Apache Kafka protocol. It currently
+ implements the 0.8 version of the protocol and can be used to develop both
+ Producers and Consumers.
+ .
+ More information about Apache Kafka can be found at http://kafka.apache.org/
+
+Package: librdkafka-dev
+Section: libdevel
+Architecture: any
+Multi-Arch: same
+Depends: librdkafka1 (= ${binary:Version}), ${misc:Depends}
+Description: library implementing the Apache Kafka protocol (development headers)
+ librdkafka is a C implementation of the Apache Kafka protocol. It currently
+ implements the 0.8 version of the protocol and can be used to develop both
+ Producers and Consumers.
+ .
+ More information about Apache Kafka can be found at http://kafka.apache.org/
+ .
+ This package contains the development headers.
+
+Package: librdkafka1-dbg
+Section: debug
+Priority: extra
+Architecture: any
+Multi-Arch: same
+Depends: librdkafka1 (= ${binary:Version}), ${misc:Depends}
+Description: library implementing the Apache Kafka protocol (debugging symbols)
+ librdkafka is a C implementation of the Apache Kafka protocol. It currently
+ implements the 0.8 version of the protocol and can be used to develop both
+ Producers and Consumers.
+ .
+ More information about Apache Kafka can be found at http://kafka.apache.org/
+ .
+ This package contains the debugging symbols.

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/packaging/debian/copyright
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/packaging/debian/copyright b/thirdparty/librdkafka-0.11.4/packaging/debian/copyright
new file mode 100644
index 0000000..20885d9
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/packaging/debian/copyright
@@ -0,0 +1,84 @@
+Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
+Upstream-Name: librdkafka
+Source: https://github.com/edenhill/librdkafka
+
+License: BSD-2-clause
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions are met:
+  .
+  1. Redistributions of source code must retain the above copyright notice,
+     this list of conditions and the following disclaimer.
+  2. Redistributions in binary form must reproduce the above copyright notice,
+     this list of conditions and the following disclaimer in the documentation
+     and/or other materials provided with the distribution.
+  .
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+Files: *
+Copyright: 2012-2015, Magnus Edenhill
+License: BSD-2-clause
+
+Files: src/rdcrc32.c src/rdcrc32.h
+Copyright: 2006-2012, Thomas Pircher <te...@gmx.net>
+License: MIT
+  Permission is hereby granted, free of charge, to any person obtaining a copy
+  of this software and associated documentation files (the "Software"), to deal
+  in the Software without restriction, including without limitation the rights
+  to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+  copies of the Software, and to permit persons to whom the Software is
+  furnished to do so, subject to the following conditions:
+  .
+  The above copyright notice and this permission notice shall be included in
+  all copies or substantial portions of the Software.
+  . 
+  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+  IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+  FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+  AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+  LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+  OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+  THE SOFTWARE.
+
+Files: src/snappy.c src/snappy.h src/snappy_compat.h
+Copyright: 2005, Google Inc.
+           2011, Intel Corporation
+License: BSD-3-clause
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions are
+  met:
+  .
+      * Redistributions of source code must retain the above copyright
+  notice, this list of conditions and the following disclaimer.
+      * Redistributions in binary form must reproduce the above
+  copyright notice, this list of conditions and the following disclaimer
+  in the documentation and/or other materials provided with the
+  distribution.
+      * Neither the name of Google Inc. nor the names of its
+  contributors may be used to endorse or promote products derived from
+  this software without specific prior written permission.
+  .
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+Files: debian/*
+Copyright: 2013 Faidon Liambotis <pa...@debian.org>
+License: BSD-2-clause

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/packaging/debian/docs
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/packaging/debian/docs b/thirdparty/librdkafka-0.11.4/packaging/debian/docs
new file mode 100644
index 0000000..891afcd
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/packaging/debian/docs
@@ -0,0 +1,3 @@
+README.md
+INTRODUCTION.md
+CONFIGURATION.md

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/packaging/debian/gbp.conf
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/packaging/debian/gbp.conf b/thirdparty/librdkafka-0.11.4/packaging/debian/gbp.conf
new file mode 100644
index 0000000..b2a0f02
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/packaging/debian/gbp.conf
@@ -0,0 +1,9 @@
+[buildpackage]
+upstream-tree=tag
+upstream-branch=master
+debian-branch=debian
+upstream-tag=%(version)s
+debian-tag=debian/%(version)s
+no-create-orig = True
+tarball-dir = ../tarballs
+export-dir = ../build-area

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/packaging/debian/librdkafka-dev.dirs
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/packaging/debian/librdkafka-dev.dirs b/thirdparty/librdkafka-0.11.4/packaging/debian/librdkafka-dev.dirs
new file mode 100644
index 0000000..4418816
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/packaging/debian/librdkafka-dev.dirs
@@ -0,0 +1,2 @@
+usr/lib
+usr/include

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/packaging/debian/librdkafka-dev.examples
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/packaging/debian/librdkafka-dev.examples b/thirdparty/librdkafka-0.11.4/packaging/debian/librdkafka-dev.examples
new file mode 100644
index 0000000..b45032e
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/packaging/debian/librdkafka-dev.examples
@@ -0,0 +1,2 @@
+examples/rdkafka_example.c
+examples/rdkafka_performance.c

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/packaging/debian/librdkafka-dev.install
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/packaging/debian/librdkafka-dev.install b/thirdparty/librdkafka-0.11.4/packaging/debian/librdkafka-dev.install
new file mode 100644
index 0000000..478f660
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/packaging/debian/librdkafka-dev.install
@@ -0,0 +1,6 @@
+usr/include/*/rdkafka.h
+usr/include/*/rdkafkacpp.h
+usr/lib/*/librdkafka.a
+usr/lib/*/librdkafka.so
+usr/lib/*/librdkafka++.a
+usr/lib/*/librdkafka++.so

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/packaging/debian/librdkafka-dev.substvars
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/packaging/debian/librdkafka-dev.substvars b/thirdparty/librdkafka-0.11.4/packaging/debian/librdkafka-dev.substvars
new file mode 100644
index 0000000..abd3ebe
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/packaging/debian/librdkafka-dev.substvars
@@ -0,0 +1 @@
+misc:Depends=

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/packaging/debian/librdkafka.dsc
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/packaging/debian/librdkafka.dsc b/thirdparty/librdkafka-0.11.4/packaging/debian/librdkafka.dsc
new file mode 100644
index 0000000..65826d4
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/packaging/debian/librdkafka.dsc
@@ -0,0 +1,16 @@
+Format: 3.0 (quilt)
+Source: librdkafka
+Binary: librdkafka1, librdkafka-dev, librdkafka1-dbg
+Architecture: any
+Version: 0.9.1-1pre1
+Maintainer: Magnus Edenhill <li...@edenhill.se>
+Homepage: https://github.com/edenhill/librdkafka
+Standards-Version: 3.9.6
+Vcs-Browser: https://github.com/edenhill/librdkafka/tree/master
+Vcs-Git: git://github.com/edenhill/librdkafka.git -b master
+Build-Depends: debhelper (>= 9), zlib1g-dev, libssl-dev, libsasl2-dev, python
+Package-List:
+ librdkafka-dev deb libdevel optional arch=any
+ librdkafka1 deb libs optional arch=any
+ librdkafka1-dbg deb debug extra arch=any
+Original-Maintainer: Faidon Liambotis <pa...@debian.org>

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/packaging/debian/librdkafka1-dbg.substvars
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/packaging/debian/librdkafka1-dbg.substvars b/thirdparty/librdkafka-0.11.4/packaging/debian/librdkafka1-dbg.substvars
new file mode 100644
index 0000000..abd3ebe
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/packaging/debian/librdkafka1-dbg.substvars
@@ -0,0 +1 @@
+misc:Depends=

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/packaging/debian/librdkafka1.dirs
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/packaging/debian/librdkafka1.dirs b/thirdparty/librdkafka-0.11.4/packaging/debian/librdkafka1.dirs
new file mode 100644
index 0000000..6845771
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/packaging/debian/librdkafka1.dirs
@@ -0,0 +1 @@
+usr/lib

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/packaging/debian/librdkafka1.install
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/packaging/debian/librdkafka1.install b/thirdparty/librdkafka-0.11.4/packaging/debian/librdkafka1.install
new file mode 100644
index 0000000..7e86e5f
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/packaging/debian/librdkafka1.install
@@ -0,0 +1,2 @@
+usr/lib/*/librdkafka.so.*
+usr/lib/*/librdkafka++.so.*

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/packaging/debian/librdkafka1.postinst.debhelper
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/packaging/debian/librdkafka1.postinst.debhelper b/thirdparty/librdkafka-0.11.4/packaging/debian/librdkafka1.postinst.debhelper
new file mode 100644
index 0000000..3d89d3e
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/packaging/debian/librdkafka1.postinst.debhelper
@@ -0,0 +1,5 @@
+# Automatically added by dh_makeshlibs
+if [ "$1" = "configure" ]; then
+	ldconfig
+fi
+# End automatically added section

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/packaging/debian/librdkafka1.postrm.debhelper
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/packaging/debian/librdkafka1.postrm.debhelper b/thirdparty/librdkafka-0.11.4/packaging/debian/librdkafka1.postrm.debhelper
new file mode 100644
index 0000000..7f44047
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/packaging/debian/librdkafka1.postrm.debhelper
@@ -0,0 +1,5 @@
+# Automatically added by dh_makeshlibs
+if [ "$1" = "remove" ]; then
+	ldconfig
+fi
+# End automatically added section

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/packaging/debian/librdkafka1.symbols
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/packaging/debian/librdkafka1.symbols b/thirdparty/librdkafka-0.11.4/packaging/debian/librdkafka1.symbols
new file mode 100644
index 0000000..0ef576e
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/packaging/debian/librdkafka1.symbols
@@ -0,0 +1,64 @@
+librdkafka.so.1 librdkafka1 #MINVER#
+* Build-Depends-Package: librdkafka-dev
+ rd_kafka_brokers_add@Base 0.8.0
+ rd_kafka_conf_destroy@Base 0.8.0
+ rd_kafka_conf_dump@Base 0.8.3
+ rd_kafka_conf_dump_free@Base 0.8.3
+ rd_kafka_conf_dup@Base 0.8.3
+ rd_kafka_conf_new@Base 0.8.0
+ rd_kafka_conf_properties_show@Base 0.8.0
+ rd_kafka_conf_set@Base 0.8.0
+ rd_kafka_conf_set_dr_cb@Base 0.8.0
+ rd_kafka_conf_set_dr_msg_cb@Base 0.8.4
+ rd_kafka_conf_set_error_cb@Base 0.8.0
+ rd_kafka_conf_set_log_cb@Base 0.8.4
+ rd_kafka_conf_set_opaque@Base 0.8.0
+ rd_kafka_conf_set_open_cb@Base 0.8.4
+ rd_kafka_conf_set_socket_cb@Base 0.8.4
+ rd_kafka_conf_set_stats_cb@Base 0.8.0
+ rd_kafka_consume@Base 0.8.0
+ rd_kafka_consume_batch@Base 0.8.0
+ rd_kafka_consume_batch_queue@Base 0.8.4
+ rd_kafka_consume_callback@Base 0.8.0
+ rd_kafka_consume_callback_queue@Base 0.8.4
+ rd_kafka_consume_queue@Base 0.8.4
+ rd_kafka_consume_start@Base 0.8.0
+ rd_kafka_consume_start_queue@Base 0.8.4
+ rd_kafka_consume_stop@Base 0.8.0
+ rd_kafka_destroy@Base 0.8.0
+ rd_kafka_dump@Base 0.8.0
+ rd_kafka_err2str@Base 0.8.0
+ rd_kafka_errno2err@Base 0.8.3
+ rd_kafka_log_print@Base 0.8.0
+ rd_kafka_log_syslog@Base 0.8.0
+ rd_kafka_message_destroy@Base 0.8.0
+ rd_kafka_metadata@Base 0.8.4
+ rd_kafka_metadata_destroy@Base 0.8.4
+ rd_kafka_msg_partitioner_random@Base 0.8.0
+ rd_kafka_name@Base 0.8.0
+ rd_kafka_new@Base 0.8.0
+ rd_kafka_offset_store@Base 0.8.3
+ rd_kafka_opaque@Base 0.8.4
+ rd_kafka_outq_len@Base 0.8.0
+ rd_kafka_poll@Base 0.8.0
+ rd_kafka_produce@Base 0.8.0
+ rd_kafka_produce_batch@Base 0.8.4
+ rd_kafka_queue_destroy@Base 0.8.4
+ rd_kafka_queue_new@Base 0.8.4
+ rd_kafka_set_log_level@Base 0.8.0
+ rd_kafka_set_logger@Base 0.8.0
+ rd_kafka_thread_cnt@Base 0.8.0
+ rd_kafka_topic_conf_destroy@Base 0.8.0
+ rd_kafka_topic_conf_dump@Base 0.8.3
+ rd_kafka_topic_conf_dup@Base 0.8.3
+ rd_kafka_topic_conf_new@Base 0.8.0
+ rd_kafka_topic_conf_set@Base 0.8.0
+ rd_kafka_topic_conf_set_opaque@Base 0.8.0
+ rd_kafka_topic_conf_set_partitioner_cb@Base 0.8.0
+ rd_kafka_topic_destroy@Base 0.8.0
+ rd_kafka_topic_name@Base 0.8.0
+ rd_kafka_topic_new@Base 0.8.0
+ rd_kafka_topic_partition_available@Base 0.8.0
+ rd_kafka_version@Base 0.8.1
+ rd_kafka_version_str@Base 0.8.1
+ rd_kafka_wait_destroyed@Base 0.8.0

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/packaging/debian/rules
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/packaging/debian/rules b/thirdparty/librdkafka-0.11.4/packaging/debian/rules
new file mode 100755
index 0000000..a18c40d
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/packaging/debian/rules
@@ -0,0 +1,19 @@
+#!/usr/bin/make -f
+
+# Uncomment this to turn on verbose mode.
+#export DH_VERBOSE=1
+
+%:
+	dh $@
+
+override_dh_strip:
+	dh_strip --dbg-package=librdkafka1-dbg
+
+override_dh_auto_install:
+	dh_auto_install
+	install -D -m 0644 rdkafka.pc \
+		debian/librdkafka-dev/usr/lib/${DEB_HOST_MULTIARCH}/pkgconfig/rdkafka.pc
+	install -D -m 0644 rdkafka-static.pc \
+		debian/librdkafka-dev/usr/lib/${DEB_HOST_MULTIARCH}/pkgconfig/rdkafka-static.pc
+
+.PHONY: override_dh_strip override_dh_auto_install

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/packaging/debian/source/format
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/packaging/debian/source/format b/thirdparty/librdkafka-0.11.4/packaging/debian/source/format
new file mode 100644
index 0000000..163aaf8
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/packaging/debian/source/format
@@ -0,0 +1 @@
+3.0 (quilt)

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/packaging/debian/watch
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/packaging/debian/watch b/thirdparty/librdkafka-0.11.4/packaging/debian/watch
new file mode 100644
index 0000000..fc9aec8
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/packaging/debian/watch
@@ -0,0 +1,2 @@
+version=3
+http://github.com/edenhill/librdkafka/tags .*/(\d[\d\.]*)\.tar\.gz

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/packaging/get_version.py
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/packaging/get_version.py b/thirdparty/librdkafka-0.11.4/packaging/get_version.py
new file mode 100755
index 0000000..3d98d21
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/packaging/get_version.py
@@ -0,0 +1,21 @@
+#!/usr/bin/env python
+
+import sys
+
+if len(sys.argv) != 2:
+    raise Exception('Usage: %s path/to/rdkafka.h' % sys.argv[0])
+
+kafka_h_file = sys.argv[1]
+f = open(kafka_h_file)
+for line in f:
+    if '#define RD_KAFKA_VERSION' in line:
+        version = line.split()[-1]
+        break
+f.close()
+
+major = int(version[2:4], 16)
+minor = int(version[4:6], 16)
+patch = int(version[6:8], 16)
+version = '.'.join(str(item) for item in (major, minor, patch))
+
+print version
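
The same 0xMMmmrrpp layout that the script slices out of the RD_KAFKA_VERSION
define can also be decoded at runtime in C; a small sketch using the public
rd_kafka_version() call (listed in the symbols file above):

    #include <stdio.h>
    #include <librdkafka/rdkafka.h>

    int main (void) {
            int v = rd_kafka_version();  /* same layout as RD_KAFKA_VERSION */
            printf("%d.%d.%d\n",
                   (v >> 24) & 0xff,     /* major */
                   (v >> 16) & 0xff,     /* minor */
                   (v >> 8)  & 0xff);    /* patch/revision */
            return 0;
    }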

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/packaging/homebrew/README.md
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/packaging/homebrew/README.md b/thirdparty/librdkafka-0.11.4/packaging/homebrew/README.md
new file mode 100644
index 0000000..a23a085
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/packaging/homebrew/README.md
@@ -0,0 +1,15 @@
+# Update the Homebrew librdkafka package version
+
+The `./brew-update-pr.sh` script in this directory updates the
+brew formula for librdkafka and pushes a PR to the homebrew-core repository.
+
+You should run it in two steps: first in the implicit dry-run mode
+to check that everything looks correct, and then, if it does, in
+upload mode to actually push the PR.
+
+    # Do a dry-run first, v0.11.0 is the librdkafka tag:
+    $ ./brew-update-pr.sh v0.11.0
+
+    # If everything looks okay, run the live upload mode:
+    $ ./brew-update-pr.sh --upload v0.11.0
+

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/packaging/homebrew/brew-update-pr.sh
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/packaging/homebrew/brew-update-pr.sh b/thirdparty/librdkafka-0.11.4/packaging/homebrew/brew-update-pr.sh
new file mode 100755
index 0000000..f756159
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/packaging/homebrew/brew-update-pr.sh
@@ -0,0 +1,31 @@
+#!/bin/bash
+#
+# Automatically pushes a PR to homebrew-core to update
+# the librdkafka version.
+#
+# Usage:
+#   # Dry-run:
+#   ./brew-update-pr.sh v0.11.0
+#   # if everything looks good:
+#   ./brew-update-pr.sh --upload v0.11.0
+#
+
+
+DRY_RUN="--dry-run"
+if [[ $1 == "--upload" ]]; then
+   DRY_RUN=
+   shift
+fi
+
+TAG=$1
+
+if [[ -z $TAG ]]; then
+    echo "Usage: $0 [--upload] <librdkafka-tag>"
+    exit 1
+fi
+
+set -eu
+
+brew bump-formula-pr $DRY_RUN --strict \
+     --url=https://github.com/edenhill/librdkafka/archive/${TAG}.tar.gz \
+     librdkafka

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/packaging/nuget/.gitignore
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/packaging/nuget/.gitignore b/thirdparty/librdkafka-0.11.4/packaging/nuget/.gitignore
new file mode 100644
index 0000000..712f08d
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/packaging/nuget/.gitignore
@@ -0,0 +1,5 @@
+dl-*
+out-*
+*.nupkg
+*.pyc
+__pycache__

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/packaging/nuget/README.md
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/packaging/nuget/README.md b/thirdparty/librdkafka-0.11.4/packaging/nuget/README.md
new file mode 100644
index 0000000..720a767
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/packaging/nuget/README.md
@@ -0,0 +1,50 @@
+# NuGet package assembly
+
+This set of scripts collects CI artifacts from S3 and assembles
+them into a NuGet package structure staging directory.
+The NuGet tool is then run (from within docker) on this staging directory
+to create a proper NuGet package (with all the metadata).
+
+The finalized NuGet package may then be uploaded manually to NuGet.org.
+
+## Requirements
+
+ * Requires Python 2.x (due to Python 3 compat issues with rpmfile)
+ * Requires Docker
+ * Requires private S3 access keys for the librdkafka-ci-packages bucket.
+
+
+
+## Usage
+
+1. Trigger CI builds by creating and pushing a new release (candidate) tag
+   in the librdkafka repo. Make sure the tag is created on the correct branch.
+
+    $ git tag v0.11.0
+    $ git push origin v0.11.0
+
+2. Wait for CI builds to finish, monitor the builds here:
+
+ * https://travis-ci.org/edenhill/librdkafka
+ * https://ci.appveyor.com/project/edenhill/librdkafka
+
+3. On a Linux host, run the release.py script to assemble the NuGet package
+
+    $ cd packaging/nuget
+    # Specify the tag
+    $ ./release.py v0.11.0
+    # Optionally, if the tag was moved and an exact sha is also required:
+    # $ ./release.py --sha <the-full-git-sha> v0.11.0
+
+4. If all artifacts were available the NuGet package will be built
+   and reside in the current directory as librdkafka.redist.<v-less-tag>.nupkg
+
+5. Test the package manually
+
+6. Upload the package to NuGet
+
+ * https://www.nuget.org/packages/manage/upload
+
+
+
+

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/packaging/nuget/artifact.py
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/packaging/nuget/artifact.py b/thirdparty/librdkafka-0.11.4/packaging/nuget/artifact.py
new file mode 100755
index 0000000..61b1d80
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/packaging/nuget/artifact.py
@@ -0,0 +1,173 @@
+#!/usr/bin/env python
+#
+#
+# Collects CI artifacts from S3 storage, downloading them
+# to a local directory.
+#
+# The artifacts' folder in the S3 bucket must have the following token
+# format:
+#  <token>-[<value>]__   (repeat)
+#
+# Recognized tokens (unrecognized tokens are ignored):
+#  p       - project (e.g., "confluent-kafka-python")
+#  bld     - builder (e.g., "travis")
+#  plat    - platform ("osx", "linux", ..)
+#  arch    - arch ("x64", ..)
+#  tag     - git tag
+#  sha     - git sha
+#  bid     - builder's build-id
+#  bldtype - Release, Debug (appveyor)
+#
+# Example:
+#   p-confluent-kafka-python__bld-travis__plat-linux__tag-__sha-112130ce297656ea1c39e7c94c99286f95133a24__bid-271588764__/confluent_kafka-0.11.0-cp35-cp35m-manylinux1_x86_64.whl
+
+
+import re
+import os
+import argparse
+import boto3
+
+s3_bucket = 'librdkafka-ci-packages'
+dry_run = False
+
+class Artifact (object):
+    def __init__(self, arts, path, info=None):
+        self.path = path
+        # Remove unexpanded AppVeyor $(..) tokens from filename
+        self.fname = re.sub(r'\$\([^\)]+\)', '', os.path.basename(path))
+        slpath = os.path.join(os.path.dirname(path), self.fname)
+        if os.path.isfile(slpath):
+            # Already points to local file in correct location
+            self.lpath = slpath
+        else:
+            # Prepare download location in dlpath
+            self.lpath = os.path.join(arts.dlpath, slpath)
+
+        if info is None:
+            self.info = dict()
+        else:
+            # Assign the map and convert all keys to lower case
+            self.info = {k.lower(): v for k, v in info.items()}
+            # Rename values, e.g., 'plat':'linux' to 'plat':'debian'
+            for k,v in self.info.items():
+                rdict = packaging.rename_vals.get(k, None)
+                if rdict is not None:
+                    self.info[k] = rdict.get(v, v)
+
+        # Score value for sorting
+        self.score = 0
+
+        # AppVeyor symbol builds are of less value
+        if self.fname.find('.symbols.') != -1:
+            self.score -= 10
+
+        self.arts = arts
+        arts.artifacts.append(self)
+
+
+    def __repr__(self):
+        return self.path
+
+    def __lt__ (self, other):
+        return self.score < other.score
+
+    def download(self):
+        """ Download artifact from S3 and store in local directory .lpath.
+            If the artifact is already downloaded nothing is done. """
+        if os.path.isfile(self.lpath) and os.path.getsize(self.lpath) > 0:
+            return
+        print('Downloading %s -> %s' % (self.path, self.lpath))
+        if dry_run:
+            return
+        ldir = os.path.dirname(self.lpath)
+        if not os.path.isdir(ldir):
+            os.makedirs(ldir, 0o755)
+        self.arts.s3_bucket.download_file(self.path, self.lpath)
+
+
+class Artifacts (object):
+    def __init__(self, match, dlpath):
+        super(Artifacts, self).__init__()
+        self.match = match
+        self.artifacts = list()
+        # Download directory (make sure it ends with a path separator)
+        if not dlpath.endswith(os.path.sep):
+            dlpath = os.path.join(dlpath, '')
+        self.dlpath = dlpath
+        if not os.path.isdir(self.dlpath):
+            if not dry_run:
+                os.makedirs(self.dlpath, 0o755)
+
+    def collect_single(self, path, req_tag=True):
+        """ Collect single artifact, be it in S3 or locally.
+        :param: path string: S3 or local (relative) path
+        :param: req_tag bool: Require tag to match.
+        """
+
+        print('?  %s' % path)
+
+        # For local files, strip download path.
+        # Also ignore any parent directories.
+        if path.startswith(self.dlpath):
+            folder = os.path.basename(os.path.dirname(path[len(self.dlpath):]))
+        else:
+            folder = os.path.basename(os.path.dirname(path))
+
+        # The folder contains the tokens needed to perform
+        # matching of project, gitref, etc.
+        rinfo = re.findall(r'(?P<tag>[^-]+)-(?P<val>.*?)__', folder)
+        if rinfo is None or len(rinfo) == 0:
+            print('Incorrect folder/file name format for %s' % folder)
+            return None
+
+        info = dict(rinfo)
+
+        # Ignore AppVeyor Debug builds
+        if info.get('bldtype', '').lower() == 'debug':
+            print('Ignoring debug artifact %s' % folder)
+            return None
+
+        tag = info.get('tag', None)
+        if tag is not None and (len(tag) == 0 or tag.startswith('$(')):
+            # AppVeyor doesn't substitute $(APPVEYOR_REPO_TAG_NAME)
+            # with an empty value when the tag is not set; it leaves the
+            # token in the string, so treat that as no tag.
+            del info['tag']
+
+        # Match tag or sha to gitref
+        unmatched = list()
+        for m,v in self.match.items():
+            if m not in info or info[m] != v:
+                unmatched.append(m)
+
+        # Make sure all matches were satisfied, unless this is a
+        # common artifact.
+        if info.get('p', '') != 'common' and len(unmatched) > 0:
+            print(info)
+            print('%s: %s did not match %s' % (info.get('p', None), folder, unmatched))
+            return None
+
+        return Artifact(self, path, info)
+
+
+    def collect_s3(self):
+        """ Collect and download build-artifacts from S3 based on git reference """
+        print('Collecting artifacts matching %s from S3 bucket %s' % (self.match, s3_bucket))
+        self.s3 = boto3.resource('s3')
+        self.s3_bucket = self.s3.Bucket(s3_bucket)
+        self.s3_client = boto3.client('s3')
+        for item in self.s3_client.list_objects(Bucket=s3_bucket, Prefix='librdkafka/').get('Contents'):
+            self.collect_single(item.get('Key'))
+
+        for a in self.artifacts:
+            a.download()
+
+    def collect_local(self, path, req_tag=True):
+        """ Collect artifacts from a local directory possibly previously
+        collected from s3 """
+        for f in [os.path.join(dp, f) for dp, dn, filenames in os.walk(path) for f in filenames]:
+            if not os.path.isfile(f):
+                continue
+            self.collect_single(f, req_tag)
+
+
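For reference, the folder token format documented at the top of artifact.py maps onto a plain dict with the same regular expression collect_single() uses. A minimal sketch, using the folder part of the example given in the header comment above:

    import re

    folder = ('p-confluent-kafka-python__bld-travis__plat-linux__tag-__'
              'sha-112130ce297656ea1c39e7c94c99286f95133a24__bid-271588764__')
    info = dict(re.findall(r'(?P<tag>[^-]+)-(?P<val>.*?)__', folder))
    # info == {'p': 'confluent-kafka-python', 'bld': 'travis', 'plat': 'linux',
    #          'tag': '', 'sha': '112130ce297656ea1c39e7c94c99286f95133a24',
    #          'bid': '271588764'}
    # collect_single() then drops the empty/unsubstituted 'tag' entry and checks
    # the remaining tokens against the requested tag/sha match.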

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/packaging/nuget/common/p-common__plat-windows__arch-win32__bldtype-Release/msvcr120.zip
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/packaging/nuget/common/p-common__plat-windows__arch-win32__bldtype-Release/msvcr120.zip b/thirdparty/librdkafka-0.11.4/packaging/nuget/common/p-common__plat-windows__arch-win32__bldtype-Release/msvcr120.zip
new file mode 100644
index 0000000..8f24c8d
Binary files /dev/null and b/thirdparty/librdkafka-0.11.4/packaging/nuget/common/p-common__plat-windows__arch-win32__bldtype-Release/msvcr120.zip differ

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/packaging/nuget/common/p-common__plat-windows__arch-x64__bldtype-Release/msvcr120.zip
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/packaging/nuget/common/p-common__plat-windows__arch-x64__bldtype-Release/msvcr120.zip b/thirdparty/librdkafka-0.11.4/packaging/nuget/common/p-common__plat-windows__arch-x64__bldtype-Release/msvcr120.zip
new file mode 100644
index 0000000..773546c
Binary files /dev/null and b/thirdparty/librdkafka-0.11.4/packaging/nuget/common/p-common__plat-windows__arch-x64__bldtype-Release/msvcr120.zip differ

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/packaging/nuget/nuget.sh
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/packaging/nuget/nuget.sh b/thirdparty/librdkafka-0.11.4/packaging/nuget/nuget.sh
new file mode 100755
index 0000000..0323712
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/packaging/nuget/nuget.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+#
+#
+# Front-end for nuget that runs nuget in a docker image.
+
+set -ex
+
+if [[ -f /.dockerenv ]]; then
+    echo "Inside docker"
+
+    pushd $(dirname $0)
+
+    nuget $*
+
+    popd
+
+else
+    echo "Running docker image"
+    docker run -v $(pwd):/io mono:latest /io/$0 $*
+fi
+

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/packaging/nuget/packaging.py
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/packaging/nuget/packaging.py b/thirdparty/librdkafka-0.11.4/packaging/nuget/packaging.py
new file mode 100755
index 0000000..c8e7479
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/packaging/nuget/packaging.py
@@ -0,0 +1,421 @@
+#!/usr/bin/env python
+#
+# NuGet packaging script.
+# Assembles a NuGet package using CI artifacts in S3
+# and calls nuget (in docker) to finalize the package.
+#
+
+import sys
+import re
+import os
+import tempfile
+import shutil
+import subprocess
+import urllib
+from string import Template
+from collections import defaultdict
+import boto3
+from zfile import zfile
+
+
+# Rename token values
+rename_vals = {'plat': {'windows': 'win7'},
+               'arch': {'x86_64': 'x64',
+                        'i386': 'x86',
+                        'win32': 'x86'}}
+
+# Collects CI artifacts from S3 storage, downloading them
+# to a local directory, or collecting already-downloaded artifacts from
+# a local directory.
+#
+# The artifacts' folder in the S3 bucket must have the following token
+# format:
+#  <token>-[<value>]__   (repeat)
+#
+# Recognized tokens (unrecognized tokens are ignored):
+#  p       - project (e.g., "confluent-kafka-python")
+#  bld     - builder (e.g., "travis")
+#  plat    - platform ("osx", "linux", ..)
+#  arch    - arch ("x64", ..)
+#  tag     - git tag
+#  sha     - git sha
+#  bid     - builder's build-id
+#  bldtype - Release, Debug (appveyor)
+#
+# Example:
+#   librdkafka/p-librdkafka__bld-travis__plat-linux__arch-x64__tag-v0.0.62__sha-d051b2c19eb0c118991cd8bc5cf86d8e5e446cde__bid-1562.1/librdkafka.tar.gz
+
+
+s3_bucket = 'librdkafka-ci-packages'
+dry_run = False
+
+class Artifact (object):
+    def __init__(self, arts, path, info=None):
+        self.path = path
+        # Remove unexpanded AppVeyor $(..) tokens from filename
+        self.fname = re.sub(r'\$\([^\)]+\)', '', os.path.basename(path))
+        slpath = os.path.join(os.path.dirname(path), self.fname)
+        if os.path.isfile(slpath):
+            # Already points to local file in correct location
+            self.lpath = slpath
+        else:
+            # Prepare download location in dlpath
+            self.lpath = os.path.join(arts.dlpath, slpath)
+
+        if info is None:
+            self.info = dict()
+        else:
+            # Assign the map and convert all keys to lower case
+            self.info = {k.lower(): v for k, v in info.items()}
+            # Rename values via rename_vals, e.g. 'plat':'windows' becomes 'plat':'win7'
+            for k,v in self.info.items():
+                rdict = rename_vals.get(k, None)
+                if rdict is not None:
+                    self.info[k] = rdict.get(v, v)
+
+        # Score value for sorting
+        self.score = 0
+
+        # AppVeyor symbol builds are of less value
+        if self.fname.find('.symbols.') != -1:
+            self.score -= 10
+
+        self.arts = arts
+        arts.artifacts.append(self)
+
+
+    def __repr__(self):
+        return self.path
+
+    def __lt__ (self, other):
+        return self.score < other.score
+
+    def download(self):
+        """ Download artifact from S3 and store in local directory .lpath.
+            If the artifact is already downloaded nothing is done. """
+        if os.path.isfile(self.lpath) and os.path.getsize(self.lpath) > 0:
+            return
+        print('Downloading %s' % self.path)
+        if dry_run:
+            return
+        ldir = os.path.dirname(self.lpath)
+        if not os.path.isdir(ldir):
+            os.makedirs(ldir, 0o755)
+        self.arts.s3_bucket.download_file(self.path, self.lpath)
+
+
+class Artifacts (object):
+    def __init__(self, match, dlpath):
+        super(Artifacts, self).__init__()
+        self.match = match
+        self.artifacts = list()
+        # Download directory (make sure it ends with a path separator)
+        if not dlpath.endswith(os.path.sep):
+            dlpath = os.path.join(dlpath, '')
+        self.dlpath = dlpath
+        if not os.path.isdir(self.dlpath):
+            if not dry_run:
+                os.makedirs(self.dlpath, 0o755)
+
+
+    def collect_single(self, path, req_tag=True):
+        """ Collect single artifact, be it in S3 or locally.
+        :param: path string: S3 or local (relative) path
+        :param: req_tag bool: Require tag to match.
+        """
+
+        #print('?  %s' % path)
+
+        # For local files, strip download path.
+        # Also ignore any parent directories.
+        if path.startswith(self.dlpath):
+            folder = os.path.basename(os.path.dirname(path[len(self.dlpath):]))
+        else:
+            folder = os.path.basename(os.path.dirname(path))
+
+        # The folder contains the tokens needed to perform
+        # matching of project, gitref, etc.
+        rinfo = re.findall(r'(?P<tag>[^-]+)-(?P<val>.*?)__', folder)
+        if rinfo is None or len(rinfo) == 0:
+            print('Incorrect folder/file name format for %s' % folder)
+            return None
+
+        info = dict(rinfo)
+
+        # Ignore AppVeyor Debug builds
+        if info.get('bldtype', '').lower() == 'debug':
+            print('Ignoring debug artifact %s' % folder)
+            return None
+
+        tag = info.get('tag', None)
+        if tag is not None and (len(tag) == 0 or tag.startswith('$(')):
+            # AppVeyor doesn't substitute $(APPVEYOR_REPO_TAG_NAME)
+            # with an empty value when the tag is not set; it leaves the
+            # token in the string, so treat that as no tag.
+            del info['tag']
+
+        # Perform matching
+        unmatched = list()
+        for m,v in self.match.items():
+            if m not in info or info[m] != v:
+                unmatched.append(m)
+
+        # Make sure all matches were satisfied, unless this is a
+        # common artifact.
+        if info.get('p', '') != 'common' and len(unmatched) > 0:
+            # print('%s: %s did not match %s' % (info.get('p', None), folder, unmatched))
+            return None
+
+        return Artifact(self, path, info)
+
+
+    def collect_s3(self):
+        """ Collect and download build-artifacts from S3 based on git reference """
+        print('Collecting artifacts matching %s from S3 bucket %s' % (self.match, s3_bucket))
+        self.s3 = boto3.resource('s3')
+        self.s3_bucket = self.s3.Bucket(s3_bucket)
+        self.s3_client = boto3.client('s3')
+        for item in self.s3_client.list_objects(Bucket=s3_bucket, Prefix='librdkafka/').get('Contents'):
+            self.collect_single(item.get('Key'))
+
+        for a in self.artifacts:
+            a.download()
+
+    def collect_local(self, path, req_tag=True):
+        """ Collect artifacts from a local directory possibly previously
+        collected from s3 """
+        for f in [os.path.join(dp, f) for dp, dn, filenames in os.walk(path) for f in filenames]:
+            if not os.path.isfile(f):
+                continue
+            self.collect_single(f, req_tag)
+
+
+class Package (object):
+    """ Generic Package class
+        A Package is a working container for one or more output
+        packages for a specific package type (e.g., nuget) """
+
+    def __init__ (self, version, arts, ptype):
+        super(Package, self).__init__()
+        self.version = version
+        self.arts = arts
+        self.ptype = ptype
+        # These may be overwritten by specific sub-classes:
+        self.artifacts = arts.artifacts
+        # Staging path, filled in later.
+        self.stpath = None
+        self.kv = {'version': version}
+        self.files = dict()
+
+    def add_file (self, file):
+        self.files[file] = True
+
+    def build (self):
+        """ Build package output(s), return a list of paths to built packages """
+        raise NotImplementedError
+
+    def cleanup (self):
+        """ Optional cleanup routine for removing temporary files, etc. """
+        pass
+
+    def verify (self, path):
+        """ Optional post-build package verifier """
+        pass
+
+    def render (self, fname, destpath='.'):
+        """ Render template in file fname and save to destpath/fname,
+        where destpath is relative to stpath """
+
+        outf = os.path.join(self.stpath, destpath, fname)
+
+        if not os.path.isdir(os.path.dirname(outf)):
+            os.makedirs(os.path.dirname(outf), 0o0755)
+
+        with open(os.path.join('templates', fname), 'r') as tf:
+            tmpl = Template(tf.read())
+        with open(outf, 'w') as of:
+            of.write(tmpl.substitute(self.kv))
+
+        self.add_file(outf)
+
+
+    def copy_template (self, fname, target_fname=None, destpath='.'):
+        """ Copy template file to destpath/fname
+        where destpath is relative to stpath """
+
+        if target_fname is None:
+            target_fname = fname
+        outf = os.path.join(self.stpath, destpath, target_fname)
+
+        if not os.path.isdir(os.path.dirname(outf)):
+            os.makedirs(os.path.dirname(outf), 0o0755)
+
+        shutil.copy(os.path.join('templates', fname), outf)
+
+        self.add_file(outf)
+
+
+class NugetPackage (Package):
+    """ All platforms, archs, et.al, are bundled into one set of
+        NuGet output packages: "main", redist and symbols """
+    def __init__ (self, version, arts):
+        if version.startswith('v'):
+            version = version[1:] # Strip v prefix
+        super(NugetPackage, self).__init__(version, arts, "nuget")
+
+    def cleanup(self):
+        if os.path.isdir(self.stpath):
+            shutil.rmtree(self.stpath)
+
+    def build (self, buildtype):
+        """ Build single NuGet package for all its artifacts. """
+
+        # NuGet versions don't carry the 'v' prefix, so strip it if present.
+        vless_version = self.kv['version']
+        if vless_version[0] == 'v':
+            vless_version = vless_version[1:]
+
+
+        self.stpath = tempfile.mkdtemp(prefix="out-", suffix="-%s" % buildtype,
+                                       dir=".")
+
+        self.render('librdkafka.redist.nuspec')
+        self.copy_template('librdkafka.redist.targets',
+                           destpath=os.path.join('build', 'native'))
+        self.copy_template('librdkafka.redist.props',
+                           destpath='build')
+        for f in ['../../README.md', '../../CONFIGURATION.md', '../../LICENSES.txt']:
+            shutil.copy(f, self.stpath)
+
+        # Generate template tokens for artifacts
+        for a in self.arts.artifacts:
+            if 'bldtype' not in a.info:
+                a.info['bldtype'] = 'release'
+
+            a.info['variant'] = '%s-%s-%s' % (a.info.get('plat'),
+                                              a.info.get('arch'),
+                                              a.info.get('bldtype'))
+            if 'toolset' not in a.info:
+                a.info['toolset'] = 'v120'
+
+        mappings = [
+            [{'arch': 'x64', 'plat': 'linux', 'fname_startswith': 'librdkafka.tar.gz'}, './include/librdkafka/rdkafka.h', 'build/native/include/librdkafka/rdkafka.h'],
+            [{'arch': 'x64', 'plat': 'linux', 'fname_startswith': 'librdkafka.tar.gz'}, './include/librdkafka/rdkafkacpp.h', 'build/native/include/librdkafka/rdkafkacpp.h'],
+
+            [{'arch': 'x64', 'plat': 'osx', 'fname_startswith': 'librdkafka.tar.gz'}, './lib/librdkafka.dylib', 'runtimes/osx-x64/native/librdkafka.dylib'],
+            [{'arch': 'x64', 'plat': 'linux', 'fname_startswith': 'librdkafka-debian9.tgz'}, './lib/librdkafka.so.1', 'runtimes/linux-x64/native/debian9-librdkafka.so'],
+            [{'arch': 'x64', 'plat': 'linux', 'fname_startswith': 'librdkafka.tar.gz'}, './lib/librdkafka.so.1', 'runtimes/linux-x64/native/librdkafka.so'],
+
+            [{'arch': 'x64', 'plat': 'win7', 'fname_startswith': 'msvcr120.zip'}, 'msvcr120.dll', 'runtimes/win7-x64/native/msvcr120.dll'],
+            # matches librdkafka.redist.{VER}.nupkg
+            [{'arch': 'x64', 'plat': 'win7', 'fname_startswith': 'librdkafka.redist'}, 'build/native/bin/v120/x64/Release/librdkafka.dll', 'runtimes/win7-x64/native/librdkafka.dll'],
+            [{'arch': 'x64', 'plat': 'win7', 'fname_startswith': 'librdkafka.redist'}, 'build/native/bin/v120/x64/Release/librdkafkacpp.dll', 'runtimes/win7-x64/native/librdkafkacpp.dll'],
+            [{'arch': 'x64', 'plat': 'win7', 'fname_startswith': 'librdkafka.redist'}, 'build/native/bin/v120/x64/Release/zlib.dll', 'runtimes/win7-x64/native/zlib.dll'],
+            # matches librdkafka.{VER}.nupkg
+            [{'arch': 'x64', 'plat': 'win7', 'fname_startswith': 'librdkafka', 'fname_excludes': ['redist', 'symbols']},
+             'build/native/lib/v120/x64/Release/librdkafka.lib', 'build/native/lib/win7/x64/win7-x64-Release/v120/librdkafka.lib'],
+            [{'arch': 'x64', 'plat': 'win7', 'fname_startswith': 'librdkafka', 'fname_excludes': ['redist', 'symbols']},
+             'build/native/lib/v120/x64/Release/librdkafkacpp.lib', 'build/native/lib/win7/x64/win7-x64-Release/v120/librdkafkacpp.lib'],
+
+            [{'arch': 'x86', 'plat': 'win7', 'fname_startswith': 'msvcr120.zip'}, 'msvcr120.dll', 'runtimes/win7-x86/native/msvcr120.dll'],
+            # matches librdkafka.redist.{VER}.nupkg
+            [{'arch': 'x86', 'plat': 'win7', 'fname_startswith': 'librdkafka.redist'}, 'build/native/bin/v120/Win32/Release/librdkafka.dll', 'runtimes/win7-x86/native/librdkafka.dll'],
+            [{'arch': 'x86', 'plat': 'win7', 'fname_startswith': 'librdkafka.redist'}, 'build/native/bin/v120/Win32/Release/librdkafkacpp.dll', 'runtimes/win7-x86/native/librdkafkacpp.dll'],
+            [{'arch': 'x86', 'plat': 'win7', 'fname_startswith': 'librdkafka.redist'}, 'build/native/bin/v120/Win32/Release/zlib.dll', 'runtimes/win7-x86/native/zlib.dll'],
+            # matches librdkafka.{VER}.nupkg
+            [{'arch': 'x86', 'plat': 'win7', 'fname_startswith': 'librdkafka', 'fname_excludes': ['redist', 'symbols']}, 
+            'build/native/lib/v120/Win32/Release/librdkafka.lib', 'build/native/lib/win7/x86/win7-x86-Release/v120/librdkafka.lib'],
+            [{'arch': 'x86', 'plat': 'win7', 'fname_startswith': 'librdkafka', 'fname_excludes': ['redist', 'symbols']}, 
+            'build/native/lib/v120/Win32/Release/librdkafkacpp.lib', 'build/native/lib/win7/x86/win7-x86-Release/v120/librdkafkacpp.lib']
+        ]
+
+        for m in mappings:
+            attributes = m[0]
+            fname_startswith = attributes['fname_startswith']
+            del attributes['fname_startswith']
+            fname_excludes = []
+            if 'fname_excludes' in attributes:
+                fname_excludes = attributes['fname_excludes']
+                del attributes['fname_excludes']
+
+            artifact = None
+            for a in self.arts.artifacts:
+                found = True
+
+                for attr in attributes:
+                    if a.info[attr] != attributes[attr]:
+                        found = False
+                        break
+
+                if not a.fname.startswith(fname_startswith):
+                    found = False
+
+                for exclude in fname_excludes:
+                    if exclude in a.fname:
+                        found = False
+                        break
+
+                if found:
+                    artifact = a
+                    break
+
+            if artifact is None:
+                raise Exception('unable to find an artifact with tags %s whose filename starts with "%s"' % (str(attributes), fname_startswith))
+
+            outf = os.path.join(self.stpath, m[2])
+            member = m[1]
+            try:
+                zfile.ZFile.extract(artifact.lpath, member, outf)
+            except KeyError as e:
+                raise Exception('file not found in archive %s: %s. Files in archive are: %s' % (artifact.lpath, e, zfile.ZFile(artifact.lpath).getnames()))
+
+        print('Tree extracted to %s' % self.stpath)
+
+        # After creating a bare-bones nupkg layout containing the artifacts
+        # and some spec and props files, call the 'nuget' utility to
+        # make a proper nupkg of it (with all the metadata files).
+        subprocess.check_call("./nuget.sh pack %s -BasePath '%s' -NonInteractive" %  \
+                              (os.path.join(self.stpath, 'librdkafka.redist.nuspec'),
+                               self.stpath), shell=True)
+                               
+        return 'librdkafka.redist.%s.nupkg' % vless_version
+
+    def verify (self, path):
+        """ Verify package """
+        expect = [
+            "librdkafka.redist.nuspec",
+            "LICENSES.txt",
+            "build/librdkafka.redist.props",
+            "build/native/librdkafka.redist.targets",
+            "build/native/include/librdkafka/rdkafka.h",
+            "build/native/include/librdkafka/rdkafkacpp.h",
+            "build/native/lib/win7/x64/win7-x64-Release/v120/librdkafka.lib",
+            "build/native/lib/win7/x64/win7-x64-Release/v120/librdkafkacpp.lib",
+            "build/native/lib/win7/x86/win7-x86-Release/v120/librdkafka.lib",
+            "build/native/lib/win7/x86/win7-x86-Release/v120/librdkafkacpp.lib",
+            "runtimes/linux-x64/native/debian9-librdkafka.so",
+            "runtimes/linux-x64/native/librdkafka.so",
+            "runtimes/osx-x64/native/librdkafka.dylib",
+            "runtimes/win7-x64/native/librdkafka.dll",
+            "runtimes/win7-x64/native/librdkafkacpp.dll",
+            "runtimes/win7-x64/native/msvcr120.dll",
+            "runtimes/win7-x64/native/zlib.dll",
+            "runtimes/win7-x86/native/librdkafka.dll",
+            "runtimes/win7-x86/native/librdkafkacpp.dll",
+            "runtimes/win7-x86/native/msvcr120.dll",
+            "runtimes/win7-x86/native/zlib.dll"]
+
+        missing = list()
+        with zfile.ZFile(path, 'r') as zf:
+            print('Verifying %s:' % path)
+
+            # Zipfiles may url-encode filenames, unquote them before matching.
+            pkgd = [urllib.unquote(x) for x in zf.getnames()]
+            missing = [x for x in expect if x not in pkgd]
+
+        if len(missing) > 0:
+            print('Missing files in package %s:\n%s' % (path, '\n'.join(missing)))
+            return False
+        else:
+            print('OK - %d expected files found' % len(expect))
+            return True
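One detail of packaging.py worth calling out: the rename_vals table normalizes CI token values to the names used in the NuGet layout, which is why AppVeyor artifacts labelled plat-windows/arch-win32 end up under win7/x86 paths. A minimal sketch of that substitution (the sample info dict is hypothetical):

    rename_vals = {'plat': {'windows': 'win7'},
                   'arch': {'x86_64': 'x64',
                            'i386': 'x86',
                            'win32': 'x86'}}

    info = {'plat': 'windows', 'arch': 'win32', 'bldtype': 'Release'}
    info = {k: rename_vals.get(k, {}).get(v, v) for k, v in info.items()}
    # info == {'plat': 'win7', 'arch': 'x86', 'bldtype': 'Release'}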

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/packaging/nuget/release.py
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/packaging/nuget/release.py b/thirdparty/librdkafka-0.11.4/packaging/nuget/release.py
new file mode 100755
index 0000000..692ee6b
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/packaging/nuget/release.py
@@ -0,0 +1,83 @@
+#!/usr/bin/env python
+#
+#
+# NuGet release packaging tool.
+# Creates a NuGet package from CI artifacts on S3.
+#
+
+
+import sys
+import argparse
+import packaging
+
+
+dry_run = False
+
+
+
+if __name__ == '__main__':
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--no-s3", help="Don't collect from S3", action="store_true")
+    parser.add_argument("--dry-run",
+                        help="Locate artifacts but don't actually download or do anything",
+                        action="store_true")
+    parser.add_argument("--directory", help="Download directory (default: dl-<tag>)", default=None)
+    parser.add_argument("--no-cleanup", help="Don't clean up temporary folders", action="store_true")
+    parser.add_argument("--sha", help="Also match on this git sha1", default=None)
+    parser.add_argument("--nuget-version", help="The nuget package version (defaults to same as tag)", default=None)
+    parser.add_argument("tag", help="Git tag to collect")
+
+    args = parser.parse_args()
+    dry_run = args.dry_run
+    if not args.directory:
+        args.directory = 'dl-%s' % args.tag
+
+    match = {'tag': args.tag}
+    if args.sha is not None:
+        match['sha'] = args.sha
+
+    arts = packaging.Artifacts(match, args.directory)
+
+    # Collect common local artifacts, such as support files.
+    arts.collect_local('common', req_tag=False)
+
+    if not args.no_s3:
+        arts.collect_s3()
+    else:
+        arts.collect_local(arts.dlpath)
+
+    if len(arts.artifacts) == 0:
+        raise ValueError('No artifacts found for %s' % match)
+
+    print('Collected artifacts:')
+    for a in arts.artifacts:
+        print(' %s' % a.lpath)
+    print('')
+
+    package_version = match['tag']
+    if args.nuget_version is not None:
+        package_version = args.nuget_version
+
+    print('')
+
+    if dry_run:
+        sys.exit(0)
+
+    print('Building packages:')
+
+    p = packaging.NugetPackage(package_version, arts)
+    pkgfile = p.build(buildtype='release')
+
+    if not args.no_cleanup:
+        p.cleanup()
+    else:
+        print(' --no-cleanup: leaving %s' % p.stpath)
+
+    print('')
+
+    if not p.verify(pkgfile):
+        print('Package failed verification.')
+        sys.exit(1)
+    else:
+        print('Created package: %s' % pkgfile)

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/packaging/nuget/requirements.txt
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/packaging/nuget/requirements.txt b/thirdparty/librdkafka-0.11.4/packaging/nuget/requirements.txt
new file mode 100644
index 0000000..c892afd
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/packaging/nuget/requirements.txt
@@ -0,0 +1,2 @@
+boto3
+rpmfile

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/packaging/nuget/templates/librdkafka.redist.nuspec
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/packaging/nuget/templates/librdkafka.redist.nuspec b/thirdparty/librdkafka-0.11.4/packaging/nuget/templates/librdkafka.redist.nuspec
new file mode 100644
index 0000000..f48e523
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/packaging/nuget/templates/librdkafka.redist.nuspec
@@ -0,0 +1,21 @@
+<?xml version="1.0" encoding="utf-8"?>
+<package xmlns="http://schemas.microsoft.com/packaging/2011/10/nuspec.xsd">
+  <metadata>
+    <id>librdkafka.redist</id>
+    <version>${version}</version>
+    <title>librdkafka - redistributable</title>
+    <authors>Magnus Edenhill, edenhill</authors>
+    <owners>Magnus Edenhill, edenhill</owners>
+    <requireLicenseAcceptance>false</requireLicenseAcceptance>
+    <licenseUrl>https://github.com/edenhill/librdkafka/blob/master/LICENSES.txt</licenseUrl>
+    <projectUrl>https://github.com/edenhill/librdkafka</projectUrl>
+    <description>The Apache Kafka C/C++ client library - redistributable</description>
+    <summary>The Apache Kafka C/C++ client library</summary>
+    <releaseNotes>Release of librdkafka</releaseNotes>
+    <copyright>Copyright 2012-2017</copyright>
+    <tags>native apache kafka librdkafka C C++ nativepackage</tags>
+  </metadata>
+  <files>
+    <file src="**" />
+  </files>
+</package>

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/packaging/nuget/templates/librdkafka.redist.props
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/packaging/nuget/templates/librdkafka.redist.props b/thirdparty/librdkafka-0.11.4/packaging/nuget/templates/librdkafka.redist.props
new file mode 100644
index 0000000..f6c0de0
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/packaging/nuget/templates/librdkafka.redist.props
@@ -0,0 +1,18 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<Project ToolsVersion="12.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+  <ItemGroup>
+    <Content Include="$(MSBuildThisFileDirectory)..\runtimes\win7-x86\native\*">
+      <Link>librdkafka\x86\%(Filename)%(Extension)</Link>
+      <CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory>
+    </Content>
+    <Content Include="$(MSBuildThisFileDirectory)..\runtimes\win7-x64\native\*">
+      <Link>librdkafka\x64\%(Filename)%(Extension)</Link>
+      <CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory>
+    </Content>
+  </ItemGroup>
+  <ItemDefinitionGroup>
+    <ClCompile>
+      <AdditionalIncludeDirectories>$(MSBuildThisFileDirectory)include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+    </ClCompile>
+  </ItemDefinitionGroup>
+</Project>

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/packaging/nuget/templates/librdkafka.redist.targets
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/packaging/nuget/templates/librdkafka.redist.targets b/thirdparty/librdkafka-0.11.4/packaging/nuget/templates/librdkafka.redist.targets
new file mode 100644
index 0000000..632408d
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/packaging/nuget/templates/librdkafka.redist.targets
@@ -0,0 +1,19 @@
+<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+  <ItemDefinitionGroup>
+    <Link>
+      <AdditionalDependencies Condition="'$(Platform)' == 'x64'">$(MSBuildThisFileDirectory)lib\win7\x64\win7-x64-Release\v120\librdkafka.lib;%(AdditionalDependencies)</AdditionalDependencies>
+      <AdditionalDependencies Condition="'$(Platform)' != 'x64'">$(MSBuildThisFileDirectory)lib\win7\x86\win7-x86-Release\v120\librdkafka.lib;%(AdditionalDependencies)</AdditionalDependencies>
+      <AdditionalLibraryDirectories Condition="'$(Platform)' == 'x64'">$(MSBuildThisFileDirectory)lib\win7\x64\win7-x64-Release\v120;%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
+      <AdditionalLibraryDirectories Condition="'$(Platform)' != 'x64'">$(MSBuildThisFileDirectory)lib\win7\x86\win7-x86-Release\v120;%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
+    </Link>
+    <ClCompile>
+      <AdditionalIncludeDirectories>$(MSBuildThisFileDirectory)include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+    </ClCompile>
+  </ItemDefinitionGroup>
+  <ItemGroup Condition="'$(Platform)' == 'x64'">
+    <ReferenceCopyLocalPaths Include="$(MSBuildThisFileDirectory)..\..\runtimes\win7-x64\native\librdkafka.dll" />
+  </ItemGroup>
+  <ItemGroup Condition="'$(Platform)' != 'x64'">
+    <ReferenceCopyLocalPaths Include="$(MSBuildThisFileDirectory)..\..\runtimes\win7-x86\native\librdkafka.dll" />
+  </ItemGroup>
+</Project>

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/packaging/nuget/zfile/__init__.py
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/packaging/nuget/zfile/__init__.py b/thirdparty/librdkafka-0.11.4/packaging/nuget/zfile/__init__.py
new file mode 100644
index 0000000..e69de29

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/packaging/nuget/zfile/zfile.py
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/packaging/nuget/zfile/zfile.py b/thirdparty/librdkafka-0.11.4/packaging/nuget/zfile/zfile.py
new file mode 100644
index 0000000..8616078
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/packaging/nuget/zfile/zfile.py
@@ -0,0 +1,100 @@
+#!/usr/bin/env python
+
+import os
+import tarfile
+import zipfile
+import rpmfile
+
+class ZFile (object):
+    def __init__(self, path, mode='r', ext=None):
+        super(ZFile, self).__init__()
+
+        if ext is not None:
+            _ext = ext
+        else:
+            _ext = os.path.splitext(path)[-1]
+        if _ext.startswith('.'):
+            _ext = _ext[1:]
+
+        if zipfile.is_zipfile(path) or _ext == 'zip':
+            self.f = zipfile.ZipFile(path, mode)
+        elif tarfile.is_tarfile(path) or _ext in ('tar', 'tgz', 'gz'):
+            self.f = tarfile.open(path, mode)
+        elif _ext == 'rpm':
+            self.f = rpmfile.open(path, mode + 'b')
+        else:
+            raise ValueError('Unsupported file extension: %s' % path)
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, *args):
+        if callable(getattr(self.f, 'close', None)):
+            self.f.close()
+
+    def getnames(self):
+        if isinstance(self.f, zipfile.ZipFile):
+            return self.f.namelist()
+        elif isinstance(self.f, tarfile.TarFile):
+            return self.f.getnames()
+        elif isinstance(self.f, rpmfile.RPMFile):
+            return [x.name for x in self.f.getmembers()]
+        else:
+            raise NotImplementedError
+
+    def headers(self):
+        if isinstance(self.f, rpmfile.RPMFile):
+            return self.f.headers
+        else:
+            return dict()
+
+    def extract_to(self, member, path):
+        """ Extract compress file's \p member to \p path
+            If \p path is a directory the member's basename will used as
+            filename, otherwise path is considered the full file path name. """
+
+        if not os.path.isdir(os.path.dirname(path)):
+            os.makedirs(os.path.dirname(path))
+
+        if os.path.isdir(path):
+            path = os.path.join(path, os.path.basename(member))
+
+        with open(path, 'wb') as of:
+            if isinstance(self.f, zipfile.ZipFile):
+                zf = self.f.open(member)
+            else:
+                zf = self.f.extractfile(member)
+
+            while True:
+                b = zf.read(1024*100)
+                if b:
+                    of.write(b)
+                else:
+                    break
+
+            zf.close()
+
+
+    @classmethod
+    def extract (cls, zpath, member, outpath):
+        """
+        Extract file member (full internal path) to output from
+        archive zpath.
+        """
+
+        with ZFile(zpath) as zf:
+            zf.extract_to(member, outpath)
+
+
+    @classmethod
+    def compress (cls, zpath, paths, stripcnt=0, ext=None):
+        """
+        Create new compressed file \p zpath containing files in \p paths
+        """
+
+        with ZFile(zpath, 'w', ext=ext) as zf:
+            for p in paths:
+                outp = os.path.sep.join(p.split(os.path.sep)[stripcnt:])
+                print('zip %s to %s (stripcnt %d)' % (p, outp, stripcnt))
+                zf.f.write(p, outp)
+
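The ZFile helper above gives zip, tar and rpm archives one small common interface. A short usage sketch, assuming the two input files exist locally (all paths here are hypothetical):

    from zfile import zfile

    # Pack two files into a zip, stripping the leading 'dist/' component
    # from the stored names (stripcnt=1).
    zfile.ZFile.compress('bundle.zip', ['dist/librdkafka.so.1', 'dist/rdkafka.h'],
                         stripcnt=1, ext='zip')

    # Pull a single member back out of the archive to an output path.
    zfile.ZFile.extract('bundle.zip', 'librdkafka.so.1', 'out/librdkafka.so.1')

    # List the members of any supported archive type.
    with zfile.ZFile('bundle.zip') as zf:
        print(zf.getnames())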

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/packaging/rpm/.gitignore
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/packaging/rpm/.gitignore b/thirdparty/librdkafka-0.11.4/packaging/rpm/.gitignore
new file mode 100644
index 0000000..cf122d0
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/packaging/rpm/.gitignore
@@ -0,0 +1,3 @@
+*.log
+available_pkgs
+installed_pkgs

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/packaging/rpm/Makefile
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/packaging/rpm/Makefile b/thirdparty/librdkafka-0.11.4/packaging/rpm/Makefile
new file mode 100644
index 0000000..24e9ae6
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/packaging/rpm/Makefile
@@ -0,0 +1,81 @@
+PACKAGE_NAME?=	librdkafka
+VERSION?=	$(shell ../get_version.py ../../src/rdkafka.h)
+
+# Jenkins CI integration
+BUILD_NUMBER?= 1
+
+MOCK_CONFIG?=default
+
+RESULT_DIR?=pkgs-$(VERSION)-$(BUILD_NUMBER)-$(MOCK_CONFIG)
+
+all: rpm
+
+
+SOURCES:
+	mkdir -p SOURCES
+
+archive: SOURCES
+	cd ../../ && \
+	git archive --prefix=$(PACKAGE_NAME)-$(VERSION)/ \
+		-o packaging/rpm/SOURCES/$(PACKAGE_NAME)-$(VERSION).tar.gz HEAD
+
+
+build_prepare: archive
+	mkdir -p $(RESULT_DIR)
+	rm -f $(RESULT_DIR)/$(PACKAGE_NAME)*.rpm
+
+
+srpm: build_prepare
+	/usr/bin/mock \
+		-r $(MOCK_CONFIG) \
+		--define "__version $(VERSION)" \
+		--define "__release $(BUILD_NUMBER)" \
+		--resultdir=$(RESULT_DIR) \
+		--no-clean --no-cleanup-after \
+		--buildsrpm \
+		--spec=librdkafka.spec \
+		--sources=SOURCES || \
+	(tail -n 100 pkgs-$(VERSION)*/*log ; false)
+	@echo "======= Source RPM now available in $(RESULT_DIR) ======="
+
+rpm: srpm
+	/usr/bin/mock \
+		-r $(MOCK_CONFIG) \
+		--define "__version $(VERSION)"\
+		--define "__release $(BUILD_NUMBER)"\
+		--resultdir=$(RESULT_DIR) \
+		--no-clean --no-cleanup-after \
+		--rebuild $(RESULT_DIR)/$(PACKAGE_NAME)*.src.rpm || \
+	(tail -n 100 pkgs-$(VERSION)*/*log ; false)
+	@echo "======= Binary RPMs now available in $(RESULT_DIR) ======="
+
+copy-artifacts:
+	cp $(RESULT_DIR)/*rpm ../../artifacts/
+
+clean:
+	rm -rf SOURCES
+	/usr/bin/mock -r $(MOCK_CONFIG) --clean
+
+distclean: clean
+	rm -f build.log root.log state.log available_pkgs installed_pkgs \
+		*.rpm *.tar.gz
+
+# Prepare Ubuntu 14.04 for building RPMs with mock.
+#  - older versions of mock need the config file to reside in /etc/mock,
+#    so we copy it there.
+#  - add a mock system group (if it does not already exist)
+#  - add the current user to the mock group.
+#  - prepare the mock environment with some needed packages.
+# NOTE: This target should be run with sudo.
+prepare_ubuntu:
+	apt-get -qq update
+	apt-get install -y -qq mock make git python-lzma
+	cp *.cfg /etc/mock/
+	addgroup --system mock || true
+	adduser $$(whoami) mock
+	/usr/bin/mock -r $(MOCK_CONFIG) --init
+	/usr/bin/mock -r $(MOCK_CONFIG) --no-cleanup-after --install epel-release shadow-utils
+
+prepare_centos:
+	yum install -y -q mock make git
+	cp *.cfg /etc/mock/

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/packaging/rpm/el7-x86_64.cfg
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/packaging/rpm/el7-x86_64.cfg b/thirdparty/librdkafka-0.11.4/packaging/rpm/el7-x86_64.cfg
new file mode 100644
index 0000000..5022827
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/packaging/rpm/el7-x86_64.cfg
@@ -0,0 +1,40 @@
+config_opts['root'] = 'el7-x86_64'
+config_opts['target_arch'] = 'x86_64'
+config_opts['legal_host_arches'] = ('x86_64',)
+config_opts['chroot_setup_cmd'] = 'install @buildsys-build'
+config_opts['dist'] = 'el7'  # only useful for --resultdir variable subst
+config_opts['releasever'] = '7'
+config_opts['docker_unshare_warning'] = False
+config_opts['nosync'] = True
+
+config_opts['yum.conf'] = """
+[main]
+keepcache=1
+debuglevel=2
+reposdir=/dev/null
+logfile=/var/log/yum.log
+retries=15
+obsoletes=1
+gpgcheck=0
+assumeyes=1
+syslog_ident=mock
+syslog_device=
+mdpolicy=group:primary
+
+# repos
+[base]
+name=BaseOS
+mirrorlist=http://mirrorlist.centos.org/?release=7&arch=x86_64&repo=os
+failovermethod=priority
+
+[updates]
+name=updates
+enabled=1
+mirrorlist=http://mirrorlist.centos.org/?release=7&arch=x86_64&repo=updates
+failovermethod=priority
+
+[epel]
+name=epel
+mirrorlist=http://mirrors.fedoraproject.org/mirrorlist?repo=epel-7&arch=x86_64
+failovermethod=priority
+"""

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/packaging/rpm/librdkafka.spec
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/packaging/rpm/librdkafka.spec b/thirdparty/librdkafka-0.11.4/packaging/rpm/librdkafka.spec
new file mode 100644
index 0000000..0591a61
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/packaging/rpm/librdkafka.spec
@@ -0,0 +1,104 @@
+Name:    librdkafka
+Version: %{__version}
+Release: %{__release}%{?dist}
+%define soname 1
+
+Summary: The Apache Kafka C library
+Group:   Development/Libraries/C and C++
+License: BSD-2-Clause
+URL:     https://github.com/edenhill/librdkafka
+Source:	 librdkafka-%{version}.tar.gz
+
+BuildRequires: zlib-devel libstdc++-devel gcc >= 4.1 gcc-c++ openssl-devel cyrus-sasl-devel lz4-devel python
+BuildRoot: %(mktemp -ud %{_tmppath}/%{name}-%{version}-%{release}-XXXXXX)
+
+%define _source_payload w9.gzdio
+%define _binary_payload w9.gzdio
+
+%description
+librdkafka is the C/C++ client library implementation of the Apache Kafka protocol, containing both Producer and Consumer support.
+
+
+%package -n %{name}%{soname}
+Summary: The Apache Kafka C library
+Group:   Development/Libraries/C and C++
+Requires: zlib libstdc++ cyrus-sasl
+# openssl libraries were split out into the openssl-libs package in RHEL 7
+%if 0%{?rhel} >= 7
+Requires: openssl-libs
+%else
+Requires: openssl
+%endif
+
+%description -n %{name}%{soname}
+librdkafka is the C/C++ client library implementation of the Apache Kafka protocol, containing both Producer and Consumer support.
+
+
+%package -n %{name}-devel
+Summary: The Apache Kafka C library (Development Environment)
+Group:   Development/Libraries/C and C++
+Requires: %{name}%{soname} = %{version}
+
+%description -n %{name}-devel
+librdkafka is the C/C++ client library implementation of the Apache Kafka protocol, containing both Producer and Consumer support.
+
+This package contains headers and libraries required to build applications
+using librdkafka.
+
+
+%prep
+%setup -q -n %{name}-%{version}
+
+%configure
+
+%build
+make
+
+%install
+rm -rf %{buildroot}
+DESTDIR=%{buildroot} make install
+
+%clean
+rm -rf %{buildroot}
+
+%post   -n %{name}%{soname} -p /sbin/ldconfig
+%postun -n %{name}%{soname} -p /sbin/ldconfig
+
+%files -n %{name}%{soname}
+%defattr(444,root,root)
+%{_libdir}/librdkafka.so.%{soname}
+%{_libdir}/librdkafka++.so.%{soname}
+%defattr(-,root,root)
+%doc README.md CONFIGURATION.md INTRODUCTION.md
+%doc LICENSE LICENSE.pycrc LICENSE.queue LICENSE.snappy LICENSE.tinycthread LICENSE.wingetopt
+
+%defattr(-,root,root)
+#%{_bindir}/rdkafka_example
+#%{_bindir}/rdkafka_performance
+
+
+%files -n %{name}-devel
+%defattr(-,root,root)
+%{_includedir}/librdkafka
+%defattr(444,root,root)
+%{_libdir}/librdkafka.a
+%{_libdir}/librdkafka.so
+%{_libdir}/librdkafka++.a
+%{_libdir}/librdkafka++.so
+%{_libdir}/pkgconfig/rdkafka++.pc
+%{_libdir}/pkgconfig/rdkafka.pc
+%{_libdir}/pkgconfig/rdkafka-static.pc
+%{_libdir}/pkgconfig/rdkafka++-static.pc
+
+%changelog
+* Thu Apr 09 2015 Eduard Iskandarov <e....@corp.mail.ru> 0.8.6-0
+- 0.8.6 simplify build process
+
+* Fri Oct 24 2014 Magnus Edenhill <rd...@edenhill.se> 0.8.5-0
+- 0.8.5 release
+
+* Mon Aug 18 2014 Magnus Edenhill <rd...@edenhill.se> 0.8.4-0
+- 0.8.4 release
+
+* Mon Mar 17 2014 Magnus Edenhill <vk...@edenhill.se> 0.8.3-0
+- Initial RPM package

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/packaging/tools/build-debian.sh
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/packaging/tools/build-debian.sh b/thirdparty/librdkafka-0.11.4/packaging/tools/build-debian.sh
new file mode 100755
index 0000000..ea0108d
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/packaging/tools/build-debian.sh
@@ -0,0 +1,53 @@
+#!/bin/bash
+#
+# Build librdkafka on a bare-bones Debian host, such as the microsoft/dotnet:2-sdk
+# Docker image.
+#
+# WITH openssl 1.0, zlib
+# WITHOUT libsasl2, lz4(ext, using builtin instead)
+#
+# Usage (from top-level librdkafka dir):
+#   docker run -it -v $PWD:/v microsoft/dotnet:2-sdk /v/packaging/tools/build-debian.sh /v /v/librdkafka-debian9.tgz
+#
+
+
+set -ex
+
+LRK_DIR=$1
+OUT_TGZ=$2
+
+if [[ ! -f $LRK_DIR/configure.librdkafka || -z $OUT_TGZ ]]; then
+    echo "Usage: $0 <librdkafka-root-direcotry> <output-tgz>"
+    exit 1
+fi
+
+set -u
+
+apt-get update
+apt-get install -y gcc g++ libssl1.0-dev zlib1g-dev python2.7 git-core make
+
+
+# Copy the librdkafka git archive to a new location to avoid messing
+# up the librdkafka working directory.
+
+BUILD_DIR=$(mktemp -d)
+
+pushd $BUILD_DIR
+
+DEST_DIR=$PWD/dest
+mkdir -p $DEST_DIR
+
+(cd $LRK_DIR ; git archive --format tar HEAD) | tar xf -
+
+./configure --disable-lz4 --prefix $DEST_DIR
+make -j
+make install
+
+# Tar up the output directory
+pushd $DEST_DIR
+tar cvzf $OUT_TGZ .
+popd # $DEST_DIR
+
+popd # $BUILD_DIR
+
+rm -rf "$BUILD_DIR"


[45/51] [partial] nifi-minifi-cpp git commit: MINIFICPP-512 - upgrade to librdkafka 0.11.4

Posted by ph...@apache.org.
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/mklove/modules/configure.cxx
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/mklove/modules/configure.cxx b/thirdparty/librdkafka-0.11.1/mklove/modules/configure.cxx
deleted file mode 100644
index a38ac73..0000000
--- a/thirdparty/librdkafka-0.11.1/mklove/modules/configure.cxx
+++ /dev/null
@@ -1,8 +0,0 @@
-#!/bin/bash
-#
-# C++ detection
-#
-# This script simply limits the checks of configure.cc
-
-
-MKL_CC_WANT_CXX=1

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/mklove/modules/configure.fileversion
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/mklove/modules/configure.fileversion b/thirdparty/librdkafka-0.11.1/mklove/modules/configure.fileversion
deleted file mode 100644
index 9bea117..0000000
--- a/thirdparty/librdkafka-0.11.1/mklove/modules/configure.fileversion
+++ /dev/null
@@ -1,65 +0,0 @@
-#!/bin/bash
-#
-# Reads version from file and sets variables accordingly
-# The first non-commented line in the file is expected to be the version string.
-# Arguments:
-#    filename
-#    STR_VERSION_VARIABLE_NAME
-#  [ HEX_VERSION_VARIABLE_NAME ]
-#
-# Example: Set string version in variable named "MYVERSION_STR" and
-#          the hex representation in "MYVERSION"
-#   mkl_require VERSION.txt MYVERSION_STR MYVERSION
-
-if [[ -z "$2" ]]; then
-    mkl_fail "fileversion" "none" "fail" "Missing argument(s), expected: FILENAME STR_VER HEX_VER"
-    return 0
-fi
-
-fileversion_file="$1"
-fileversion_strvar="$2"
-fileversion_hexvar="$3"
-
-function checks {
-    mkl_check_begin "fileversion" "" "no-cache" "version from file $fileversion_file"
-
-    if [[ ! -s $fileversion_file ]]; then
-        mkl_check_failed "fileversion" "" "fail" \
-            "Version file $fileversion_file is not readable"
-        return 1
-    fi
-
-    local orig=$(grep -v ^\# "$fileversion_file" | grep -v '^$' | head -1)
-    # Strip v prefix if any
-    orig=${orig#v}
-
-    # Try to decode version string into hex
-    # Supported format is "[v]NN.NN.NN[.NN]"
-    if [[ ! -z $fileversion_hexvar ]]; then
-        local hex=""
-        local s=${orig#v} # Strip v prefix, if any.
-        local ncnt=0
-        local n=
-        for n in ${s//./ } ; do
-            if [[ ! ( "$n" =~ ^[0-9][0-9]?$ ) ]]; then
-                mkl_check_failed "fileversion" "" "fail" \
-                    "$fileversion_file: Could not decode '$orig' into hex version, expecting format 'NN.NN.NN[.NN]'"
-                return 1
-            fi
-            hex="$hex$(printf %02x $n)"
-            ncnt=$(expr $ncnt + 1)
-        done
-
-        if [[ ! -z $hex ]]; then
-            # Finish all four bytes
-            for n in {$ncnt..4} ; do
-                hex="$hex$(printf %02x 0)"
-            done
-            mkl_allvar_set "fileversion" "$fileversion_hexvar" "0x$hex"
-        fi
-    fi
-
-    mkl_allvar_set "fileversion" "$fileversion_strvar" "$orig"
-
-    mkl_check_done "fileversion" "" "cont" "ok" "${!fileversion_strvar}"
-}

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/mklove/modules/configure.gitversion
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/mklove/modules/configure.gitversion b/thirdparty/librdkafka-0.11.1/mklove/modules/configure.gitversion
deleted file mode 100644
index b6ac486..0000000
--- a/thirdparty/librdkafka-0.11.1/mklove/modules/configure.gitversion
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/bin/bash
-#
-# Sets version variable from git information.
-# Optional arguments:
-#   "as"
-#   VARIABLE_NAME
-#
-# Example: Set version in variable named "MYVERSION":
-#   mkl_require gitversion as MYVERSION
-
-if [[ $1 == "as" ]]; then
-    __MKL_GITVERSION_VARNAME="$2"
-else
-    __MKL_GITVERSION_VARNAME="VERSION"
-fi
-
-function checks {
-    mkl_allvar_set "gitversion" "$__MKL_GITVERSION_VARNAME" "$(git describe --abbrev=6 --tags HEAD --always)"
-}

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/mklove/modules/configure.good_cflags
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/mklove/modules/configure.good_cflags b/thirdparty/librdkafka-0.11.1/mklove/modules/configure.good_cflags
deleted file mode 100644
index c8587f2..0000000
--- a/thirdparty/librdkafka-0.11.1/mklove/modules/configure.good_cflags
+++ /dev/null
@@ -1,18 +0,0 @@
-#!/bin/bash
-#
-# Provides some known-good CFLAGS
-# Sets:
-#  CFLAGS
-#  CXXFLAGS
-#  CPPFLAGS
-
-
-function checks {
-    mkl_mkvar_append CPPFLAGS CPPFLAGS \
-        "-Wall -Wsign-compare -Wfloat-equal -Wpointer-arith -Wcast-align"
-
-    if [[ $MKL_WANT_WERROR = "y" ]]; then
-        mkl_mkvar_append CPPFLAGS CPPFLAGS \
-            "-Werror"
-    fi
-}

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/mklove/modules/configure.host
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/mklove/modules/configure.host b/thirdparty/librdkafka-0.11.1/mklove/modules/configure.host
deleted file mode 100644
index 4dfdce8..0000000
--- a/thirdparty/librdkafka-0.11.1/mklove/modules/configure.host
+++ /dev/null
@@ -1,110 +0,0 @@
-#!/bin/bash
-#
-# Host OS support
-# Sets:
-#  HOST
-#  BUILD
-#  TARGET
-
-# FIXME: No need for this right now
-#mkl_require host_linux
-#mkl_require host_osx
-#mkl_require host_cygwin
-
-#mkl_option "Cross-compilation" "mk:HOST_OS" "--host-os=osname" "Host OS (linux,osx,cygwin,..)" "auto"
-
-
-# autoconf compatibility - does nothing at this point
-mkl_option "Cross-compilation" "mk:HOST" "--host=HOST" "Configure to build programs to run on HOST (no-op)"
-mkl_option "Cross-compilation" "mk:BUILD" "--build=BUILD" "Configure for building on BUILD (no-op)"
-mkl_option "Cross-compilation" "mk:TARGET" "--target=TARGET" "Configure for building cross-toolkits for platform TARGET (no-op)"
-
-function checks {
-    # Try to figure out what OS/distro we are running on.
-    mkl_check_begin "distro" "" "no-cache" "OS or distribution"
-
-    solib_ext=.so
-
-    # Try lsb_release
-    local sys
-    sys=$(lsb_release -is 2>/dev/null)
-    if [[ $? -gt 0 ]]; then
-        # That didn't work, try uname.
-        local kn=$(uname -s)
-        case $kn in
-            Linux)
-                sys=Linux
-                solib_ext=.so
-                ;;
-            Darwin)
-                sys=osx
-                solib_ext=.dylib
-                ;;
-            CYGWIN*)
-                sys=Cygwin
-                solib_ext=.dll
-                ;;
-            *)
-                sys="$kn"
-                solib_ext=.so
-                ;;
-        esac
-    fi
-
-    if [[ -z $sys ]]; then
-        mkl_check_failed "distro" "" "ignore" ""
-    else
-        mkl_check_done "distro" "" "ignore" "ok" "$sys"
-        mkl_mkvar_set "distro" "MKL_DISTRO" "$sys"
-        mkl_allvar_set "distro" "SOLIB_EXT" "$solib_ext"
-    fi
-}
-
-#function checks {
-#    mkl_check_begin "host" "HOST_OS" "no-cache" "host OS"
-#
-#    #
-#    # If --host-os=.. was not specified then this is most likely not a
-#    # a cross-compilation and we can base the host-os on the native OS.
-#    #
-#    if [[ $HOST_OS != "auto" ]]; then
-#        mkl_check_done "host" "HOST_OS" "cont" "ok" "$HOST_OS"
-#        return 0
-#    fi
-#
-#    kn=$(uname -s)
-#    case $kn in
-#        Linux)
-#            hostos=linux
-#            ;;
-#        Darwin)
-#            hostos=osx
-#            ;;
-#        CYGWIN*)
-#            hostos=cygwin
-#            ;;
-#        *)
-#            hostos="$(mkl_lower $kn)"
-#            mkl_err  "Unknown host OS kernel name: $kn"
-#            mkl_err0 "  Will attempt to load module host_$hostos anyway."
-#            mkl_err0 "  Please consider writing a configure.host_$hostos"
-#            ;;
-#    esac
-#
-#    if ! mkl_require --try "host_$hostos"; then
-#        # Module not found
-#        mkl_check_done "host" "HOST_OS" "cont" "failed" "$kn?"
-#    else
-#        # Module loaded
-#
-#        if mkl_func_exists "host_${hostos}_setup" ; then
-#            "host_${hostos}_setup"
-#        fi
-#
-#        mkl_check_done "host" "HOST_OS" "cont" "ok" "$hostos"
-#    fi
-#
-#    # Set HOST_OS var even if probing failed.
-#    mkl_mkvar_set "host" "HOST_OS" "$hostos"
-#}
-

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/mklove/modules/configure.lib
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/mklove/modules/configure.lib b/thirdparty/librdkafka-0.11.1/mklove/modules/configure.lib
deleted file mode 100644
index 49ed293..0000000
--- a/thirdparty/librdkafka-0.11.1/mklove/modules/configure.lib
+++ /dev/null
@@ -1,49 +0,0 @@
-#!/bin/bash
-#
-# Module for building shared libraries
-# Sets:
-#  WITH_GNULD | WITH_OSXLD
-#  WITH_LDS  - linker script support
-mkl_require pic
-
-function checks {
-
-    mkl_mkvar_append LIB_LDFLAGS LIB_LDFLAGS '-shared'
-
-    # Check what arguments to pass to CC or LD for shared libraries
-    mkl_meta_set gnulib name "GNU-compatible linker options"
-    mkl_meta_set osxlib name "OSX linker options"
-
-    if mkl_compile_check gnulib WITH_GNULD cont CC \
-	"-shared -Wl,-soname,mkltest.0" "" ; then
-	# GNU linker
-	mkl_mkvar_append LIB_LDFLAGS LIB_LDFLAGS '-Wl,-soname,$(LIBFILENAME)'
-
-    elif mkl_compile_check osxlib WITH_OSXLD cont CC \
-	"-dynamiclib -Wl,-install_name,/tmp/mkltest.so.0" ; then
-	# OSX linker
-        mkl_mkvar_append LIB_LDFLAGS LIB_LDFLAGS '-dynamiclib -Wl,-install_name,$(DESTDIR)$(libdir)/$(LIBFILENAME)'
-    fi
-
-    # Check what argument is needed for passing linker script.
-    local ldsfile=$(mktemp _mkltmpXXXXXX)
-    echo "{
- global:
-  *;
-};
-" > $ldsfile
-
-    mkl_meta_set ldsflagvs name "GNU linker-script ld flag"
-    mkl_meta_set ldsflagm name "Solaris linker-script ld flag"
-    if mkl_compile_check ldsflagvs "" cont CC \
-	"-shared -Wl,--version-script=$ldsfile"; then
-	mkl_mkvar_set ldsflagvs LDFLAG_LINKERSCRIPT "-Wl,--version-script="
-	mkl_mkvar_set lib_lds WITH_LDS y
-    elif mkl_compile_check ldsflagm ""  ignore CC \
-	"-shared -Wl,-M$ldsfile"; then
-	mkl_mkvar_set ldsflagm LDFLAG_LINKERSCRIPT "-Wl,-M"
-	mkl_mkvar_set lib_lds WITH_LDS y
-    fi
-
-    rm -f "$ldsfile"
-}

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/mklove/modules/configure.parseversion
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/mklove/modules/configure.parseversion b/thirdparty/librdkafka-0.11.1/mklove/modules/configure.parseversion
deleted file mode 100644
index 0ee0f57..0000000
--- a/thirdparty/librdkafka-0.11.1/mklove/modules/configure.parseversion
+++ /dev/null
@@ -1,95 +0,0 @@
-#!/bin/bash
-#
-# Parses the provided version string and creates variables accordingly.
-#  [ "hex2str" <fmt> ]  -- version-string is in hex (e.g., 0x00080300)
-#    version-string
-#    STR_VERSION_VARIABLE_NAME
-#  [ HEX_VERSION_VARIABLE_NAME ]
-#
-# Note: The version will also be set in MKL_APP_VERSION
-#
-# Example: Set string version in variable named "MYVERSION_STR" and
-#          the hex representation in "MYVERSION"
-#   mkl_require parseversion "$(head -1 VERSION.txt)" MYVERSION_STR MYVERSION
-
-if [[ $1 == "hex2str" ]]; then
-    parseversion_type="hex"
-    parseversion_fmt="${2}:END:%d%d%d%d"
-    shift
-    shift
-else
-    parseversion_type=""
-    parseversion_fmt="%d.%d.%d.%d"
-fi
-
-if [[ -z "$2" ]]; then
-    mkl_fail "parseversion" "none" "fail" "Missing argument(s)"
-    return 0
-fi
-
-parseversion_orig="$1"
-parseversion_strvar="$2"
-parseversion_hexvar="$3"
-
-function checks {
-    mkl_check_begin --verb "parsing" "parseversion" "" "no-cache" \
-        "version '$parseversion_orig'"
-
-    # Strip v prefix if any
-    orig=${parseversion_orig#v}
-
-    if [[ $orig == 0x* ]]; then
-        parseversion_type="hex"
-        orig=${orig#0x}
-    fi
-
-    if [[ -z $orig ]]; then
-        mkl_check_failed "parseversion" "" "fail" "Version string is empty"
-        return 1
-    fi
-
-    # If orig is in hex we construct a string format instead.
-    if [[ $parseversion_type == "hex" ]]; then
-        local s=$orig
-        local str=""
-        local vals=""
-        while [[ ! -z $s ]]; do
-            local n=${s:0:2}
-            s=${s:${#n}}
-            vals="${vals}$(printf %d 0x$n) "
-        done
-        str=$(printf "$parseversion_fmt" $vals)
-        orig=${str%:END:*}
-    fi
-
-
-    # Try to decode version string into hex
-    # Supported format is "[v]NN.NN.NN[.NN]"
-    if [[ ! -z $parseversion_hexvar ]]; then
-        local hex=""
-        local s=$orig
-        local ncnt=0
-        local n=
-        for n in ${s//./ } ; do
-            if [[ ! ( "$n" =~ ^[0-9][0-9]?$ ) ]]; then
-                mkl_check_failed "parseversion" "" "fail" \
-                    "Could not decode '$parseversion_orig' into hex version, expecting format 'NN.NN.NN[.NN]'"
-                return 1
-            fi
-            hex="$hex$(printf %02x $n)"
-            ncnt=$(expr $ncnt + 1)
-        done
-
-        if [[ ! -z $hex ]]; then
-            # Finish all four bytes
-            while [[ ${#hex} -lt 8 ]]; do
-                hex="$hex$(printf %02x 0)"
-            done
-            mkl_allvar_set "parseversion" "$parseversion_hexvar" "0x$hex"
-        fi
-    fi
-
-    mkl_allvar_set "parseversion" "$parseversion_strvar" "$orig"
-    mkl_allvar_set "parseversion" MKL_APP_VERSION "$orig"
-    mkl_check_done "parseversion" "" "cont" "ok" "${!parseversion_strvar}"
-}

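For readers skimming the removed parseversion module above: it converts a dotted version such as "0.11.1" into a zero-padded hex word by emitting one byte (two hex digits) per component. Below is a minimal Python sketch of that conversion, for illustration only; the function name is made up and nothing here is part of the deleted tree.

    import re

    def version_to_hex(ver):
        """Mirror of the parseversion logic: '[v]NN.NN.NN[.NN]' -> '0xAABBCCDD'."""
        parts = ver.lstrip("v").split(".")
        if not all(re.match(r"^[0-9][0-9]?$", p) for p in parts):
            raise ValueError("expected format 'NN.NN.NN[.NN]'")
        hexstr = "".join("%02x" % int(p) for p in parts)
        return "0x" + hexstr.ljust(8, "0")   # pad missing trailing bytes with 00

    assert version_to_hex("v0.11.1") == "0x000b0100"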
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/mklove/modules/configure.pic
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/mklove/modules/configure.pic b/thirdparty/librdkafka-0.11.1/mklove/modules/configure.pic
deleted file mode 100644
index 8f138f8..0000000
--- a/thirdparty/librdkafka-0.11.1/mklove/modules/configure.pic
+++ /dev/null
@@ -1,16 +0,0 @@
-#!/bin/bash
-#
-# Checks if -fPIC is supported, and if so turns it on.
-#
-# Sets:
-#  HAVE_PIC
-#  CPPFLAGS
-#
-
-function checks {
-
-    if mkl_compile_check PIC HAVE_PIC disable CC "-fPIC" "" ; then
-        mkl_mkvar_append CPPFLAGS CPPFLAGS "-fPIC"
-    fi
-}
-

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/mklove/modules/configure.socket
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/mklove/modules/configure.socket b/thirdparty/librdkafka-0.11.1/mklove/modules/configure.socket
deleted file mode 100644
index f0777ab..0000000
--- a/thirdparty/librdkafka-0.11.1/mklove/modules/configure.socket
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/bash
-#
-# Provides proper compiler flags for socket support, e.g. socket(3).
-
-function checks {
-
-    local src="
-#include <sys/types.h>
-#include <sys/socket.h>
-#include <unistd.h>
-void foo (void) {
-   int s = socket(0, 0, 0);
-   close(s);
-}"
-    if ! mkl_compile_check socket "" cont CC "" "$src"; then
-	if mkl_compile_check --ldflags="-lsocket -lnsl" socket_nsl "" fail CC "" "$src"; then
-	    mkl_mkvar_append socket_nsl LIBS "-lsocket -lnsl"
-	fi
-    fi
-}

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/packaging/RELEASE.md
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/packaging/RELEASE.md b/thirdparty/librdkafka-0.11.1/packaging/RELEASE.md
deleted file mode 100644
index 28b002d..0000000
--- a/thirdparty/librdkafka-0.11.1/packaging/RELEASE.md
+++ /dev/null
@@ -1,116 +0,0 @@
-# librdkafka release process
-
-This guide outlines the steps needed to release a new version of librdkafka
-and publish packages to channels (NuGet, Homebrew, etc.).
-
-Releases are done in two phases:
- * release-candidate(s) - RC1 will be the first release candidate, and any
-   changes to the repository will require a new RC.
- * final release - the final release is based directly on the last RC tag
-   followed by a single version-bump commit (see below).
-
-Release tag and version format:
- * release-candidate: vA.B.C-RCn
- * final release: vA.B.C
-
-
-
-## Run regression tests
-
-**Build tests:**
-
-    $ cd tests
-    $ make -j build
-
-**Run the full regression test suite:** (requires Linux and the trivup python package)
-
-    $ make full
-
-
-If all tests pass, carry on; otherwise identify and fix the bug and start over.
-
-
-## Pre-release code tasks
-
-**Switch to the release branch which is of the format `A.B.C.x` or `A.B.x`.**
-
-    $ git checkout 0.11.1.x
-
-
-**Update in-code versions.**
-
-Change the RD_KAFKA_VERSION defines in src/rdkafka.h and src-cpp/rdkafkacpp.h
-to the version to build, such as 0x000b01c9 for v0.11.1-RC1, or 0x000b01ff for
-the final v0.11.1 release.
-
-The last octet in the version hex number is the pre-build/release-candidate
-number, where 0xAABBCCff is the final release for version 0xAABBCC.
-Release candidates start at 200, thus 0xAABBCCc9 is RC1, 0xAABBCCca is RC2, etc.
-
-
-**Create tag.**
-
-    $  git tag v0.11.1-RC1 # for an RC
-    # git tag v0.11.1     # for the final release
-
-
-**Push tags and commit to github**
-
-    # Dry-run first to make sure things look correct.
-    $ git push --dry-run --tags origin v0.11.1-RC1
-
-    # Live
-    $ git push --tags origin v0.11.1-RC1
-
-
-
-## Creating packages
-
-As soon as a tag is pushed the CI systems (Travis and AppVeyor) will
-start their builds and eventually upload the packaging artifacts to S3.
-Wait until this process is finished by monitoring the two CIs:
-
- * https://travis-ci.org/edenhill/librdkafka
- * https://ci.appveyor.com/project/edenhill/librdkafka
-
-
-### Create NuGet package
-
-This is done on a Linux host with docker installed; it also requires S3
-credentials to be set up.
-
-    $ cd packaging/nuget
-    $ pip install -r requirements.txt  # if necessary
-    $ ./release.py v0.11.1-RC1
-
-Test the generated librdkafka.redist.0.11.1-RC1.nupkg and
-then upload it to NuGet manually:
-
- * https://www.nuget.org/packages/manage/upload
-
-
-### Homebrew recipe update
-
-The brew-update-pr.sh script automatically pushes a PR to homebrew-core
-with a patch to update the librdkafka version of the formula.
-This should only be done for final releases and not release candidates.
-
-On a MacOSX host with homebrew installed:
-
-    $ cd packaging/homebrew
-    # Dry-run first to see that things are okay.
-    $ ./brew-update-pr.sh v0.11.1
-    # If everything looks good, do the live push:
-    $ ./brew-update-pr.sh --upload v0.11.1
-
-
-### Deb and RPM packaging
-
-Debian and RPM packages are generated by Confluent packaging in a separate
-process and the resulting packages are made available on Confluent's
-APT and YUM repositories.
-
-That process is outside the scope of this document.
-
-See the Confluent docs for instructions on how to access these packages:
-https://docs.confluent.io/current/installation.html

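The hex version convention described in the removed RELEASE.md above is easy to check mechanically. The following is a small illustrative Python sketch, not part of the repository (the function name is hypothetical); it decodes an RD_KAFKA_VERSION word, treating a last octet of 0xff as the final release and values counting up from 200 as release candidates:

    def describe_version(v):
        """Decode an RD_KAFKA_VERSION hex word, e.g. 0x000b01c9 -> '0.11.1-RC1'."""
        major = (v >> 24) & 0xff
        minor = (v >> 16) & 0xff
        patch = (v >> 8) & 0xff
        pre = v & 0xff
        base = "%d.%d.%d" % (major, minor, patch)
        if pre == 0xff:
            return base                       # final release
        return "%s-RC%d" % (base, pre - 200)  # 0xc9 (201) is RC1, 0xca (202) is RC2, ...

    assert describe_version(0x000b01c9) == "0.11.1-RC1"
    assert describe_version(0x000b01ff) == "0.11.1"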
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/packaging/archlinux/PKGBUILD
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/packaging/archlinux/PKGBUILD b/thirdparty/librdkafka-0.11.1/packaging/archlinux/PKGBUILD
deleted file mode 100644
index 9321698..0000000
--- a/thirdparty/librdkafka-0.11.1/packaging/archlinux/PKGBUILD
+++ /dev/null
@@ -1,5 +0,0 @@
-pkgname=librdkafka
-pkgver=master
-pkgrel=1
-pkgdesc="The Apache Kafka C/C++ client library"
-arch=('i686' 'x86_64')

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/packaging/cmake/Config.cmake.in
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/packaging/cmake/Config.cmake.in b/thirdparty/librdkafka-0.11.1/packaging/cmake/Config.cmake.in
deleted file mode 100644
index ef9e067..0000000
--- a/thirdparty/librdkafka-0.11.1/packaging/cmake/Config.cmake.in
+++ /dev/null
@@ -1,20 +0,0 @@
-@PACKAGE_INIT@
-
-include(CMakeFindDependencyMacro)
-
-if(@WITH_ZLIB@)
-  find_dependency(ZLIB)
-endif()
-
-if(@WITH_SSL@)
-  if(@WITH_BUNDLED_SSL@)
-    # TODO: custom SSL library should be installed
-  else()
-    find_dependency(OpenSSL)
-  endif()
-endif()
-
-find_dependency(Threads)
-
-include("${CMAKE_CURRENT_LIST_DIR}/@targets_export_name@.cmake")
-check_required_components("@PROJECT_NAME@")

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/packaging/cmake/try_compile/atomic_32_test.c
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/packaging/cmake/try_compile/atomic_32_test.c b/thirdparty/librdkafka-0.11.1/packaging/cmake/try_compile/atomic_32_test.c
deleted file mode 100644
index de9738a..0000000
--- a/thirdparty/librdkafka-0.11.1/packaging/cmake/try_compile/atomic_32_test.c
+++ /dev/null
@@ -1,8 +0,0 @@
-#include <inttypes.h>
-
-int32_t foo (int32_t i) {
-  return __atomic_add_fetch(&i, 1, __ATOMIC_SEQ_CST);
-}
-
-int main() {
-}

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/packaging/cmake/try_compile/atomic_64_test.c
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/packaging/cmake/try_compile/atomic_64_test.c b/thirdparty/librdkafka-0.11.1/packaging/cmake/try_compile/atomic_64_test.c
deleted file mode 100644
index a713c74..0000000
--- a/thirdparty/librdkafka-0.11.1/packaging/cmake/try_compile/atomic_64_test.c
+++ /dev/null
@@ -1,8 +0,0 @@
-#include <inttypes.h>
-
-int64_t foo (int64_t i) {
-  return __atomic_add_fetch(&i, 1, __ATOMIC_SEQ_CST);
-}
-
-int main() {
-}

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/packaging/cmake/try_compile/rdkafka_setup.cmake
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/packaging/cmake/try_compile/rdkafka_setup.cmake b/thirdparty/librdkafka-0.11.1/packaging/cmake/try_compile/rdkafka_setup.cmake
deleted file mode 100644
index b5a3535..0000000
--- a/thirdparty/librdkafka-0.11.1/packaging/cmake/try_compile/rdkafka_setup.cmake
+++ /dev/null
@@ -1,76 +0,0 @@
-try_compile(
-    HAVE_REGEX
-    "${CMAKE_CURRENT_BINARY_DIR}/try_compile"
-    "${TRYCOMPILE_SRC_DIR}/regex_test.c"
-)
-
-try_compile(
-    HAVE_STRNDUP
-    "${CMAKE_CURRENT_BINARY_DIR}/try_compile"
-    "${TRYCOMPILE_SRC_DIR}/strndup_test.c"
-)
-
-# Atomic 32 tests {
-set(LINK_ATOMIC NO)
-set(HAVE_ATOMICS_32 NO)
-set(HAVE_ATOMICS_32_SYNC NO)
-
-try_compile(
-    _atomics_32
-    "${CMAKE_CURRENT_BINARY_DIR}/try_compile"
-    "${TRYCOMPILE_SRC_DIR}/atomic_32_test.c"
-)
-
-if(_atomics_32)
-  set(HAVE_ATOMICS_32 YES)
-else()
-  try_compile(
-      _atomics_32_lib
-      "${CMAKE_CURRENT_BINARY_DIR}/try_compile"
-      "${TRYCOMPILE_SRC_DIR}/atomic_32_test.c"
-      LINK_LIBRARIES "-latomic"
-  )
-  if(_atomics_32_lib)
-    set(HAVE_ATOMICS_32 YES)
-    set(LINK_ATOMIC YES)
-  else()
-    try_compile(
-        HAVE_ATOMICS_32_SYNC
-        "${CMAKE_CURRENT_BINARY_DIR}/try_compile"
-        "${TRYCOMPILE_SRC_DIR}/sync_32_test.c"
-    )
-  endif()
-endif()
-# }
-
-# Atomic 64 tests {
-set(HAVE_ATOMICS_64 NO)
-set(HAVE_ATOMICS_64_SYNC NO)
-
-try_compile(
-    _atomics_64
-    "${CMAKE_CURRENT_BINARY_DIR}/try_compile"
-    "${TRYCOMPILE_SRC_DIR}/atomic_64_test.c"
-)
-
-if(_atomics_64)
-  set(HAVE_ATOMICS_64 YES)
-else()
-  try_compile(
-      _atomics_64_lib
-      "${CMAKE_CURRENT_BINARY_DIR}/try_compile"
-      "${TRYCOMPILE_SRC_DIR}/atomic_64_test.c"
-      LINK_LIBRARIES "-latomic"
-  )
-  if(_atomics_64_lib)
-    set(HAVE_ATOMICS_64 YES)
-    set(LINK_ATOMIC YES)
-  else()
-    try_compile(
-        HAVE_ATOMICS_64_SYNC
-        "${CMAKE_CURRENT_BINARY_DIR}/try_compile"
-        "${TRYCOMPILE_SRC_DIR}/sync_64_test.c"
-    )
-  endif()
-endif()
-# }

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/packaging/cmake/try_compile/regex_test.c
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/packaging/cmake/try_compile/regex_test.c b/thirdparty/librdkafka-0.11.1/packaging/cmake/try_compile/regex_test.c
deleted file mode 100644
index 1d6eeb3..0000000
--- a/thirdparty/librdkafka-0.11.1/packaging/cmake/try_compile/regex_test.c
+++ /dev/null
@@ -1,10 +0,0 @@
-#include <stddef.h>
-#include <regex.h>
-
-int main() {
-   regcomp(NULL, NULL, 0);
-   regexec(NULL, NULL, 0, NULL, 0);
-   regerror(0, NULL, NULL, 0);
-   regfree(NULL);
-   return 0;
-}

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/packaging/cmake/try_compile/strndup_test.c
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/packaging/cmake/try_compile/strndup_test.c b/thirdparty/librdkafka-0.11.1/packaging/cmake/try_compile/strndup_test.c
deleted file mode 100644
index 9b62043..0000000
--- a/thirdparty/librdkafka-0.11.1/packaging/cmake/try_compile/strndup_test.c
+++ /dev/null
@@ -1,5 +0,0 @@
-#include <string.h>
-
-int main() {
-   return strndup("hi", 2) ? 0 : 1;
-}

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/packaging/cmake/try_compile/sync_32_test.c
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/packaging/cmake/try_compile/sync_32_test.c b/thirdparty/librdkafka-0.11.1/packaging/cmake/try_compile/sync_32_test.c
deleted file mode 100644
index 44ba120..0000000
--- a/thirdparty/librdkafka-0.11.1/packaging/cmake/try_compile/sync_32_test.c
+++ /dev/null
@@ -1,8 +0,0 @@
-#include <inttypes.h>
-
-int32_t foo (int32_t i) {
-  return __sync_add_and_fetch(&i, 1);
-}
-
-int main() {
-}

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/packaging/cmake/try_compile/sync_64_test.c
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/packaging/cmake/try_compile/sync_64_test.c b/thirdparty/librdkafka-0.11.1/packaging/cmake/try_compile/sync_64_test.c
deleted file mode 100644
index ad06204..0000000
--- a/thirdparty/librdkafka-0.11.1/packaging/cmake/try_compile/sync_64_test.c
+++ /dev/null
@@ -1,8 +0,0 @@
-#include <inttypes.h>
-
-int64_t foo (int64_t i) {
-  return __sync_add_and_fetch(&i, 1);
-}
-
-int main() {
-}

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/packaging/debian/.gitignore
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/packaging/debian/.gitignore b/thirdparty/librdkafka-0.11.1/packaging/debian/.gitignore
deleted file mode 100644
index eb66d4d..0000000
--- a/thirdparty/librdkafka-0.11.1/packaging/debian/.gitignore
+++ /dev/null
@@ -1,6 +0,0 @@
-*.log
-files
-librdkafka-dev
-librdkafka1-dbg
-librdkafka1
-tmp

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/packaging/debian/changelog
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/packaging/debian/changelog b/thirdparty/librdkafka-0.11.1/packaging/debian/changelog
deleted file mode 100644
index c50cb5a..0000000
--- a/thirdparty/librdkafka-0.11.1/packaging/debian/changelog
+++ /dev/null
@@ -1,66 +0,0 @@
-librdkafka (0.8.6-1) unstable; urgency=medium
-
-  * New upstream release.
-  * Backport upstream commit f6fd0da, adding --disable-silent-rules
-    compatibility support to mklove. (Closes: #788742)
-
- -- Faidon Liambotis <pa...@debian.org>  Sun, 19 Jul 2015 01:36:18 +0300
-
-librdkafka (0.8.5-2) unstable; urgency=medium
-
-  * Install rdkafka.pc in the right, multiarch location. (Closes: #766759)
-
- -- Faidon Liambotis <pa...@debian.org>  Sun, 26 Oct 2014 06:47:07 +0200
-
-librdkafka (0.8.5-1) unstable; urgency=medium
-
-  * New upstream release.
-    - Fixes kFreeBSD FTBFS.
-  * Ship rdkafka.pc pkg-config in librdkafka-dev.
-
- -- Faidon Liambotis <pa...@debian.org>  Fri, 24 Oct 2014 18:03:22 +0300
-
-librdkafka (0.8.4-1) unstable; urgency=medium
-
-  * New upstream release, including a new build system.
-    - Add Build-Depends on perl, required by configure.
-    - Support multiarch library paths.
-    - Better detection of architecture atomic builtins, supporting more
-      architectures. (Closes: #739930)
-    - Various portability bugs fixed. (Closes: #730506)
-    - Update debian/librdkafka1.symbols.
-  * Convert to a multiarch package.
-  * Switch to Architecture: any, because of renewed upstream portability.
-  * Update debian/copyright to add src/ before Files: paths.
-  * Update Standards-Version to 3.9.6, no changes needed.
-  * Ship only the C library for now, not the new C++ library; the latter is
-    still in flux in some ways and will probably be shipped in a separate
-    package in a future release.
-
- -- Faidon Liambotis <pa...@debian.org>  Wed, 22 Oct 2014 23:57:24 +0300
-
-librdkafka (0.8.3-1) unstable; urgency=medium
-
-  * New upstream release.
-    - Multiple internal symbols hidden; breaks ABI without a SONAME bump, but
-      these were internal and should not break any applications, packaged or
-      not.
-  * Update Standards-Version to 3.9.5, no changes needed.
-
- -- Faidon Liambotis <pa...@debian.org>  Tue, 18 Feb 2014 02:21:43 +0200
-
-librdkafka (0.8.1-1) unstable; urgency=medium
-
-  * New upstream release.
-    - Multiple fixes to FTBFS on various architectures. (Closes: #730506)
-    - Remove dh_auto_clean override, fixed upstream.
-  * Limit the set of architectures: upstream currently relies on 64-bit atomic
-    operations that several Debian architectures do not support.
-
- -- Faidon Liambotis <pa...@debian.org>  Thu, 05 Dec 2013 16:53:28 +0200
-
-librdkafka (0.8.0-1) unstable; urgency=low
-
-  * Initial release. (Closes: #710271)
-
- -- Faidon Liambotis <pa...@debian.org>  Mon, 04 Nov 2013 16:50:07 +0200

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/packaging/debian/compat
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/packaging/debian/compat b/thirdparty/librdkafka-0.11.1/packaging/debian/compat
deleted file mode 100644
index ec63514..0000000
--- a/thirdparty/librdkafka-0.11.1/packaging/debian/compat
+++ /dev/null
@@ -1 +0,0 @@
-9

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/packaging/debian/control
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/packaging/debian/control b/thirdparty/librdkafka-0.11.1/packaging/debian/control
deleted file mode 100644
index 8274798..0000000
--- a/thirdparty/librdkafka-0.11.1/packaging/debian/control
+++ /dev/null
@@ -1,49 +0,0 @@
-Source: librdkafka
-Priority: optional
-Maintainer: Faidon Liambotis <pa...@debian.org>
-Build-Depends: debhelper (>= 9), zlib1g-dev, libssl-dev, libsasl2-dev, python
-Standards-Version: 3.9.6
-Section: libs
-Homepage: https://github.com/edenhill/librdkafka
-Vcs-Git: git://github.com/edenhill/librdkafka.git -b debian
-Vcs-Browser: https://github.com/edenhill/librdkafka/tree/debian
-
-Package: librdkafka1
-Architecture: any
-Multi-Arch: same
-Depends: ${shlibs:Depends}, ${misc:Depends}
-Description: library implementing the Apache Kafka protocol
- librdkafka is a C implementation of the Apache Kafka protocol. It currently
- implements the 0.8 version of the protocol and can be used to develop both
- Producers and Consumers.
- .
- More information about Apache Kafka can be found at http://kafka.apache.org/
-
-Package: librdkafka-dev
-Section: libdevel
-Architecture: any
-Multi-Arch: same
-Depends: librdkafka1 (= ${binary:Version}), ${misc:Depends}
-Description: library implementing the Apache Kafka protocol (development headers)
- librdkafka is a C implementation of the Apache Kafka protocol. It currently
- implements the 0.8 version of the protocol and can be used to develop both
- Producers and Consumers.
- .
- More information about Apache Kafka can be found at http://kafka.apache.org/
- .
- This package contains the development headers.
-
-Package: librdkafka1-dbg
-Section: debug
-Priority: extra
-Architecture: any
-Multi-Arch: same
-Depends: librdkafka1 (= ${binary:Version}), ${misc:Depends}
-Description: library implementing the Apache Kafka protocol (debugging symbols)
- librdkafka is a C implementation of the Apache Kafka protocol. It currently
- implements the 0.8 version of the protocol and can be used to develop both
- Producers and Consumers.
- .
- More information about Apache Kafka can be found at http://kafka.apache.org/
- .
- This package contains the debugging symbols.

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/packaging/debian/copyright
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/packaging/debian/copyright b/thirdparty/librdkafka-0.11.1/packaging/debian/copyright
deleted file mode 100644
index 20885d9..0000000
--- a/thirdparty/librdkafka-0.11.1/packaging/debian/copyright
+++ /dev/null
@@ -1,84 +0,0 @@
-Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
-Upstream-Name: librdkafka
-Source: https://github.com/edenhill/librdkafka
-
-License: BSD-2-clause
-  Redistribution and use in source and binary forms, with or without
-  modification, are permitted provided that the following conditions are met:
-  .
-  1. Redistributions of source code must retain the above copyright notice,
-     this list of conditions and the following disclaimer.
-  2. Redistributions in binary form must reproduce the above copyright notice,
-     this list of conditions and the following disclaimer in the documentation
-     and/or other materials provided with the distribution.
-  .
-  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-  POSSIBILITY OF SUCH DAMAGE.
-
-Files: *
-Copyright: 2012-2015, Magnus Edenhill
-License: BSD-2-clause
-
-Files: src/rdcrc32.c src/rdcrc32.h
-Copyright: 2006-2012, Thomas Pircher <te...@gmx.net>
-License: MIT
-  Permission is hereby granted, free of charge, to any person obtaining a copy
-  of this software and associated documentation files (the "Software"), to deal
-  in the Software without restriction, including without limitation the rights
-  to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-  copies of the Software, and to permit persons to whom the Software is
-  furnished to do so, subject to the following conditions:
-  .
-  The above copyright notice and this permission notice shall be included in
-  all copies or substantial portions of the Software.
-  . 
-  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-  IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-  FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-  AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-  LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-  OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-  THE SOFTWARE.
-
-Files: src/snappy.c src/snappy.h src/snappy_compat.h
-Copyright: 2005, Google Inc.
-           2011, Intel Corporation
-License: BSD-3-clause
-  Redistribution and use in source and binary forms, with or without
-  modification, are permitted provided that the following conditions are
-  met:
-  .
-      * Redistributions of source code must retain the above copyright
-  notice, this list of conditions and the following disclaimer.
-      * Redistributions in binary form must reproduce the above
-  copyright notice, this list of conditions and the following disclaimer
-  in the documentation and/or other materials provided with the
-  distribution.
-      * Neither the name of Google Inc. nor the names of its
-  contributors may be used to endorse or promote products derived from
-  this software without specific prior written permission.
-  .
-  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-Files: debian/*
-Copyright: 2013 Faidon Liambotis <pa...@debian.org>
-License: BSD-2-clause

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/packaging/debian/docs
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/packaging/debian/docs b/thirdparty/librdkafka-0.11.1/packaging/debian/docs
deleted file mode 100644
index 891afcd..0000000
--- a/thirdparty/librdkafka-0.11.1/packaging/debian/docs
+++ /dev/null
@@ -1,3 +0,0 @@
-README.md
-INTRODUCTION.md
-CONFIGURATION.md

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/packaging/debian/gbp.conf
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/packaging/debian/gbp.conf b/thirdparty/librdkafka-0.11.1/packaging/debian/gbp.conf
deleted file mode 100644
index b2a0f02..0000000
--- a/thirdparty/librdkafka-0.11.1/packaging/debian/gbp.conf
+++ /dev/null
@@ -1,9 +0,0 @@
-[buildpackage]
-upstream-tree=tag
-upstream-branch=master
-debian-branch=debian
-upstream-tag=%(version)s
-debian-tag=debian/%(version)s
-no-create-orig = True
-tarball-dir = ../tarballs
-export-dir = ../build-area

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/packaging/debian/librdkafka-dev.dirs
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/packaging/debian/librdkafka-dev.dirs b/thirdparty/librdkafka-0.11.1/packaging/debian/librdkafka-dev.dirs
deleted file mode 100644
index 4418816..0000000
--- a/thirdparty/librdkafka-0.11.1/packaging/debian/librdkafka-dev.dirs
+++ /dev/null
@@ -1,2 +0,0 @@
-usr/lib
-usr/include

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/packaging/debian/librdkafka-dev.examples
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/packaging/debian/librdkafka-dev.examples b/thirdparty/librdkafka-0.11.1/packaging/debian/librdkafka-dev.examples
deleted file mode 100644
index b45032e..0000000
--- a/thirdparty/librdkafka-0.11.1/packaging/debian/librdkafka-dev.examples
+++ /dev/null
@@ -1,2 +0,0 @@
-examples/rdkafka_example.c
-examples/rdkafka_performance.c

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/packaging/debian/librdkafka-dev.install
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/packaging/debian/librdkafka-dev.install b/thirdparty/librdkafka-0.11.1/packaging/debian/librdkafka-dev.install
deleted file mode 100644
index 478f660..0000000
--- a/thirdparty/librdkafka-0.11.1/packaging/debian/librdkafka-dev.install
+++ /dev/null
@@ -1,6 +0,0 @@
-usr/include/*/rdkafka.h
-usr/include/*/rdkafkacpp.h
-usr/lib/*/librdkafka.a
-usr/lib/*/librdkafka.so
-usr/lib/*/librdkafka++.a
-usr/lib/*/librdkafka++.so

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/packaging/debian/librdkafka-dev.substvars
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/packaging/debian/librdkafka-dev.substvars b/thirdparty/librdkafka-0.11.1/packaging/debian/librdkafka-dev.substvars
deleted file mode 100644
index abd3ebe..0000000
--- a/thirdparty/librdkafka-0.11.1/packaging/debian/librdkafka-dev.substvars
+++ /dev/null
@@ -1 +0,0 @@
-misc:Depends=

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/packaging/debian/librdkafka.dsc
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/packaging/debian/librdkafka.dsc b/thirdparty/librdkafka-0.11.1/packaging/debian/librdkafka.dsc
deleted file mode 100644
index 65826d4..0000000
--- a/thirdparty/librdkafka-0.11.1/packaging/debian/librdkafka.dsc
+++ /dev/null
@@ -1,16 +0,0 @@
-Format: 3.0 (quilt)
-Source: librdkafka
-Binary: librdkafka1, librdkafka-dev, librdkafka1-dbg
-Architecture: any
-Version: 0.9.1-1pre1
-Maintainer: Magnus Edenhill <li...@edenhill.se>
-Homepage: https://github.com/edenhill/librdkafka
-Standards-Version: 3.9.6
-Vcs-Browser: https://github.com/edenhill/librdkafka/tree/master
-Vcs-Git: git://github.com/edenhill/librdkafka.git -b master
-Build-Depends: debhelper (>= 9), zlib1g-dev, libssl-dev, libsasl2-dev, python
-Package-List:
- librdkafka-dev deb libdevel optional arch=any
- librdkafka1 deb libs optional arch=any
- librdkafka1-dbg deb debug extra arch=any
-Original-Maintainer: Faidon Liambotis <pa...@debian.org>

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/packaging/debian/librdkafka1-dbg.substvars
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/packaging/debian/librdkafka1-dbg.substvars b/thirdparty/librdkafka-0.11.1/packaging/debian/librdkafka1-dbg.substvars
deleted file mode 100644
index abd3ebe..0000000
--- a/thirdparty/librdkafka-0.11.1/packaging/debian/librdkafka1-dbg.substvars
+++ /dev/null
@@ -1 +0,0 @@
-misc:Depends=

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/packaging/debian/librdkafka1.dirs
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/packaging/debian/librdkafka1.dirs b/thirdparty/librdkafka-0.11.1/packaging/debian/librdkafka1.dirs
deleted file mode 100644
index 6845771..0000000
--- a/thirdparty/librdkafka-0.11.1/packaging/debian/librdkafka1.dirs
+++ /dev/null
@@ -1 +0,0 @@
-usr/lib

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/packaging/debian/librdkafka1.install
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/packaging/debian/librdkafka1.install b/thirdparty/librdkafka-0.11.1/packaging/debian/librdkafka1.install
deleted file mode 100644
index 7e86e5f..0000000
--- a/thirdparty/librdkafka-0.11.1/packaging/debian/librdkafka1.install
+++ /dev/null
@@ -1,2 +0,0 @@
-usr/lib/*/librdkafka.so.*
-usr/lib/*/librdkafka++.so.*

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/packaging/debian/librdkafka1.postinst.debhelper
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/packaging/debian/librdkafka1.postinst.debhelper b/thirdparty/librdkafka-0.11.1/packaging/debian/librdkafka1.postinst.debhelper
deleted file mode 100644
index 3d89d3e..0000000
--- a/thirdparty/librdkafka-0.11.1/packaging/debian/librdkafka1.postinst.debhelper
+++ /dev/null
@@ -1,5 +0,0 @@
-# Automatically added by dh_makeshlibs
-if [ "$1" = "configure" ]; then
-	ldconfig
-fi
-# End automatically added section

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/packaging/debian/librdkafka1.postrm.debhelper
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/packaging/debian/librdkafka1.postrm.debhelper b/thirdparty/librdkafka-0.11.1/packaging/debian/librdkafka1.postrm.debhelper
deleted file mode 100644
index 7f44047..0000000
--- a/thirdparty/librdkafka-0.11.1/packaging/debian/librdkafka1.postrm.debhelper
+++ /dev/null
@@ -1,5 +0,0 @@
-# Automatically added by dh_makeshlibs
-if [ "$1" = "remove" ]; then
-	ldconfig
-fi
-# End automatically added section

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/packaging/debian/librdkafka1.symbols
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/packaging/debian/librdkafka1.symbols b/thirdparty/librdkafka-0.11.1/packaging/debian/librdkafka1.symbols
deleted file mode 100644
index 0ef576e..0000000
--- a/thirdparty/librdkafka-0.11.1/packaging/debian/librdkafka1.symbols
+++ /dev/null
@@ -1,64 +0,0 @@
-librdkafka.so.1 librdkafka1 #MINVER#
-* Build-Depends-Package: librdkafka-dev
- rd_kafka_brokers_add@Base 0.8.0
- rd_kafka_conf_destroy@Base 0.8.0
- rd_kafka_conf_dump@Base 0.8.3
- rd_kafka_conf_dump_free@Base 0.8.3
- rd_kafka_conf_dup@Base 0.8.3
- rd_kafka_conf_new@Base 0.8.0
- rd_kafka_conf_properties_show@Base 0.8.0
- rd_kafka_conf_set@Base 0.8.0
- rd_kafka_conf_set_dr_cb@Base 0.8.0
- rd_kafka_conf_set_dr_msg_cb@Base 0.8.4
- rd_kafka_conf_set_error_cb@Base 0.8.0
- rd_kafka_conf_set_log_cb@Base 0.8.4
- rd_kafka_conf_set_opaque@Base 0.8.0
- rd_kafka_conf_set_open_cb@Base 0.8.4
- rd_kafka_conf_set_socket_cb@Base 0.8.4
- rd_kafka_conf_set_stats_cb@Base 0.8.0
- rd_kafka_consume@Base 0.8.0
- rd_kafka_consume_batch@Base 0.8.0
- rd_kafka_consume_batch_queue@Base 0.8.4
- rd_kafka_consume_callback@Base 0.8.0
- rd_kafka_consume_callback_queue@Base 0.8.4
- rd_kafka_consume_queue@Base 0.8.4
- rd_kafka_consume_start@Base 0.8.0
- rd_kafka_consume_start_queue@Base 0.8.4
- rd_kafka_consume_stop@Base 0.8.0
- rd_kafka_destroy@Base 0.8.0
- rd_kafka_dump@Base 0.8.0
- rd_kafka_err2str@Base 0.8.0
- rd_kafka_errno2err@Base 0.8.3
- rd_kafka_log_print@Base 0.8.0
- rd_kafka_log_syslog@Base 0.8.0
- rd_kafka_message_destroy@Base 0.8.0
- rd_kafka_metadata@Base 0.8.4
- rd_kafka_metadata_destroy@Base 0.8.4
- rd_kafka_msg_partitioner_random@Base 0.8.0
- rd_kafka_name@Base 0.8.0
- rd_kafka_new@Base 0.8.0
- rd_kafka_offset_store@Base 0.8.3
- rd_kafka_opaque@Base 0.8.4
- rd_kafka_outq_len@Base 0.8.0
- rd_kafka_poll@Base 0.8.0
- rd_kafka_produce@Base 0.8.0
- rd_kafka_produce_batch@Base 0.8.4
- rd_kafka_queue_destroy@Base 0.8.4
- rd_kafka_queue_new@Base 0.8.4
- rd_kafka_set_log_level@Base 0.8.0
- rd_kafka_set_logger@Base 0.8.0
- rd_kafka_thread_cnt@Base 0.8.0
- rd_kafka_topic_conf_destroy@Base 0.8.0
- rd_kafka_topic_conf_dump@Base 0.8.3
- rd_kafka_topic_conf_dup@Base 0.8.3
- rd_kafka_topic_conf_new@Base 0.8.0
- rd_kafka_topic_conf_set@Base 0.8.0
- rd_kafka_topic_conf_set_opaque@Base 0.8.0
- rd_kafka_topic_conf_set_partitioner_cb@Base 0.8.0
- rd_kafka_topic_destroy@Base 0.8.0
- rd_kafka_topic_name@Base 0.8.0
- rd_kafka_topic_new@Base 0.8.0
- rd_kafka_topic_partition_available@Base 0.8.0
- rd_kafka_version@Base 0.8.1
- rd_kafka_version_str@Base 0.8.1
- rd_kafka_wait_destroyed@Base 0.8.0

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/packaging/debian/rules
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/packaging/debian/rules b/thirdparty/librdkafka-0.11.1/packaging/debian/rules
deleted file mode 100755
index d8bfaa5..0000000
--- a/thirdparty/librdkafka-0.11.1/packaging/debian/rules
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/usr/bin/make -f
-
-# Uncomment this to turn on verbose mode.
-#export DH_VERBOSE=1
-
-%:
-	dh $@
-
-override_dh_strip:
-	dh_strip --dbg-package=librdkafka1-dbg
-
-override_dh_auto_install:
-	dh_auto_install
-	install -D -m 0644 rdkafka.pc \
-		debian/librdkafka-dev/usr/lib/${DEB_HOST_MULTIARCH}/pkgconfig/rdkafka.pc
-
-.PHONY: override_dh_strip override_dh_auto_install

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/packaging/debian/source/format
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/packaging/debian/source/format b/thirdparty/librdkafka-0.11.1/packaging/debian/source/format
deleted file mode 100644
index 163aaf8..0000000
--- a/thirdparty/librdkafka-0.11.1/packaging/debian/source/format
+++ /dev/null
@@ -1 +0,0 @@
-3.0 (quilt)

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/packaging/debian/watch
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/packaging/debian/watch b/thirdparty/librdkafka-0.11.1/packaging/debian/watch
deleted file mode 100644
index fc9aec8..0000000
--- a/thirdparty/librdkafka-0.11.1/packaging/debian/watch
+++ /dev/null
@@ -1,2 +0,0 @@
-version=3
-http://github.com/edenhill/librdkafka/tags .*/(\d[\d\.]*)\.tar\.gz

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/packaging/get_version.py
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/packaging/get_version.py b/thirdparty/librdkafka-0.11.1/packaging/get_version.py
deleted file mode 100755
index 3d98d21..0000000
--- a/thirdparty/librdkafka-0.11.1/packaging/get_version.py
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/usr/bin/env python
-
-import sys
-
-if len(sys.argv) != 2:
-    raise Exception('Usage: %s path/to/rdkafka.h' % sys.argv[0])
-
-kafka_h_file = sys.argv[1]
-f = open(kafka_h_file)
-for line in f:
-    if '#define RD_KAFKA_VERSION' in line:
-        version = line.split()[-1]
-        break
-f.close()
-
-major = int(version[2:4], 16)
-minor = int(version[4:6], 16)
-patch = int(version[6:8], 16)
-version = '.'.join(str(item) for item in (major, minor, patch))
-
-print version

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/packaging/homebrew/README.md
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/packaging/homebrew/README.md b/thirdparty/librdkafka-0.11.1/packaging/homebrew/README.md
deleted file mode 100644
index a23a085..0000000
--- a/thirdparty/librdkafka-0.11.1/packaging/homebrew/README.md
+++ /dev/null
@@ -1,15 +0,0 @@
-# Update the Homebrew librdkafka package version
-
-The `./brew-update-pr.sh` script in this directory updates the
-brew formula for librdkafka and pushes a PR to the homebrew-core repository.
-
-You should run it in two steps: first in the implicit dry-run mode
-to check that everything looks correct, and then, if that checks out,
-in live upload mode, which actually pushes the PR.
-
-    # Do a dry-run first, v0.11.0 is the librdkafka tag:
-    $ ./brew-update-pr.sh v0.11.0
-
-    # If everything looks okay, run the live upload mode:
-    $ ./brew-update-pr.sh --upload v0.11.0
-

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/packaging/homebrew/brew-update-pr.sh
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/packaging/homebrew/brew-update-pr.sh b/thirdparty/librdkafka-0.11.1/packaging/homebrew/brew-update-pr.sh
deleted file mode 100755
index f756159..0000000
--- a/thirdparty/librdkafka-0.11.1/packaging/homebrew/brew-update-pr.sh
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/bin/bash
-#
-# Automatically pushes a PR to homebrew-core to update
-# the librdkafka version.
-#
-# Usage:
-#   # Dry-run:
-#   ./brew-update-pr.sh v0.11.0
-#   # if everything looks good:
-#   ./brew-update-pr.sh --upload v0.11.0
-#
-
-
-DRY_RUN="--dry-run"
-if [[ $1 == "--upload" ]]; then
-   DRY_RUN=
-   shift
-fi
-
-TAG=$1
-
-if [[ -z $TAG ]]; then
-    echo "Usage: $0 [--upload] <librdkafka-tag>"
-    exit 1
-fi
-
-set -eu
-
-brew bump-formula-pr $DRY_RUN --strict \
-     --url=https://github.com/edenhill/librdkafka/archive/${TAG}.tar.gz \
-     librdkafka

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/packaging/nuget/.gitignore
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/packaging/nuget/.gitignore b/thirdparty/librdkafka-0.11.1/packaging/nuget/.gitignore
deleted file mode 100644
index 712f08d..0000000
--- a/thirdparty/librdkafka-0.11.1/packaging/nuget/.gitignore
+++ /dev/null
@@ -1,5 +0,0 @@
-dl-*
-out-*
-*.nupkg
-*.pyc
-__pycache__

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/packaging/nuget/README.md
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/packaging/nuget/README.md b/thirdparty/librdkafka-0.11.1/packaging/nuget/README.md
deleted file mode 100644
index 720a767..0000000
--- a/thirdparty/librdkafka-0.11.1/packaging/nuget/README.md
+++ /dev/null
@@ -1,50 +0,0 @@
-# NuGet package assembly
-
-This set of scripts collect CI artifacts from S3 and assembles
-them into a NuGet package structure staging directory.
-The NuGet tool is then run (from within docker) on this staging directory
-to create a proper NuGet package (with all the metadata).
-
-The finalized NuGet package may be uploaded manually to NuGet.org.
-
-## Requirements
-
- * Requires Python 2.x (due to Python 3 compat issues with rpmfile)
- * Requires Docker
- * Requires private S3 access keys for the librdkafka-ci-packages bucket.
-
-
-
-## Usage
-
-1. Trigger CI builds by creating and pushing a new release (candidate) tag
-   in the librdkafka repo. Make sure the tag is created on the correct branch.
-
-    $ git tag v0.11.0
-    $ git push origin v0.11.0
-
-2. Wait for CI builds to finish, monitor the builds here:
-
- * https://travis-ci.org/edenhill/librdkafka
- * https://ci.appveyor.com/project/edenhill/librdkafka
-
-3. On a Linux host, run the release.py script to assemble the NuGet package
-
-    $ cd packaging/nuget
-    # Specify the tag
-    $ ./release.py v0.11.0
-    # Optionally, if the tag was moved and an exact sha is also required:
-    # $ ./release.py --sha <the-full-git-sha> v0.11.0
-
-4. If all artifacts were available the NuGet package will be built
-   and reside in the current directory as librdkafka.redist.<v-less-tag>.nupkg
-
-5. Test the package manually
-
-6. Upload the package to NuGet
-
- * https://www.nuget.org/packages/manage/upload
-
-
-
-

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/packaging/nuget/artifact.py
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/packaging/nuget/artifact.py b/thirdparty/librdkafka-0.11.1/packaging/nuget/artifact.py
deleted file mode 100755
index 61b1d80..0000000
--- a/thirdparty/librdkafka-0.11.1/packaging/nuget/artifact.py
+++ /dev/null
@@ -1,173 +0,0 @@
-#!/usr/bin/env python
-#
-#
-# Collects CI artifacts from S3 storage, downloading them
-# to a local directory.
-#
-# The artifacts' folder in the S3 bucket must have the following token
-# format:
-#  <token>-[<value>]__   (repeat)
-#
-# Recognized tokens (unrecognized tokens are ignored):
-#  p       - project (e.g., "confluent-kafka-python")
-#  bld     - builder (e.g., "travis")
-#  plat    - platform ("osx", "linux", ..)
-#  arch    - arch ("x64", ..)
-#  tag     - git tag
-#  sha     - git sha
-#  bid     - builder's build-id
-#  bldtype - Release, Debug (appveyor)
-#
-# Example:
-#   p-confluent-kafka-python__bld-travis__plat-linux__tag-__sha-112130ce297656ea1c39e7c94c99286f95133a24__bid-271588764__/confluent_kafka-0.11.0-cp35-cp35m-manylinux1_x86_64.whl
-
-
-import re
-import os
-import argparse
-import boto3
-import packaging  # sibling packaging.py in this directory; provides rename_vals used below
-
-s3_bucket = 'librdkafka-ci-packages'
-dry_run = False
-
-class Artifact (object):
-    def __init__(self, arts, path, info=None):
-        self.path = path
-        # Remove unexpanded AppVeyor $(..) tokens from filename
-        self.fname = re.sub(r'\$\([^\)]+\)', '', os.path.basename(path))
-        slpath = os.path.join(os.path.dirname(path), self.fname)
-        if os.path.isfile(slpath):
-            # Already points to local file in correct location
-            self.lpath = slpath
-        else:
-            # Prepare download location in dlpath
-            self.lpath = os.path.join(arts.dlpath, slpath)
-
-        if info is None:
-            self.info = dict()
-        else:
-            # Assign the map and convert all keys to lower case
-            self.info = {k.lower(): v for k, v in info.items()}
-            # Rename values, e.g., 'plat':'linux' to 'plat':'debian'
-            for k,v in self.info.items():
-                rdict = packaging.rename_vals.get(k, None)
-                if rdict is not None:
-                    self.info[k] = rdict.get(v, v)
-
-        # Score value for sorting
-        self.score = 0
-
-        # AppVeyor symbol builds are of less value
-        if self.fname.find('.symbols.') != -1:
-            self.score -= 10
-
-        self.arts = arts
-        arts.artifacts.append(self)
-
-
-    def __repr__(self):
-        return self.path
-
-    def __lt__ (self, other):
-        return self.score < other.score
-
-    def download(self):
-        """ Download artifact from S3 and store in local directory .lpath.
-            If the artifact is already downloaded nothing is done. """
-        if os.path.isfile(self.lpath) and os.path.getsize(self.lpath) > 0:
-            return
-        print('Downloading %s -> %s' % (self.path, self.lpath))
-        if dry_run:
-            return
-        ldir = os.path.dirname(self.lpath)
-        if not os.path.isdir(ldir):
-            os.makedirs(ldir, 0o755)
-        self.arts.s3_bucket.download_file(self.path, self.lpath)
-
-
-class Artifacts (object):
-    def __init__(self, match, dlpath):
-        super(Artifacts, self).__init__()
-        self.match = match
-        self.artifacts = list()
-        # Download directory (make sure it ends with a path separator)
-        if not dlpath.endswith(os.path.sep):
-            dlpath = os.path.join(dlpath, '')
-        self.dlpath = dlpath
-        if not os.path.isdir(self.dlpath):
-            if not dry_run:
-                os.makedirs(self.dlpath, 0o755)
-
-    def collect_single(self, path, req_tag=True):
-        """ Collect single artifact, be it in S3 or locally.
-        :param: path string: S3 or local (relative) path
-        :param: req_tag bool: Require tag to match.
-        """
-
-        print('?  %s' % path)
-
-        # For local files, strip download path.
-        # Also ignore any parent directories.
-        if path.startswith(self.dlpath):
-            folder = os.path.basename(os.path.dirname(path[len(self.dlpath):]))
-        else:
-            folder = os.path.basename(os.path.dirname(path))
-
-        # The folder contains the tokens needed to perform
-        # matching of project, gitref, etc.
-        rinfo = re.findall(r'(?P<tag>[^-]+)-(?P<val>.*?)__', folder)
-        if rinfo is None or len(rinfo) == 0:
-            print('Incorrect folder/file name format for %s' % folder)
-            return None
-
-        info = dict(rinfo)
-
-        # Ignore AppVeyor Debug builds
-        if info.get('bldtype', '').lower() == 'debug':
-            print('Ignoring debug artifact %s' % folder)
-            return None
-
-        tag = info.get('tag', None)
-        if tag is not None and (len(tag) == 0 or tag.startswith('$(')):
-            # AppVeyor doesn't substitute $(APPVEYOR_REPO_TAG_NAME)
-            # with an empty value when not set, it leaves that token
-            # in the string - so translate that to no tag.
-            del info['tag']
-
-        # Match tag or sha to gitref
-        unmatched = list()
-        for m,v in self.match.items():
-            if m not in info or info[m] != v:
-                unmatched.append(m)
-
-        # Make sure all matches were satisfied, unless this is a
-        # common artifact.
-        if info.get('p', '') != 'common' and len(unmatched) > 0:
-            print(info)
-            print('%s: %s did not match %s' % (info.get('p', None), folder, unmatched))
-            return None
-
-        return Artifact(self, path, info)
-
-
-    def collect_s3(self):
-        """ Collect and download build-artifacts from S3 based on git reference """
-        print('Collecting artifacts matching %s from S3 bucket %s' % (self.match, s3_bucket))
-        self.s3 = boto3.resource('s3')
-        self.s3_bucket = self.s3.Bucket(s3_bucket)
-        self.s3_client = boto3.client('s3')
-        for item in self.s3_client.list_objects(Bucket=s3_bucket, Prefix='librdkafka/').get('Contents'):
-            self.collect_single(item.get('Key'))
-
-        for a in self.artifacts:
-            a.download()
-
-    def collect_local(self, path, req_tag=True):
-        """ Collect artifacts from a local directory possibly previously
-        collected from s3 """
-        for f in [os.path.join(dp, f) for dp, dn, filenames in os.walk(path) for f in filenames]:
-            if not os.path.isfile(f):
-                continue
-            self.collect_single(f, req_tag)
-
-

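To illustrate the artifact-folder token format documented in the removed artifact.py above, the same regular expression used by collect_single() turns a folder name into a token dictionary. The folder name below is invented for the example; only the regex comes from the deleted script:

    import re

    folder = "p-librdkafka__bld-travis__plat-linux__arch-x64__tag-v0.11.1__"
    info = dict(re.findall(r'(?P<tag>[^-]+)-(?P<val>.*?)__', folder))
    print(info)
    # {'p': 'librdkafka', 'bld': 'travis', 'plat': 'linux', 'arch': 'x64', 'tag': 'v0.11.1'}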
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/packaging/nuget/nuget.sh
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/packaging/nuget/nuget.sh b/thirdparty/librdkafka-0.11.1/packaging/nuget/nuget.sh
deleted file mode 100755
index 0323712..0000000
--- a/thirdparty/librdkafka-0.11.1/packaging/nuget/nuget.sh
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/bin/bash
-#
-#
-# Front-end for nuget that runs nuget in a docker image.
-
-set -ex
-
-if [[ -f /.dockerenv ]]; then
-    echo "Inside docker"
-
-    pushd $(dirname $0)
-
-    nuget $*
-
-    popd
-
-else
-    echo "Running docker image"
-    docker run -v $(pwd):/io mono:latest /io/$0 $*
-fi
-

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/packaging/nuget/packaging.py
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/packaging/nuget/packaging.py b/thirdparty/librdkafka-0.11.1/packaging/nuget/packaging.py
deleted file mode 100755
index 71d5b29..0000000
--- a/thirdparty/librdkafka-0.11.1/packaging/nuget/packaging.py
+++ /dev/null
@@ -1,596 +0,0 @@
-#!/usr/bin/env python
-#
-# NuGet packaging script.
-# Assembles a NuGet package using CI artifacts in S3
-# and calls nuget (in docker) to finalize the package.
-#
-
-import sys
-import re
-import os
-import tempfile
-import shutil
-import subprocess
-import urllib
-from string import Template
-from collections import defaultdict
-import boto3
-from zfile import zfile
-
-# File categories
-categories = ['dynamic', # dynamic libraries
-              'static',  # static libraries
-              'pc',      # pkg-config
-              'include'] # include files / headers
-
-win_ver = 'win7'
-# Maps platform and category to expected files, or vice versa.
-wanted_files = {
-    win_ver: {
-        'dynamic': ['librdkafka.dll', 'librdkafkacpp.dll',
-                    'librdkafka.lib', 'librdkafkacpp.lib',
-                    'msvcr120.dll', 'zlib.dll'],
-        'static': ['librdkafka.lib', 'librdkafkacpp.lib'],
-        'include': ['rdkafka.h', 'rdkafkacpp.h'],
-    },
-
-    'osx': {
-        'dynamic': ['librdkafka.dylib', 'librdkafka++.dylib'],
-        'static': ['librdkafka.a', 'librdkafka++.a'],
-        'include': ['rdkafka.h', 'rdkafkacpp.h'],
-        'pc': ['rdkafka.pc', 'rdkafka++.pc'],
-    },
-
-    'debian': {
-        'dynamic': ['librdkafka.so.1', 'librdkafka++.so.1'],
-        'static': ['librdkafka.a', 'librdkafka++.a'],
-        'include': ['rdkafka.h', 'rdkafkacpp.h'],
-        'pc': ['rdkafka.pc', 'rdkafka++.pc'],
-    },
-
-    'rhel':  {
-        'dynamic': ['librdkafka.so.1', 'librdkafka++.so.1'],
-        'static': ['librdkafka.a', 'librdkafka++.a'],
-        'include': ['rdkafka.h', 'rdkafkacpp.h'],
-        'pc': ['rdkafka.pc', 'rdkafka++.pc'],
-    }
-}
-
-# Supported platforms
-platforms = wanted_files.keys()
-
-
-
-# Default documents to include in all packages
-default_doc = ['../../README.md',
-               '../../CONFIGURATION.md',
-               '../../LICENSES.txt']
-
-# Rename matching files
-rename_files = {'librdkafka.so.1': 'librdkafka.so',
-                'librdkafka++.so.1': 'librdkafka++.so'}
-
-
-# Rename token values
-rename_vals = {'plat': {'linux': 'debian',
-                        'windows': win_ver},
-               'arch': {'x86_64': 'x64',
-                        'i386': 'x86',
-                        'win32': 'x86'}}
-
-
-
-# Collects CI artifacts from S3 storage, downloading them
-# to a local directory, or collecting already downloaded artifacts from
-# local directory.
-#
-# The artifacts' folder in the S3 bucket must have the following token
-# format:
-#  <token>-[<value>]__   (repeat)
-#
-# Recognized tokens (unrecognized tokens are ignored):
-#  p       - project (e.g., "confluent-kafka-python")
-#  bld     - builder (e.g., "travis")
-#  plat    - platform ("osx", "linux", ..)
-#  arch    - arch ("x64", ..)
-#  tag     - git tag
-#  sha     - git sha
-#  bid     - builder's build-id
-#  bldtype - Release, Debug (appveyor)
-#
-# Example:
-#   librdkafka/p-librdkafka__bld-travis__plat-linux__arch-x64__tag-v0.0.62__sha-d051b2c19eb0c118991cd8bc5cf86d8e5e446cde__bid-1562.1/librdkafka.tar.gz
-
-
-s3_bucket = 'librdkafka-ci-packages'
-dry_run = False
-
-class Artifact (object):
-    def __init__(self, arts, path, info=None):
-        self.path = path
-        # Remove unexpanded AppVeyor $(..) tokens from filename
-        self.fname = re.sub(r'\$\([^\)]+\)', '', os.path.basename(path))
-        slpath = os.path.join(os.path.dirname(path), self.fname)
-        if os.path.isfile(slpath):
-            # Already points to local file in correct location
-            self.lpath = slpath
-        else:
-            # Prepare download location in dlpath
-            self.lpath = os.path.join(arts.dlpath, slpath)
-
-        if info is None:
-            self.info = dict()
-        else:
-            # Assign the map and convert all keys to lower case
-            self.info = {k.lower(): v for k, v in info.items()}
-            # Rename values, e.g., 'plat':'linux' to 'plat':'debian'
-            for k,v in self.info.items():
-                rdict = rename_vals.get(k, None)
-                if rdict is not None:
-                    self.info[k] = rdict.get(v, v)
-
-        # Score value for sorting
-        self.score = 0
-
-        # AppVeyor symbol builds are of less value
-        if self.fname.find('.symbols.') != -1:
-            self.score -= 10
-
-        self.arts = arts
-        arts.artifacts.append(self)
-
-
-    def __repr__(self):
-        return self.path
-
-    def __lt__ (self, other):
-        return self.score < other.score
-
-    def download(self):
-        """ Download artifact from S3 and store in local directory .lpath.
-            If the artifact is already downloaded nothing is done. """
-        if os.path.isfile(self.lpath) and os.path.getsize(self.lpath) > 0:
-            return
-        print('Downloading %s' % self.path)
-        if dry_run:
-            return
-        ldir = os.path.dirname(self.lpath)
-        if not os.path.isdir(ldir):
-            os.makedirs(ldir, 0o755)
-        self.arts.s3_bucket.download_file(self.path, self.lpath)
-
-
-class Artifacts (object):
-    def __init__(self, match, dlpath):
-        super(Artifacts, self).__init__()
-        self.match = match
-        self.artifacts = list()
-        # Download directory (make sure it ends with a path separator)
-        if not dlpath.endswith(os.path.sep):
-            dlpath = os.path.join(dlpath, '')
-        self.dlpath = dlpath
-        if not os.path.isdir(self.dlpath):
-            if not dry_run:
-                os.makedirs(self.dlpath, 0o755)
-
-    def collect_single(self, path, req_tag=True):
-        """ Collect single artifact, be it in S3 or locally.
-        :param: path string: S3 or local (relative) path
-        :param: req_tag bool: Require tag to match.
-        """
-
-        #print('?  %s' % path)
-
-        # For local files, strip download path.
-        # Also ignore any parent directories.
-        if path.startswith(self.dlpath):
-            folder = os.path.basename(os.path.dirname(path[len(self.dlpath):]))
-        else:
-            folder = os.path.basename(os.path.dirname(path))
-
-        # The folder contains the tokens needed to perform
-        # matching of project, gitref, etc.
-        rinfo = re.findall(r'(?P<tag>[^-]+)-(?P<val>.*?)__', folder)
-        if rinfo is None or len(rinfo) == 0:
-            print('Incorrect folder/file name format for %s' % folder)
-            return None
-
-        info = dict(rinfo)
-
-        # Ignore AppVeyor Debug builds
-        if info.get('bldtype', '').lower() == 'debug':
-            print('Ignoring debug artifact %s' % folder)
-            return None
-
-        tag = info.get('tag', None)
-        if tag is not None and (len(tag) == 0 or tag.startswith('$(')):
-            # AppVeyor doesn't substitute $(APPVEYOR_REPO_TAG_NAME)
-            # with an empty value when not set, it leaves that token
-            # in the string - so translate that to no tag.
-            del info['tag']
-
-        # Perform matching
-        unmatched = list()
-        for m,v in self.match.items():
-            if m not in info or info[m] != v:
-                unmatched.append(m)
-
-        # Make sure all matches were satisfied, unless this is a
-        # common artifact.
-        if info.get('p', '') != 'common' and len(unmatched) > 0:
-            # print('%s: %s did not match %s' % (info.get('p', None), folder, unmatched))
-            return None
-
-        return Artifact(self, path, info)
-
-
-    def collect_s3(self):
-        """ Collect and download build-artifacts from S3 based on git reference """
-        print('Collecting artifacts matching %s from S3 bucket %s' % (self.match, s3_bucket))
-        self.s3 = boto3.resource('s3')
-        self.s3_bucket = self.s3.Bucket(s3_bucket)
-        self.s3_client = boto3.client('s3')
-        for item in self.s3_client.list_objects(Bucket=s3_bucket, Prefix='librdkafka/').get('Contents'):
-            self.collect_single(item.get('Key'))
-
-        for a in self.artifacts:
-            a.download()
-
-    def collect_local(self, path, req_tag=True):
-        """ Collect artifacts from a local directory possibly previously
-        collected from s3 """
-        for f in [os.path.join(dp, f) for dp, dn, filenames in os.walk(path) for f in filenames]:
-            if not os.path.isfile(f):
-                continue
-            self.collect_single(f, req_tag)
-
-
-
-
-class Package (object):
-    """ Generic Package class
-        A Package is a working container for one or more output
-        packages for a specific package type (e.g., nuget) """
-
-    def __init__ (self, version, arts, ptype):
-        super(Package, self).__init__()
-        self.version = version
-        self.arts = arts
-        self.ptype = ptype
-        # These may be overwritten by specific sub-classes:
-        self.artifacts = arts.artifacts
-        self.platforms = platforms
-        # Staging path, filled in later.
-        self.stpath = None
-        self.kv = {'version': version}
-        self.files = dict()
-
-    def add_file (self, file):
-        self.files[file] = True
-
-    def categorize (self):
-        """ Categorize and arrange a Package's artifacts according to
-            its platforms.
-            Returns a fout map:
-              category: [(artifact,file)]
-        """
-
-        fout = defaultdict(list)
-
-        # Flat lists of files to collect keyed by platform,category
-        collect_files = dict()
-        for platform in wanted_files:
-            for category, flist in wanted_files[platform].items():
-                for f in flist:
-                    collect_files[(platform,category,f)] = list()
-
-        for a in self.artifacts:
-            try:
-                with zfile.ZFile(a.lpath, 'r') as zf:
-                    if os.path.splitext(a.lpath)[-1] == '.rpm':
-                        a.info['plat'] = 'rhel'
-
-                    platform = a.info['plat']
-                    if platform not in platforms:
-                        continue
-
-                    zfiles = zf.getnames()
-                    if len(zfiles) == 0:
-                        print('No files in %s?' % a)
-                    for category, flist in wanted_files[platform].items():
-                        for f in flist:
-                            matches = [(a,x) for x in zfiles if os.path.basename(x) == f]
-                            if len(matches) > 0:
-                                collect_files[(platform,category,f)] += matches
-                                fout[category] += matches
-
-            except zfile.tarfile.ReadError as e:
-                print('ignoring artifact: %s: %s' % (a.lpath, str(e)))
-
-        # Verify that all wanted combinations were matched
-        errors = 0
-        for missing in [x for x in collect_files if len(collect_files[x]) == 0]:
-            errors += 1
-            print('ERROR: No matching artifact files for', missing)
-
-        if errors > 0:
-            raise Exception('Not all wanted files found in artifacts, see above.')
-        return fout
-
-
-    def layout (self, lydef):
-        """
-        Layout categorized files according to provided
-        layout definition \p lydef.
-
-        Returns a layout dict containing the matched artifacts.
-        """
-
-        # Categorize files
-        fout = self.categorize()
-
-        ly = defaultdict(list)
-
-        # For each template path, attempt to map all files in that category
-        # and add any files that renders completely to the layout.
-        for tmplsrc, category in lydef.items():
-             tmpl = Template(tmplsrc)
-             for a, f in fout[category]:
-                 # print('%s: Try %s matched to %s in %s' % (category, tmplsrc, f, a))
-                 try:
-                     path = os.path.join(tmpl.substitute(a.info),
-                                         os.path.basename(f))
-                     ly[path].append((a, f))
-                 except KeyError as e:
-                     print(' -- %s info key %s not found' % (a, e))
-                     pass
-
-        # Sort providing sources for each path.
-        # E.g., prefer .redist. before .symbols., etc.
-        for path in ly:
-            ly[path].sort(reverse=True)
-
-        return ly
-
-    def build (self):
-        """ Build package output(s), return a list of paths to built packages """
-        raise NotImplementedError
-
-    def cleanup (self):
-        """ Optional cleanup routine for removing temporary files, etc. """
-        pass
-
-    def verify (self, path):
-        """ Optional post-build package verifier """
-        pass
-
-    def render (self, fname, destpath='.'):
-        """ Render template in file fname and save to destpath/fname,
-        where destpath is relative to stpath """
-
-        outf = os.path.join(self.stpath, destpath, fname)
-
-        if not os.path.isdir(os.path.dirname(outf)):
-            os.makedirs(os.path.dirname(outf), 0o0755)
-
-        with open(os.path.join('templates', fname), 'r') as tf:
-            tmpl = Template(tf.read())
-        with open(outf, 'w') as of:
-            of.write(tmpl.substitute(self.kv))
-
-        self.add_file(outf)
-
-
-    def copy_template (self, fname, target_fname=None, destpath='.'):
-        """ Copy template file to destpath/fname
-        where destpath is relative to stpath """
-
-        if target_fname is None:
-            target_fname = fname
-        outf = os.path.join(self.stpath, destpath, target_fname)
-
-        if not os.path.isdir(os.path.dirname(outf)):
-            os.makedirs(os.path.dirname(outf), 0o0755)
-
-        shutil.copy(os.path.join('templates', fname), outf)
-
-        self.add_file(outf)
-
-    def extract_artifacts (self, layout):
-        """ Extract members from artifacts into staging path """
-        print('Extracting artifacts according to layout:')
-        for path, afs in layout.items():
-            artifact = afs[0][0]
-            member = afs[0][1]
-            print('  %s (from %s) -> %s' % (member, artifact, path))
-            outf = os.path.join(self.stpath, path)
-            zfile.ZFile.extract(artifact.lpath, member, outf)
-
-            self.add_file(outf)
-
-        # Rename files, if needed.
-        for root, _, filenames in os.walk(self.stpath):
-            for filename in filenames:
-                fname = os.path.basename(filename)
-                if fname in rename_files:
-                    bpath = os.path.join(root, os.path.dirname(filename))
-                    oldfile = os.path.join(bpath, fname)
-                    newfile = os.path.join(bpath, rename_files[fname])
-                    print('Renaming %s -> %s' % (oldfile, newfile))
-                    os.rename(oldfile, newfile)
-
-        # And rename them in the files map too
-        rename_these = [x for x in self.files.keys() if os.path.basename(x) in rename_files]
-        for oldfile in rename_these:
-            newfile = os.path.join(os.path.dirname(oldfile),
-                                   rename_files[os.path.basename(oldfile)])
-            self.files[newfile] = self.files[oldfile]
-            del self.files[oldfile]
-
-
-
-class NugetPackage (Package):
-    """ All platforms, archs, et.al, are bundled into one set of
-        NuGet output packages: "main", redist and symbols """
-    def __init__ (self, version, arts):
-        if version.startswith('v'):
-            version = version[1:] # Strip v prefix
-        super(NugetPackage, self).__init__(version, arts, "nuget")
-
-    def cleanup(self):
-        if os.path.isdir(self.stpath):
-            shutil.rmtree(self.stpath)
-
-    def build (self, buildtype):
-        """ Build single NuGet package for all its artifacts. """
-        layout = self.xlayout()
-
-        # NuGet removes the prefixing v from the version.
-        vless_version = self.kv['version']
-        if vless_version[0] == 'v':
-            vless_version = vless_version[1:]
-
-
-        self.stpath = tempfile.mkdtemp(prefix="out-", suffix="-%s" % buildtype,
-                                       dir=".")
-
-        self.render('librdkafka.redist.nuspec')
-        self.copy_template('librdkafka.redist.targets',
-                           destpath=os.path.join('build', 'native'))
-        self.copy_template('librdkafka.redist.props',
-                           destpath=os.path.join('build', 'native'))
-        self.copy_template('librdkafka.redist.props',
-                           destpath=os.path.join('build', 'net'))
-        for f in default_doc:
-            shutil.copy(f, self.stpath)
-
-        self.extract_artifacts(layout)
-
-        print('Tree extracted to %s' % self.stpath)
-
-        # After creating a bare-bone nupkg layout containing the artifacts
-        # and some spec and props files, call the 'nuget' utility to
-        # make a proper nupkg of it (with all the metadata files).
-        subprocess.check_call("./nuget.sh pack %s -BasePath '%s' -NonInteractive" %  \
-                              (os.path.join(self.stpath, 'librdkafka.redist.nuspec'),
-                               self.stpath), shell=True)
-        return ['librdkafka.redist.%s.nupkg' % vless_version]
-
-    def xlayout (self):
-        """ Copy files from artifact nupkgs to new super-layout
-
-            Buildtype: release, debug
-
-            High-level requirements:
-             * provide build artifacts: -> build/
-               - static libraries
-               - header files
-               - layout:
-                  build/native/librdkafka.targets
-                  build/native/lib/<plat>/<arch>/<variant>/<toolset>/{static}
-                  build/native/include/librdkafka/*.h
-
-             * provide runtime artifacts: -> runtimes/
-               - dynamic libraries
-               - possibly symbol files
-               - layout:
-                  runtimes/<plat>-<arch>/native/{dynamic}
-             * both cases:
-               - docs -> ./
-
-            runtimes from https://github.com/dotnet/corefx/blob/master/pkg/Microsoft.NETCore.Platforms/runtime.json
-            * win7-x86
-            * win7-x64
-            * osx
-            * osx-x64
-            * debian-x64
-            * rhel-x64  (rhel.7)
-
-            This gives the following layout:
-            build/native/include/librdkafka/rdkafka.h..
-            build/native/net/librdkafka.redist.props
-
-        """
-
-        # Generate template tokens for artifacts
-        for a in self.arts.artifacts:
-            if 'bldtype' not in a.info:
-                a.info['bldtype'] = 'release'
-
-            a.info['variant'] = '%s-%s-%s' % (a.info.get('plat'),
-                                              a.info.get('arch'),
-                                              a.info.get('bldtype'))
-            if 'toolset' not in a.info:
-                a.info['toolset'] = 'v120'
-
-        nuget_layout = {
-            # Build
-            'build/native/lib/${plat}/${arch}/${variant}/${toolset}/': 'static',
-            'build/native/include/librdkafka/': 'include',
-
-            # Runtime
-            'runtimes/${plat}-${arch}/native/': 'dynamic',
-
-            # All
-            'content/docs/': 'doc'
-        }
-
-        layout = self.layout(nuget_layout)
-
-        errors = 0
-        print(' %s layout:' % self)
-        for path, afs in layout.items():
-            print('  %s provided by:' % path)
-            for a, f in afs:
-                print('    %s from artifact %s (and %d more)' % (f, a.fname, len(afs)-1))
-                break
-            if len(afs) == 0:
-                print('     ERROR: no artifacts found')
-                errors += 1
-        print('')
-
-        if errors > 0:
-            raise Exception('Layout not satisfied by collected artifacts: %d missing' % errors)
-
-        return layout
-
-
-
-
-    def verify (self, path):
-        """ Verify package """
-        expect = ["librdkafka.redist.nuspec",
-                  "LICENSES.txt",
-                  "build/native/librdkafka.redist.props",
-                  "build/native/librdkafka.redist.targets",
-                  "build/native/include/librdkafka/rdkafka.h",
-                  "build/native/include/librdkafka/rdkafkacpp.h",
-                  "build/net/librdkafka.redist.props",
-                  "runtimes/win7-x86/native/librdkafka.dll",
-                  "runtimes/win7-x86/native/librdkafka.lib",
-                  "runtimes/win7-x86/native/zlib.dll",
-                  "runtimes/win7-x86/native/msvcr120.dll",
-                  "runtimes/win7-x64/native/librdkafka.dll",
-                  "runtimes/win7-x64/native/librdkafka.lib",
-                  "runtimes/win7-x64/native/msvcr120.dll",
-                  "runtimes/osx-x64/native/librdkafka++.dylib",
-                  "runtimes/osx-x64/native/librdkafka.dylib",
-                  "runtimes/debian-x64/native/librdkafka++.so",
-                  "runtimes/debian-x64/native/librdkafka.so",
-                  "runtimes/rhel-x64/native/librdkafka++.so",
-                  "runtimes/rhel-x64/native/librdkafka.so"]
-        missing = list()
-        with zfile.ZFile(path, 'r') as zf:
-            print('Verifying %s:' % path)
-
-            # Zipfiles may url-encode filenames, unquote them before matching.
-            pkgd = [urllib.unquote(x) for x in zf.getnames()]
-            missing = [x for x in expect if x not in pkgd]
-
-        if len(missing) > 0:
-            print('Missing files in package %s:\n%s' % (path, '\n'.join(missing)))
-            return False
-        else:
-            print('OK - %d expected files found' % len(expect))
-            return True
-

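The removed packaging.py depends on a naming convention for CI artifacts in S3: each folder name is a sequence of <token>-<value>__ pairs (p, bld, plat, arch, tag, sha, bid, bldtype) that collect_single() splits with a regex and optionally renames via rename_vals, and that layout()/xlayout() later expand into NuGet destination paths with string.Template. A minimal Python sketch of that flow, with a made-up folder name and one of the layout templates from xlayout() (illustrative only, not output from a real build):

    import re
    from string import Template

    # Folder name following the <token>-<value>__ convention described above;
    # the values are invented for this example.
    folder = ('p-librdkafka__bld-travis__plat-linux__arch-x64__'
              'tag-v0.11.1__sha-deadbeef__bid-1.1')

    # Same regex collect_single() uses to split the folder into tokens.
    info = dict(re.findall(r'(?P<tag>[^-]+)-(?P<val>.*?)__', folder))

    # Same token renaming packaging.py applies, e.g. plat 'linux' -> 'debian'.
    rename_vals = {'plat': {'linux': 'debian'}, 'arch': {'x86_64': 'x64'}}
    for k, v in info.items():
        info[k] = rename_vals.get(k, {}).get(v, v)

    # One of the nuget_layout templates, expanded for this artifact.
    tmpl = Template('runtimes/${plat}-${arch}/native/')
    print(tmpl.substitute(info))   # -> runtimes/debian-x64/native/
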
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/packaging/nuget/release.py
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/packaging/nuget/release.py b/thirdparty/librdkafka-0.11.1/packaging/nuget/release.py
deleted file mode 100755
index 9deaed8..0000000
--- a/thirdparty/librdkafka-0.11.1/packaging/nuget/release.py
+++ /dev/null
@@ -1,91 +0,0 @@
-#!/usr/bin/env python
-#
-#
-# NuGet release packaging tool.
-# Creates a NuGet package from CI artifacts on S3.
-#
-
-
-import sys
-import argparse
-import packaging
-
-
-dry_run = False
-
-
-
-if __name__ == '__main__':
-
-    parser = argparse.ArgumentParser()
-    parser.add_argument("--no-s3", help="Don't collect from S3", action="store_true")
-    parser.add_argument("--dry-run",
-                        help="Locate artifacts but don't actually download or do anything",
-                        action="store_true")
-    parser.add_argument("--directory", help="Download directory (default: dl-<tag>)", default=None)
-    parser.add_argument("--no-cleanup", help="Don't clean up temporary folders", action="store_true")
-    parser.add_argument("--sha", help="Also match on this git sha1", default=None)
-    parser.add_argument("tag", help="Git tag to collect")
-
-    args = parser.parse_args()
-    dry_run = args.dry_run
-    if not args.directory:
-        args.directory = 'dl-%s' % args.tag
-
-    match = {'tag': args.tag}
-    if args.sha is not None:
-        match['sha'] = args.sha
-
-    arts = packaging.Artifacts(match, args.directory)
-
-    # Collect common local artifacts, such as support files.
-    arts.collect_local('common', req_tag=False)
-
-    if not args.no_s3:
-        arts.collect_s3()
-    else:
-        arts.collect_local(arts.dlpath)
-
-    if len(arts.artifacts) == 0:
-        raise ValueError('No artifacts found for %s' % match)
-
-    print('Collected artifacts:')
-    for a in arts.artifacts:
-        print(' %s' % a.lpath)
-    print('')
-
-    package_for = [packaging.NugetPackage]
-    packages = list()
-
-    print('Packaging classes: %s' % package_for)
-
-    for pcl in package_for:
-        p = pcl(match['tag'], arts)
-        packages.append(p)
-    print('')
-
-    if dry_run:
-        sys.exit(0)
-
-    # Build packages
-    print('Building packages:')
-    pkgfiles = []
-    for p in packages:
-        paths = p.build(buildtype='release')
-        for path in paths:
-            # Verify package files
-            if p.verify(path):
-                pkgfiles.append(path)
-        if not args.no_cleanup:
-            p.cleanup()
-        else:
-            print(' --no-cleanup: leaving %s' % p.stpath)
-    print('')
-
-    if len(pkgfiles) > 0:
-        print('Created packages:')
-        for pkg in pkgfiles:
-            print(pkg)
-    else:
-        print('No packages created')
-        sys.exit(1)

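Given the options release.py defines above, a typical invocation would be along the lines of "python release.py --dry-run v0.11.1" to only locate matching artifacts, and "python release.py v0.11.1" to download them, build the NuGet package and verify its contents; --no-s3 combined with --directory reuses artifacts already on disk. (The tag value here is illustrative.)
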
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/packaging/nuget/requirements.txt
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/packaging/nuget/requirements.txt b/thirdparty/librdkafka-0.11.1/packaging/nuget/requirements.txt
deleted file mode 100644
index c892afd..0000000
--- a/thirdparty/librdkafka-0.11.1/packaging/nuget/requirements.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-boto3
-rpmfile


[08/51] [partial] nifi-minifi-cpp git commit: MINIFICPP-512 - upgrade to librdkafka 0.11.4

Posted by ph...@apache.org.
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/examples/CMakeLists.txt
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/examples/CMakeLists.txt b/thirdparty/librdkafka-0.11.4/examples/CMakeLists.txt
new file mode 100644
index 0000000..dae7f9a
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/examples/CMakeLists.txt
@@ -0,0 +1,30 @@
+if(WIN32)
+    set(win32_sources ../win32/wingetopt.c ../win32/wingetopt.h)
+    set(win32_compile_defs "LIBRDKAFKACPP_EXPORTS=0")
+endif(WIN32)
+
+add_executable(rdkafka_simple_producer rdkafka_simple_producer.c ${win32_sources})
+target_link_libraries(rdkafka_simple_producer PUBLIC rdkafka)
+
+add_executable(rdkafka_performance rdkafka_performance.c ${win32_sources})
+target_link_libraries(rdkafka_performance PUBLIC rdkafka)
+
+add_executable(rdkafka_example_cpp rdkafka_example.cpp ${win32_sources})
+target_link_libraries(rdkafka_example_cpp PUBLIC rdkafka++)
+target_compile_definitions(rdkafka_example_cpp PRIVATE ${win32_compile_defs})
+
+add_executable(rdkafka_consumer_example_cpp rdkafka_consumer_example.cpp ${win32_sources})
+target_link_libraries(rdkafka_consumer_example_cpp PUBLIC rdkafka++)
+target_compile_definitions(rdkafka_consumer_example_cpp PRIVATE ${win32_compile_defs})
+
+# The targets below have Unix include dirs and do not compile on Windows.
+if(NOT WIN32)
+    add_executable(rdkafka_example rdkafka_example.c)
+    target_link_libraries(rdkafka_example PUBLIC rdkafka)
+    
+    add_executable(rdkafka_consumer_example rdkafka_consumer_example.c)
+    target_link_libraries(rdkafka_consumer_example PUBLIC rdkafka)
+    
+    add_executable(kafkatest_verifiable_client kafkatest_verifiable_client.cpp)
+    target_link_libraries(kafkatest_verifiable_client PUBLIC rdkafka++)
+endif(NOT WIN32)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/examples/Makefile
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/examples/Makefile b/thirdparty/librdkafka-0.11.4/examples/Makefile
new file mode 100644
index 0000000..d3e0832
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/examples/Makefile
@@ -0,0 +1,96 @@
+EXAMPLES ?= rdkafka_example rdkafka_performance rdkafka_example_cpp \
+	rdkafka_consumer_example rdkafka_consumer_example_cpp \
+	kafkatest_verifiable_client rdkafka_simple_producer
+
+all: $(EXAMPLES)
+
+include ../mklove/Makefile.base
+
+CFLAGS += -I../src
+CXXFLAGS += -I../src-cpp
+
+# librdkafka must be compiled with -gstrict-dwarf, but rdkafka_example must not,
+# due to some clang bug on OSX 10.9
+CPPFLAGS := $(subst strict-dwarf,,$(CPPFLAGS))
+
+rdkafka_example: ../src/librdkafka.a rdkafka_example.c
+	$(CC) $(CPPFLAGS) $(CFLAGS) rdkafka_example.c -o $@ $(LDFLAGS) \
+		../src/librdkafka.a $(LIBS)
+	@echo "# $@ is ready"
+	@echo "#"
+	@echo "# Run producer (write messages on stdin)"
+	@echo "./$@ -P -t <topic> -p <partition>"
+	@echo ""
+	@echo "# or consumer"
+	@echo "./$@ -C -t <topic> -p <partition>"
+	@echo ""
+	@echo "#"
+	@echo "# More usage options:"
+	@echo "./$@ -h"
+
+rdkafka_simple_producer: ../src/librdkafka.a rdkafka_simple_producer.c
+	$(CC) $(CPPFLAGS) $(CFLAGS) $@.c -o $@ $(LDFLAGS) \
+		../src/librdkafka.a $(LIBS)
+
+rdkafka_consumer_example: ../src/librdkafka.a rdkafka_consumer_example.c
+	$(CC) $(CPPFLAGS) $(CFLAGS) rdkafka_consumer_example.c -o $@ $(LDFLAGS) \
+		../src/librdkafka.a $(LIBS)
+	@echo "# $@ is ready"
+	@echo "#"
+	@echo "./$@ <topic[:part]> <topic2[:part]> .."
+	@echo ""
+	@echo "#"
+	@echo "# More usage options:"
+	@echo "./$@ -h"
+
+rdkafka_performance: ../src/librdkafka.a rdkafka_performance.c
+	$(CC) $(CPPFLAGS) $(CFLAGS) rdkafka_performance.c -o $@ $(LDFLAGS) \
+		../src/librdkafka.a $(LIBS)
+	@echo "# $@ is ready"
+	@echo "#"
+	@echo "# Run producer"
+	@echo "./$@ -P -t <topic> -p <partition> -s <msgsize>"
+	@echo ""
+	@echo "# or consumer"
+	@echo "./$@ -C -t <topic> -p <partition>"
+	@echo ""
+	@echo "#"
+	@echo "# More usage options:"
+	@echo "./$@ -h"
+
+
+rdkafka_example_cpp: ../src-cpp/librdkafka++.a ../src/librdkafka.a rdkafka_example.cpp
+	$(CXX) $(CPPFLAGS) $(CXXFLAGS) rdkafka_example.cpp -o $@ $(LDFLAGS) \
+		../src-cpp/librdkafka++.a ../src/librdkafka.a $(LIBS) -lstdc++
+
+kafkatest_verifiable_client: ../src-cpp/librdkafka++.a ../src/librdkafka.a kafkatest_verifiable_client.cpp
+	$(CXX) $(CPPFLAGS) $(CXXFLAGS) kafkatest_verifiable_client.cpp -o $@ $(LDFLAGS) \
+		../src-cpp/librdkafka++.a ../src/librdkafka.a $(LIBS) -lstdc++
+
+
+rdkafka_consumer_example_cpp: ../src-cpp/librdkafka++.a ../src/librdkafka.a rdkafka_consumer_example.cpp
+	$(CXX) $(CPPFLAGS) $(CXXFLAGS) rdkafka_consumer_example.cpp -o $@ $(LDFLAGS) \
+		../src-cpp/librdkafka++.a ../src/librdkafka.a $(LIBS) -lstdc++
+
+rdkafka_consume_batch: ../src-cpp/librdkafka++.a ../src/librdkafka.a rdkafka_consume_batch.cpp
+	$(CXX) $(CPPFLAGS) $(CXXFLAGS) rdkafka_consume_batch.cpp -o $@ $(LDFLAGS) \
+		../src-cpp/librdkafka++.a ../src/librdkafka.a $(LIBS) -lstdc++
+
+rdkafka_zookeeper_example: ../src/librdkafka.a rdkafka_zookeeper_example.c
+	$(CC) $(CPPFLAGS) $(CFLAGS) -I/usr/include/zookeeper rdkafka_zookeeper_example.c -o $@ $(LDFLAGS) \
+		../src/librdkafka.a $(LIBS) -lzookeeper_mt -ljansson
+	@echo "# $@ is ready"
+	@echo "#"
+	@echo "# Run producer (write messages on stdin)"
+	@echo "./$@ -P -t <topic> -p <partition>"
+	@echo ""
+	@echo "# or consumer"
+	@echo "./$@ -C -t <topic> -p <partition>"
+	@echo ""
+	@echo "#"
+	@echo "# More usage options:"
+	@echo "./$@ -h"
+
+clean:
+	rm -f $(EXAMPLES)
+

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/examples/globals.json
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/examples/globals.json b/thirdparty/librdkafka-0.11.4/examples/globals.json
new file mode 100644
index 0000000..527e126
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/examples/globals.json
@@ -0,0 +1,11 @@
+{"VerifiableConsumer":
+ {
+     "class": "kafkatest.services.verifiable_client.VerifiableClientApp",
+     "exec_cmd": "/vagrant/tests/c/kafkatest_verifiable_client --consumer --debug cgrp,topic,protocol,broker"
+ },
+ "VerifiableProducer":
+ {
+     "class": "kafkatest.services.verifiable_client.VerifiableClientApp",
+     "exec_cmd": "/vagrant/tests/c/kafkatest_verifiable_client --producer --debug topic,broker"
+ }
+}

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/examples/kafkatest_verifiable_client.cpp
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/examples/kafkatest_verifiable_client.cpp b/thirdparty/librdkafka-0.11.4/examples/kafkatest_verifiable_client.cpp
new file mode 100644
index 0000000..26e1ae0
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/examples/kafkatest_verifiable_client.cpp
@@ -0,0 +1,960 @@
+/*
+ * Copyright (c) 2015, Confluent Inc
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * librdkafka version of the Java VerifiableProducer and VerifiableConsumer
+ * for use with the official Kafka client tests.
+ */
+
+
+#include <iostream>
+#include <fstream>
+#include <sstream>
+#include <map>
+#include <string>
+#include <algorithm>
+#include <cstdlib>
+#include <cstdio>
+#include <csignal>
+#include <cstring>
+#include <unistd.h>
+#include <sys/time.h>
+#include <assert.h>
+#include <ctype.h>
+#include <strings.h>
+
+#ifdef _MSC_VER
+#include "../win32/wingetopt.h"
+#elif _AIX
+#include <unistd.h>
+#else
+#include <getopt.h>
+#endif
+
+/*
+ * Typically include path in a real application would be
+ * #include <librdkafka/rdkafkacpp.h>
+ */
+#include "rdkafkacpp.h"
+
+static bool run = true;
+static bool exit_eof = false;
+static int verbosity = 1;
+static std::string value_prefix;
+
+class Assignment {
+
+ public:
+  static std::string name (const std::string &t, int partition) {
+    std::stringstream stm;
+    stm << t << "." << partition;
+    return stm.str();
+  }
+
+  Assignment(): topic(""), partition(-1), consumedMessages(0),
+                minOffset(-1), maxOffset(0) {
+    printf("Created assignment\n");
+  }
+  Assignment(const Assignment &a) {
+    topic = a.topic;
+    partition = a.partition;
+    consumedMessages = a.consumedMessages;
+    minOffset = a.minOffset;
+    maxOffset = a.maxOffset;
+  }
+
+  Assignment &operator=(const Assignment &a) {
+    this->topic = a.topic;
+    this->partition = a.partition;
+    this->consumedMessages = a.consumedMessages;
+    this->minOffset = a.minOffset;
+    this->maxOffset = a.maxOffset;
+    return *this;
+  }
+
+  int operator==(const Assignment &a) const {
+    return !(this->topic == a.topic &&
+             this->partition == a.partition);
+  }
+
+  int operator<(const Assignment &a) const {
+    if (this->topic < a.topic) return 1;
+    if (this->topic >= a.topic) return 0;
+    return (this->partition < a.partition);
+  }
+
+  void setup (std::string t, int32_t p) {
+    assert(!t.empty());
+    assert(topic.empty() || topic == t);
+    assert(partition == -1 || partition == p);
+    topic = t;
+    partition = p;
+  }
+
+  std::string topic;
+  int partition;
+  int consumedMessages;
+  int64_t minOffset;
+  int64_t maxOffset;
+};
+
+
+
+
+static struct {
+  int maxMessages;
+
+  struct {
+    int numAcked;
+    int numSent;
+    int numErr;
+  } producer;
+
+  struct {
+    int consumedMessages;
+    int consumedMessagesLastReported;
+    int consumedMessagesAtLastCommit;
+    bool useAutoCommit;
+    std::map<std::string, Assignment> assignments;
+  } consumer;
+} state = {
+  /* .maxMessages = */ -1
+};
+
+
+static RdKafka::KafkaConsumer *consumer;
+
+
+static std::string now () {
+  struct timeval tv;
+  gettimeofday(&tv, NULL);
+  time_t t = tv.tv_sec;
+  struct tm tm;
+  char buf[64];
+
+  localtime_r(&t, &tm);
+  strftime(buf, sizeof(buf), "%H:%M:%S", &tm);
+  snprintf(buf+strlen(buf), sizeof(buf)-strlen(buf), ".%03d",
+           (int)(tv.tv_usec / 1000));
+
+  return buf;
+}
+
+
+static time_t watchdog_last_kick;
+static const int watchdog_timeout = 20; /* Must be > socket.timeout.ms */
+static void sigwatchdog (int sig) {
+  time_t t = time(NULL);
+  if (watchdog_last_kick + watchdog_timeout <= t) {
+    std::cerr << now() << ": WATCHDOG TIMEOUT (" <<
+        (int)(t - watchdog_last_kick) << "s): TERMINATING" << std::endl;
+    int *i = NULL;
+    *i = 100;
+    abort();
+  }
+}
+
+static void watchdog_kick () {
+  watchdog_last_kick = time(NULL);
+
+  /* Safeguard against hangs on exit */
+  alarm(watchdog_timeout);
+}
+
+
+
+
+
+static void errorString (const std::string &name,
+                         const std::string &errmsg,
+                         const std::string &topic,
+                         const std::string *key,
+                         const std::string &value) {
+  std::cout << "{ "
+            << "\"name\": \"" << name << "\", "
+            << "\"_time\": \"" << now() << "\", "
+            << "\"message\": \"" << errmsg << "\", "
+            << "\"topic\": \"" << topic << "\", "
+            << "\"key\": \"" << (key ? *key : "NULL") << "\", "
+            << "\"value\": \"" << value << "\" "
+            << "}" << std::endl;
+}
+
+
+static void successString (const std::string &name,
+                           const std::string &topic,
+                           int partition,
+                           int64_t offset,
+                           const std::string *key,
+                           const std::string &value) {
+  std::cout << "{ "
+            << "\"name\": \"" << name << "\", "
+            << "\"_time\": \"" << now() << "\", "
+            << "\"topic\": \"" << topic << "\", "
+            << "\"partition\": " << partition << ", "
+            << "\"offset\": " << offset << ", "
+            << "\"key\": \"" << (key ? *key : "NULL") << "\", "
+            << "\"value\": \"" << value << "\" "
+            << "}" << std::endl;
+}
+
+
+#if FIXME
+static void offsetStatus (bool success,
+                          const std::string &topic,
+                          int partition,
+                          int64_t offset,
+                          const std::string &errstr) {
+  std::cout << "{ "
+      "\"name\": \"offsets_committed\", " <<
+      "\"success\": " << success << ", " <<
+      "\"offsets\": [ " <<
+      " { " <<
+      " \"topic\": \"" << topic << "\", " <<
+      " \"partition\": " << partition << ", " <<
+      " \"offset\": " << (int)offset << ", " <<
+      " \"error\": \"" << errstr << "\" " <<
+      " } " <<
+      "] }" << std::endl;
+
+}
+#endif
+
+
+static void sigterm (int sig) {
+
+  std::cerr << now() << ": Terminating because of signal " << sig << std::endl;
+
+  if (!run) {
+    std::cerr << now() << ": Forced termination" << std::endl;
+    exit(1);
+  }
+  run = false;
+}
+
+
+class ExampleDeliveryReportCb : public RdKafka::DeliveryReportCb {
+ public:
+  void dr_cb (RdKafka::Message &message) {
+    if (message.err()) {
+      state.producer.numErr++;
+      errorString("producer_send_error", message.errstr(),
+                  message.topic_name(),
+                  message.key(),
+                  std::string(static_cast<const char*>(message.payload()),
+                              message.len()));
+    } else {
+      successString("producer_send_success",
+                    message.topic_name(),
+                    (int)message.partition(),
+                    message.offset(),
+                    message.key(),
+                    std::string(static_cast<const char*>(message.payload()),
+                                message.len()));
+      state.producer.numAcked++;
+    }
+  }
+};
+
+
+class ExampleEventCb : public RdKafka::EventCb {
+ public:
+  void event_cb (RdKafka::Event &event) {
+    switch (event.type())
+    {
+      case RdKafka::Event::EVENT_ERROR:
+        std::cerr << now() << ": ERROR (" << RdKafka::err2str(event.err()) << "): " <<
+            event.str() << std::endl;
+        break;
+
+      case RdKafka::Event::EVENT_STATS:
+        std::cerr << now() << ": \"STATS\": " << event.str() << std::endl;
+        break;
+
+      case RdKafka::Event::EVENT_LOG:
+        std::cerr << now() << ": LOG-" << event.severity() << "-"
+                  << event.fac() << ": " << event.str() << std::endl;
+        break;
+
+      default:
+        std::cerr << now() << ": EVENT " << event.type() <<
+            " (" << RdKafka::err2str(event.err()) << "): " <<
+            event.str() << std::endl;
+        break;
+    }
+  }
+};
+
+
+/* Use of this partitioner is pretty pointless since no key is provided
+ * in the produce() call. */
+class MyHashPartitionerCb : public RdKafka::PartitionerCb {
+ public:
+  int32_t partitioner_cb (const RdKafka::Topic *topic, const std::string *key,
+                          int32_t partition_cnt, void *msg_opaque) {
+    return djb_hash(key->c_str(), key->size()) % partition_cnt;
+  }
+ private:
+
+  static inline unsigned int djb_hash (const char *str, size_t len) {
+    unsigned int hash = 5381;
+    for (size_t i = 0 ; i < len ; i++)
+      hash = ((hash << 5) + hash) + str[i];
+    return hash;
+  }
+};
+
+
+
+
+
+/**
+ * Print number of records consumed, every 1000 messages or on timeout.
+ */
+static void report_records_consumed (int immediate) {
+  std::map<std::string,Assignment> *assignments = &state.consumer.assignments;
+
+  if (state.consumer.consumedMessages <=
+      state.consumer.consumedMessagesLastReported + (immediate ? 0 : 999))
+    return;
+
+  std::cout << "{ "
+      "\"name\": \"records_consumed\", " <<
+      "\"_totcount\": " << state.consumer.consumedMessages << ", " <<
+      "\"count\": " << (state.consumer.consumedMessages -
+                        state.consumer.consumedMessagesLastReported) << ", " <<
+      "\"partitions\": [ ";
+
+  for (std::map<std::string,Assignment>::iterator ii = assignments->begin() ;
+       ii != assignments->end() ; ii++) {
+    Assignment *a = &(*ii).second;
+    assert(!a->topic.empty());
+    std::cout << (ii == assignments->begin() ? "": ", ") << " { " <<
+        " \"topic\": \"" << a->topic << "\", " <<
+        " \"partition\": " << a->partition << ", " <<
+        " \"minOffset\": " << a->minOffset << ", " <<
+        " \"maxOffset\": " << a->maxOffset << " " <<
+        " } ";
+    a->minOffset = -1;
+  }
+
+  std::cout << "] }" << std::endl;
+
+  state.consumer.consumedMessagesLastReported = state.consumer.consumedMessages;
+}
+
+
+class ExampleOffsetCommitCb : public RdKafka::OffsetCommitCb {
+ public:
+  void offset_commit_cb (RdKafka::ErrorCode err,
+                         std::vector<RdKafka::TopicPartition*> &offsets) {
+    std::cerr << now() << ": Propagate offset for " << offsets.size() << " partitions, error: " << RdKafka::err2str(err) << std::endl;
+
+    /* No offsets to commit, don't report anything. */
+    if (err == RdKafka::ERR__NO_OFFSET)
+      return;
+
+    /* Send up-to-date records_consumed report to make sure consumed > committed */
+    report_records_consumed(1);
+
+    std::cout << "{ " <<
+        "\"name\": \"offsets_committed\", " <<
+        "\"success\": " << (err ? "false" : "true") << ", " <<
+        "\"error\": \"" << (err ? RdKafka::err2str(err) : "") << "\", " <<
+        "\"_autocommit\": " << (state.consumer.useAutoCommit ? "true":"false") << ", " <<
+        "\"offsets\": [ ";
+    assert(offsets.size() > 0);
+    for (unsigned int i = 0 ; i < offsets.size() ; i++) {
+      std::cout << (i == 0 ? "" : ", ") << "{ " <<
+          " \"topic\": \"" << offsets[i]->topic() << "\", " <<
+          " \"partition\": " << offsets[i]->partition() << ", " <<
+          " \"offset\": " << (int)offsets[i]->offset() << ", " <<
+          " \"error\": \"" <<
+          (offsets[i]->err() ? RdKafka::err2str(offsets[i]->err()) : "") <<
+          "\" " <<
+          " }";
+    }
+    std::cout << " ] }" << std::endl;
+
+  }
+};
+
+static ExampleOffsetCommitCb ex_offset_commit_cb;
+
+
+/**
+ * Commit every 1000 messages or whenever there is a consume timeout.
+ */
+static void do_commit (RdKafka::KafkaConsumer *consumer,
+                      int immediate) {
+  if (!immediate &&
+      (state.consumer.useAutoCommit ||
+       state.consumer.consumedMessagesAtLastCommit + 1000 >
+       state.consumer.consumedMessages))
+    return;
+
+  /* Make sure we report consumption before commit,
+   * otherwise tests may fail because of commit > consumed. */
+  if (state.consumer.consumedMessagesLastReported <
+      state.consumer.consumedMessages)
+    report_records_consumed(1);
+
+  std::cerr << now() << ": committing " <<
+    (state.consumer.consumedMessages -
+     state.consumer.consumedMessagesAtLastCommit) << " messages" << std::endl;
+
+  RdKafka::ErrorCode err;
+  err = consumer->commitSync(&ex_offset_commit_cb);
+
+  std::cerr << now() << ": " <<
+    "sync commit returned " << RdKafka::err2str(err) << std::endl;
+
+  state.consumer.consumedMessagesAtLastCommit =
+    state.consumer.consumedMessages;
+}
+
+
+void msg_consume(RdKafka::KafkaConsumer *consumer,
+                 RdKafka::Message* msg, void* opaque) {
+  switch (msg->err()) {
+    case RdKafka::ERR__TIMED_OUT:
+      /* Try reporting consumed messages */
+      report_records_consumed(1);
+      /* Commit one every consume() timeout instead of on every message.
+       * Also commit on every 1000 messages, whichever comes first. */
+      do_commit(consumer, 1);
+      break;
+
+
+    case RdKafka::ERR_NO_ERROR:
+      {
+        /* Real message */
+        if (verbosity > 2)
+          std::cerr << now() << ": Read msg from " << msg->topic_name() <<
+              " [" << (int)msg->partition() << "]  at offset " <<
+              msg->offset() << std::endl;
+
+        if (state.maxMessages >= 0 &&
+            state.consumer.consumedMessages >= state.maxMessages)
+          return;
+
+
+        Assignment *a =
+            &state.consumer.assignments[Assignment::name(msg->topic_name(),
+                                                         msg->partition())];
+        a->setup(msg->topic_name(), msg->partition());
+
+        a->consumedMessages++;
+        if (a->minOffset == -1)
+          a->minOffset = msg->offset();
+        if (a->maxOffset < msg->offset())
+          a->maxOffset = msg->offset();
+
+        if (msg->key()) {
+          if (verbosity >= 3)
+            std::cerr << now() << ": Key: " << *msg->key() << std::endl;
+        }
+
+        if (verbosity >= 3)
+          fprintf(stderr, "%.*s\n",
+                  static_cast<int>(msg->len()),
+                  static_cast<const char *>(msg->payload()));
+
+        state.consumer.consumedMessages++;
+
+        report_records_consumed(0);
+
+        do_commit(consumer, 0);
+      }
+      break;
+
+    case RdKafka::ERR__PARTITION_EOF:
+      /* Last message */
+      if (exit_eof) {
+        std::cerr << now() << ": Terminate: exit on EOF" << std::endl;
+        run = false;
+      }
+      break;
+
+    case RdKafka::ERR__UNKNOWN_TOPIC:
+    case RdKafka::ERR__UNKNOWN_PARTITION:
+      std::cerr << now() << ": Consume failed: " << msg->errstr() << std::endl;
+      run = false;
+      break;
+
+    case RdKafka::ERR_GROUP_COORDINATOR_NOT_AVAILABLE:
+      std::cerr << now() << ": Warning: " << msg->errstr() << std::endl;
+      break;
+
+    default:
+      /* Errors */
+      std::cerr << now() << ": Consume failed: " << msg->errstr() << std::endl;
+      run = false;
+  }
+}
+
+
+
+
+class ExampleConsumeCb : public RdKafka::ConsumeCb {
+ public:
+  void consume_cb (RdKafka::Message &msg, void *opaque) {
+    msg_consume(consumer_, &msg, opaque);
+  }
+  RdKafka::KafkaConsumer *consumer_;
+};
+
+class ExampleRebalanceCb : public RdKafka::RebalanceCb {
+ private:
+  static std::string part_list_json (const std::vector<RdKafka::TopicPartition*> &partitions) {
+    std::ostringstream out;
+    for (unsigned int i = 0 ; i < partitions.size() ; i++)
+      out << (i==0?"":", ") << "{ " <<
+          " \"topic\": \"" << partitions[i]->topic() << "\", " <<
+          " \"partition\": " << partitions[i]->partition() <<
+          " }";
+    return out.str();
+  }
+ public:
+  void rebalance_cb (RdKafka::KafkaConsumer *consumer,
+                     RdKafka::ErrorCode err,
+                     std::vector<RdKafka::TopicPartition*> &partitions) {
+
+    std::cerr << now() << ": rebalance_cb " << RdKafka::err2str(err) <<
+        " for " << partitions.size() << " partitions" << std::endl;
+    /* Send message report prior to rebalancing event to make sure they
+     * are accounted for on the "right side" of the rebalance. */
+    report_records_consumed(1);
+
+    if (err == RdKafka::ERR__ASSIGN_PARTITIONS)
+      consumer->assign(partitions);
+    else {
+      do_commit(consumer, 1);
+      consumer->unassign();
+    }
+
+    std::cout <<
+      "{ " <<
+      "\"name\": \"partitions_" << (err == RdKafka::ERR__ASSIGN_PARTITIONS ?
+                                    "assigned" : "revoked") << "\", " <<
+      "\"partitions\": [ " << part_list_json(partitions) << "] }" << std::endl;
+
+  }
+};
+
+
+
+/**
+ * @brief Read (Java client) configuration file
+ */
+static void read_conf_file (RdKafka::Conf *conf, const std::string &conf_file) {
+  std::ifstream inf(conf_file.c_str());
+
+  if (!inf) {
+    std::cerr << now() << ": " << conf_file << ": could not open file" << std::endl;
+    exit(1);
+  }
+
+  std::cerr << now() << ": " << conf_file << ": read config file" << std::endl;
+
+  std::string line;
+  int linenr = 0;
+
+  while (std::getline(inf, line)) {
+    linenr++;
+
+    // Ignore comments and empty lines
+    if (line[0] == '#' || line.length() == 0)
+      continue;
+
+    // Match on key=value..
+    size_t d = line.find("=");
+    if (d == 0 || d == std::string::npos) {
+      std::cerr << now() << ": " << conf_file << ":" << linenr << ": " << line << ": ignoring invalid line (expect key=value): " << ::std::endl;
+      continue;
+    }
+
+    std::string key = line.substr(0, d);
+    std::string val = line.substr(d+1);
+
+    std::string errstr;
+    if (conf->set(key, val, errstr)) {
+      std::cerr << now() << ": " << conf_file << ":" << linenr << ": " << key << "=" << val << ": " << errstr << ": ignoring error" << std::endl;
+    } else {
+      std::cerr << now() << ": " << conf_file << ":" << linenr << ": " << key << "=" << val << ": applied to configuration" << std::endl;
+    }
+  }
+
+  inf.close();
+}
+
+
+
+
+int main (int argc, char **argv) {
+  std::string brokers = "localhost";
+  std::string errstr;
+  std::vector<std::string> topics;
+  std::string mode = "P";
+  int throughput = 0;
+  int32_t partition = RdKafka::Topic::PARTITION_UA;
+  MyHashPartitionerCb hash_partitioner;
+  int64_t create_time = -1;
+
+  std::cerr << now() << ": librdkafka version " << RdKafka::version_str() <<
+    " (" << RdKafka::version() << ")" << std::endl;
+
+  /*
+   * Create configuration objects
+   */
+  RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
+
+  /* Java VerifiableProducer defaults to acks=all */
+  if (conf->set("acks", "all", errstr)) {
+    std::cerr << now() << ": " << errstr << std::endl;
+    exit(1);
+  }
+
+  /* Avoid slow shutdown on error */
+  if (conf->set("message.timeout.ms", "60000", errstr)) {
+    std::cerr << now() << ": " << errstr << std::endl;
+    exit(1);
+  }
+
+  {
+    char hostname[128];
+    gethostname(hostname, sizeof(hostname)-1);
+    conf->set("client.id", std::string("rdkafka@") + hostname, errstr);
+  }
+
+  conf->set("log.thread.name", "true", errstr);
+
+  /* correct producer offsets */
+  conf->set("produce.offset.report", "true", errstr);
+
+  /* auto commit is explicitly enabled with --enable-autocommit */
+  conf->set("enable.auto.commit", "false", errstr);
+
+  /* keep protocol request timeouts under the watchdog timeout
+   * to make sure things like commitSync() don't fall victim to the watchdog. */
+  conf->set("socket.timeout.ms", "10000", errstr);
+
+  conf->set("fetch.wait.max.ms", "500", errstr);
+  conf->set("fetch.min.bytes", "4096", errstr);
+
+  for (int i = 1 ; i < argc ; i++) {
+    const char *name = argv[i];
+    const char *val = i+1 < argc ? argv[i+1] : NULL;
+
+    if (val && !strncmp(val, "-", 1))
+      val = NULL;
+
+    std::cout << now() << ": argument: " << name << " " <<
+        (val?val:"") << std::endl;
+
+    if (val) {
+      if (!strcmp(name, "--topic"))
+        topics.push_back(val);
+      else if (!strcmp(name, "--broker-list"))
+        brokers = val;
+      else if (!strcmp(name, "--max-messages"))
+        state.maxMessages = atoi(val);
+      else if (!strcmp(name, "--throughput"))
+        throughput = atoi(val);
+      else if (!strcmp(name, "--producer.config") ||
+               !strcmp(name, "--consumer.config"))
+        read_conf_file(conf, val);
+      else if (!strcmp(name, "--group-id"))
+        conf->set("group.id", val, errstr);
+      else if (!strcmp(name, "--session-timeout"))
+        conf->set("session.timeout.ms", val, errstr);
+      else if (!strcmp(name, "--reset-policy")) {
+        if (conf->set("auto.offset.reset", val, errstr)) {
+          std::cerr << now() << ": " << errstr << std::endl;
+          exit(1);
+        }
+      } else if (!strcmp(name, "--assignment-strategy")) {
+        /* The system tests pass the Java class name(s) rather than
+         * the configuration value. Fix it.
+         * "org.apache.kafka.clients.consumer.RangeAssignor,.." -> "range,.."
+         */
+        std::string s = val;
+        size_t pos;
+
+        while ((pos = s.find("org.apache.kafka.clients.consumer.")) !=
+               std::string::npos)
+          s.erase(pos, strlen("org.apache.kafka.clients.consumer."));
+
+        while ((pos = s.find("Assignor")) != std::string::npos)
+          s.erase(pos, strlen("Assignor"));
+
+        std::transform(s.begin(), s.end(), s.begin(), tolower);
+
+        std::cerr << now() << ": converted " << name << " "
+                  << val << " to " << s << std::endl;
+
+        if  (conf->set("partition.assignment.strategy", s.c_str(), errstr)) {
+          std::cerr << now() << ": " << errstr << std::endl;
+          exit(1);
+        }
+      } else if (!strcmp(name, "--value-prefix")) {
+        value_prefix = std::string(val) + ".";
+      } else if (!strcmp(name, "--acks")) {
+       if (conf->set("acks", val, errstr)) {
+         std::cerr << now() << ": " << errstr << std::endl;
+         exit(1);
+       }
+      } else if (!strcmp(name, "--message-create-time")) {
+       create_time = (int64_t)atoi(val);
+      } else if (!strcmp(name, "--debug")) {
+        conf->set("debug", val, errstr);
+      } else if (!strcmp(name, "-X")) {
+        char *s = strdup(val);
+        char *t = strchr(s, '=');
+        if (!t)
+          t = (char *)"";
+        else {
+          *t = '\0';
+          t++;
+        }
+        if (conf->set(s, t, errstr)) {
+          std::cerr << now() << ": " << errstr << std::endl;
+          exit(1);
+        }
+        free(s);
+      } else {
+        std::cerr << now() << ": Unknown option " << name << std::endl;
+        exit(1);
+      }
+
+      i++;
+
+    } else {
+      if (!strcmp(name, "--consumer"))
+        mode = "C";
+      else if (!strcmp(name, "--producer"))
+        mode = "P";
+      else if (!strcmp(name, "--enable-autocommit")) {
+        state.consumer.useAutoCommit = true;
+        conf->set("enable.auto.commit", "true", errstr);
+      } else if (!strcmp(name, "-v"))
+        verbosity++;
+      else if (!strcmp(name, "-q"))
+        verbosity--;
+      else {
+        std::cerr << now() << ": Unknown option or missing argument to " << name << std::endl;
+        exit(1);
+      }
+    }
+  }
+
+  if (topics.empty() || brokers.empty()) {
+    std::cerr << now() << ": Missing --topic and --broker-list" << std::endl;
+    exit(1);
+  }
+
+
+  /*
+   * Set configuration properties
+   */
+  conf->set("metadata.broker.list", brokers, errstr);
+
+  ExampleEventCb ex_event_cb;
+  conf->set("event_cb", &ex_event_cb, errstr);
+
+  signal(SIGINT, sigterm);
+  signal(SIGTERM, sigterm);
+  signal(SIGALRM,  sigwatchdog);
+
+
+  if (mode == "P") {
+    /*
+     * Producer mode
+     */
+
+    ExampleDeliveryReportCb ex_dr_cb;
+
+    /* Set delivery report callback */
+    conf->set("dr_cb", &ex_dr_cb, errstr);
+
+    /*
+     * Create producer using accumulated global configuration.
+     */
+    RdKafka::Producer *producer = RdKafka::Producer::create(conf, errstr);
+    if (!producer) {
+      std::cerr << now() << ": Failed to create producer: " << errstr << std::endl;
+      exit(1);
+    }
+
+    std::cerr << now() << ": % Created producer " << producer->name() << std::endl;
+
+    /*
+     * Create topic handle.
+     */
+    RdKafka::Topic *topic = RdKafka::Topic::create(producer, topics[0],
+                                                   NULL, errstr);
+    if (!topic) {
+      std::cerr << now() << ": Failed to create topic: " << errstr << std::endl;
+      exit(1);
+    }
+
+    static const int delay_us = throughput ? 1000000/throughput : 10;
+
+    if (state.maxMessages == -1)
+      state.maxMessages = 1000000; /* Avoid infinite produce */
+
+    for (int i = 0 ; run && i < state.maxMessages ; i++) {
+      /*
+       * Produce message
+       */
+      std::ostringstream msg;
+      msg << value_prefix << i;
+      while (true) {
+        RdKafka::ErrorCode resp;
+       if (create_time == -1) {
+         resp = producer->produce(topic, partition,
+                                  RdKafka::Producer::RK_MSG_COPY /* Copy payload */,
+                                  const_cast<char *>(msg.str().c_str()),
+                                  msg.str().size(), NULL, NULL);
+       } else {
+         resp = producer->produce(topics[0], partition,
+                                  RdKafka::Producer::RK_MSG_COPY /* Copy payload */,
+                                  const_cast<char *>(msg.str().c_str()),
+                                  msg.str().size(),
+                                  NULL, 0,
+                                  create_time,
+                                  NULL);
+       }
+
+        if (resp == RdKafka::ERR__QUEUE_FULL) {
+          producer->poll(100);
+          continue;
+        } else if (resp != RdKafka::ERR_NO_ERROR) {
+          errorString("producer_send_error",
+                      RdKafka::err2str(resp), topic->name(), NULL, msg.str());
+          state.producer.numErr++;
+        } else {
+          state.producer.numSent++;
+        }
+        break;
+      }
+
+      producer->poll(delay_us / 1000);
+      usleep(1000);
+      watchdog_kick();
+    }
+    run = true;
+
+    while (run && producer->outq_len() > 0) {
+      std::cerr << now() << ": Waiting for " << producer->outq_len() << std::endl;
+      producer->poll(1000);
+      watchdog_kick();
+    }
+
+    std::cerr << now() << ": " << state.producer.numAcked << "/" <<
+        state.producer.numSent << "/" << state.maxMessages <<
+        " msgs acked/sent/max, " << state.producer.numErr <<
+        " errored" << std::endl;
+
+    delete topic;
+    delete producer;
+
+
+  } else if (mode == "C") {
+    /*
+     * Consumer mode
+     */
+
+    conf->set("auto.offset.reset", "smallest", errstr);
+
+    ExampleRebalanceCb ex_rebalance_cb;
+    conf->set("rebalance_cb", &ex_rebalance_cb, errstr);
+
+    conf->set("offset_commit_cb", &ex_offset_commit_cb, errstr);
+
+
+    /*
+     * Create consumer using accumulated global configuration.
+     */
+    consumer = RdKafka::KafkaConsumer::create(conf, errstr);
+    if (!consumer) {
+      std::cerr << now() << ": Failed to create consumer: " <<
+          errstr << std::endl;
+      exit(1);
+    }
+
+    std::cerr << now() << ": % Created consumer " << consumer->name() <<
+        std::endl;
+
+    /*
+     * Subscribe to topic(s)
+     */
+    RdKafka::ErrorCode resp = consumer->subscribe(topics);
+    if (resp != RdKafka::ERR_NO_ERROR) {
+      std::cerr << now() << ": Failed to subscribe to " << topics.size() << " topics: "
+                << RdKafka::err2str(resp) << std::endl;
+      exit(1);
+    }
+
+    watchdog_kick();
+
+    /*
+     * Consume messages
+     */
+    while (run) {
+      RdKafka::Message *msg = consumer->consume(500);
+      msg_consume(consumer, msg, NULL);
+      delete msg;
+      watchdog_kick();
+    }
+
+    std::cerr << now() << ": Final commit on termination" << std::endl;
+
+    /* Final commit */
+    do_commit(consumer, 1);
+
+    /*
+     * Stop consumer
+     */
+    consumer->close();
+
+    delete consumer;
+  }
+
+  std::cout << "{ \"name\": \"shutdown_complete\" }" << std::endl;
+
+  /*
+   * Wait for RdKafka to decommission.
+   * This is not strictly needed (given the outq_len() check above), but
+   * allows RdKafka to clean up all its resources before the application
+   * exits so that memory profilers such as valgrind won't complain about
+   * memory leaks.
+   */
+  RdKafka::wait_destroyed(5000);
+
+  std::cerr << now() << ": EXITING WITH RETURN VALUE 0" << std::endl;
+  return 0;
+}
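
The produce loop above backs off on RdKafka::ERR__QUEUE_FULL by polling the
producer and re-issuing produce(). A minimal sketch of that back-pressure
pattern with the C++ API, assuming an already created RdKafka::Producer and
RdKafka::Topic (the function name and parameters below are illustrative, not
part of this commit):

#include <string>
#include "rdkafkacpp.h"

/* Block until the message is enqueued, serving delivery reports
 * while the local producer queue is full. */
static RdKafka::ErrorCode produce_blocking(RdKafka::Producer *producer,
                                           RdKafka::Topic *topic,
                                           int32_t partition,
                                           const std::string &payload) {
  while (true) {
    RdKafka::ErrorCode err =
        producer->produce(topic, partition,
                          RdKafka::Producer::RK_MSG_COPY /* copy payload */,
                          const_cast<char *>(payload.c_str()), payload.size(),
                          NULL /* key */, NULL /* msg_opaque */);
    if (err != RdKafka::ERR__QUEUE_FULL)
      return err;            /* enqueued, or a hard error for the caller */
    producer->poll(100);     /* serve delivery reports to drain the queue */
  }
}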

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/examples/rdkafka_consume_batch.cpp
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/examples/rdkafka_consume_batch.cpp b/thirdparty/librdkafka-0.11.4/examples/rdkafka_consume_batch.cpp
new file mode 100644
index 0000000..ea4a169
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/examples/rdkafka_consume_batch.cpp
@@ -0,0 +1,260 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2018, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * Apache Kafka consumer & producer example programs
+ * using the Kafka driver from librdkafka
+ * (https://github.com/edenhill/librdkafka)
+ *
+ * This example shows how to read batches of messages.
+ * Note that messages are fetched from the broker in batches regardless
+ * of how the application polls messages from librdkafka; this example
+ * merely shows how to accumulate a set of messages in the application.
+ */
+
+#include <iostream>
+#include <string>
+#include <cstdlib>
+#include <cstdio>
+#include <csignal>
+#include <cstring>
+
+#ifndef _MSC_VER
+#include <sys/time.h>
+#endif
+
+#ifdef _MSC_VER
+#include "../win32/wingetopt.h"
+#include <atltime.h>
+#elif _AIX
+#include <unistd.h>
+#else
+#include <getopt.h>
+#include <unistd.h>
+#endif
+
+/*
+ * Typically the include path in a real application would be
+ * #include <librdkafka/rdkafkacpp.h>
+ */
+#include "rdkafkacpp.h"
+
+
+
+static bool run = true;
+
+static void sigterm (int sig) {
+  run = false;
+}
+
+
+
+/**
+ * @returns the current wall-clock time in milliseconds
+ */
+static int64_t now () {
+#ifndef _MSC_VER
+        struct timeval tv;
+        gettimeofday(&tv, NULL);
+        return ((int64_t)tv.tv_sec * 1000) + (tv.tv_usec / 1000);
+#else
+#error "now() not implemented for Windows, please submit a PR"
+#endif
+}
+
+
+
+/**
+ * @brief Accumulate a batch of \p batch_size messages, but wait
+ *        no longer than \p batch_tmout milliseconds.
+ */
+static std::vector<RdKafka::Message *>
+consume_batch (RdKafka::KafkaConsumer *consumer, size_t batch_size, int batch_tmout) {
+
+  std::vector<RdKafka::Message *> msgs;
+  msgs.reserve(batch_size);
+
+  int64_t end = now() + batch_tmout;
+  int remaining_timeout = batch_tmout;
+
+  while (msgs.size() < batch_size) {
+    RdKafka::Message *msg = consumer->consume(remaining_timeout);
+
+    switch (msg->err()) {
+    case RdKafka::ERR__TIMED_OUT:
+      delete msg;
+      return msgs;
+
+    case RdKafka::ERR_NO_ERROR:
+      msgs.push_back(msg);
+      break;
+
+    default:
+      std::cerr << "%% Consumer error: " << msg->errstr() << std::endl;
+      run = false;
+      delete msg;
+      return msgs;
+    }
+
+    remaining_timeout = end - now();
+    if (remaining_timeout < 0)
+      break;
+  }
+
+  return msgs;
+}
+
+
+int main (int argc, char **argv) {
+  std::string errstr;
+  std::string topic_str;
+  std::vector<std::string> topics;
+  int batch_size = 100;
+  int batch_tmout = 1000;
+
+  /* Create configuration objects */
+  RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
+
+  if (conf->set("enable.partition.eof", "false", errstr) != RdKafka::Conf::CONF_OK) {
+    std::cerr << errstr << std::endl;
+    exit(1);
+  }
+
+  /* Read command line arguments */
+  int opt;
+  while ((opt = getopt(argc, argv, "g:B:T::b:X:")) != -1) {
+    switch (opt) {
+    case 'g':
+      if (conf->set("group.id",  optarg, errstr) != RdKafka::Conf::CONF_OK) {
+        std::cerr << errstr << std::endl;
+        exit(1);
+      }
+      break;
+
+    case 'B':
+      batch_size = atoi(optarg);
+      break;
+
+    case 'T':
+      batch_tmout = atoi(optarg);
+      break;
+
+    case 'b':
+      if (conf->set("bootstrap.servers", optarg, errstr) != RdKafka::Conf::CONF_OK) {
+        std::cerr << errstr << std::endl;
+        exit(1);
+      }
+      break;
+
+    case 'X':
+      {
+        char *name, *val;
+
+        name = optarg;
+        if (!(val = strchr(name, '='))) {
+          std::cerr << "%% Expected -X property=value, not " <<
+              name << std::endl;
+          exit(1);
+        }
+
+        *val = '\0';
+        val++;
+
+        if (conf->set(name, val, errstr) != RdKafka::Conf::CONF_OK) {
+          std::cerr << errstr << std::endl;
+          exit(1);
+        }
+      }
+      break;
+
+    default:
+      goto usage;
+    }
+  }
+
+  /* Topics to consume */
+  for (; optind < argc ; optind++)
+    topics.push_back(std::string(argv[optind]));
+
+  if (topics.empty() || optind != argc) {
+  usage:
+    fprintf(stderr,
+            "Usage: %s -g <group-id> -B <batch-size> [options] topic1 topic2..\n"
+            "\n"
+            "librdkafka version %s (0x%08x)\n"
+            "\n"
+            " Options:\n"
+            "  -g <group-id>    Consumer group id\n"
+            "  -B <batch-size>  How many messages to batch (default: 100).\n"
+            "  -T <batch-tmout> How long to wait for batch-size to accumulate in milliseconds. (default 1000 ms)\n"
+            "  -b <brokers>    Broker address (localhost:9092)\n"
+            "  -X <prop=value> Set arbitrary librdkafka configuration property\n"
+            "\n",
+            argv[0],
+            RdKafka::version_str().c_str(), RdKafka::version());
+        exit(1);
+  }
+
+
+  signal(SIGINT, sigterm);
+  signal(SIGTERM, sigterm);
+
+  /* Create consumer */
+  RdKafka::KafkaConsumer *consumer = RdKafka::KafkaConsumer::create(conf, errstr);
+  if (!consumer) {
+    std::cerr << "Failed to create consumer: " << errstr << std::endl;
+    exit(1);
+  }
+
+  delete conf;
+
+  /* Subscribe to topics */
+  RdKafka::ErrorCode err = consumer->subscribe(topics);
+  if (err) {
+    std::cerr << "Failed to subscribe to " << topics.size() << " topics: "
+              << RdKafka::err2str(err) << std::endl;
+    exit(1);
+  }
+
+  /* Consume messages in batches of \p batch_size */
+  while (run) {
+    auto msgs = consume_batch(consumer, batch_size, batch_tmout);
+    std::cout << "Accumulated " << msgs.size() << " messages:" << std::endl;
+
+    for (auto &msg : msgs) {
+      std::cout << " Message in " << msg->topic_name() << " [" << msg->partition() << "] at offset " << msg->offset() << std::endl;
+      delete msg;
+    }
+  }
+
+  /* Close and destroy consumer */
+  consumer->close();
+  delete consumer;
+
+  return 0;
+}
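
A condensed usage sketch of the consume_batch() helper defined above, assuming
a KafkaConsumer that has already been created and subscribed as in main()
(the driver function and loop bound are illustrative, not part of this commit);
the key point is that every returned RdKafka::Message* is owned by the caller
and must be deleted after processing:

#include <vector>
#include "rdkafkacpp.h"

/* Drain a few batches and release each message after processing. */
static void run_batches(RdKafka::KafkaConsumer *consumer) {
  for (int round = 0; round < 10; round++) {
    std::vector<RdKafka::Message *> batch =
        consume_batch(consumer, 100 /* batch_size */, 1000 /* batch_tmout ms */);
    for (size_t i = 0; i < batch.size(); i++) {
      /* process batch[i]->payload(), batch[i]->len(), ... */
      delete batch[i];
    }
  }
}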

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/examples/rdkafka_consumer_example.c
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/examples/rdkafka_consumer_example.c b/thirdparty/librdkafka-0.11.4/examples/rdkafka_consumer_example.c
new file mode 100644
index 0000000..3896df8
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/examples/rdkafka_consumer_example.c
@@ -0,0 +1,624 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2015, Magnus Edenhill
+ * All rights reserved.
+ * 
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met: 
+ * 
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer. 
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution. 
+ * 
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * Apache Kafka high level consumer example program
+ * using the Kafka driver from librdkafka
+ * (https://github.com/edenhill/librdkafka)
+ */
+
+#include <ctype.h>
+#include <signal.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <syslog.h>
+#include <sys/time.h>
+#include <errno.h>
+#include <getopt.h>
+
+/* Typical include path would be <librdkafka/rdkafka.h>, but this program
+ * is built from within the librdkafka source tree and thus differs. */
+#include "rdkafka.h"  /* for Kafka driver */
+
+
+static int run = 1;
+static rd_kafka_t *rk;
+static int exit_eof = 0;
+static int wait_eof = 0;  /* number of partitions awaiting EOF */
+static int quiet = 0;
+static enum {
+	OUTPUT_HEXDUMP,
+	OUTPUT_RAW,
+} output = OUTPUT_HEXDUMP;
+
+static void stop (int sig) {
+        if (!run)
+                exit(1);
+	run = 0;
+	fclose(stdin); /* abort fgets() */
+}
+
+
+static void hexdump (FILE *fp, const char *name, const void *ptr, size_t len) {
+	const char *p = (const char *)ptr;
+	unsigned int of = 0;
+
+
+	if (name)
+		fprintf(fp, "%s hexdump (%zd bytes):\n", name, len);
+
+	for (of = 0 ; of < len ; of += 16) {
+		char hexen[16*3+1];
+		char charen[16+1];
+		int hof = 0;
+
+		int cof = 0;
+		int i;
+
+		for (i = of ; i < (int)of + 16 && i < (int)len ; i++) {
+			hof += sprintf(hexen+hof, "%02x ", p[i] & 0xff);
+			cof += sprintf(charen+cof, "%c",
+				       isprint((int)p[i]) ? p[i] : '.');
+		}
+		fprintf(fp, "%08x: %-48s %-16s\n",
+			of, hexen, charen);
+	}
+}
+
+/**
+ * Kafka logger callback (optional)
+ */
+static void logger (const rd_kafka_t *rk, int level,
+		    const char *fac, const char *buf) {
+	struct timeval tv;
+	gettimeofday(&tv, NULL);
+	fprintf(stdout, "%u.%03u RDKAFKA-%i-%s: %s: %s\n",
+		(int)tv.tv_sec, (int)(tv.tv_usec / 1000),
+		level, fac, rd_kafka_name(rk), buf);
+}
+
+
+
+/**
+ * Handle and print a consumed message.
+ * Internally crafted messages are also used to propagate state from
+ * librdkafka to the application. The application needs to check
+ * the `rkmessage->err` field for this purpose.
+ */
+static void msg_consume (rd_kafka_message_t *rkmessage) {
+	if (rkmessage->err) {
+		if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) {
+			fprintf(stderr,
+				"%% Consumer reached end of %s [%"PRId32"] "
+			       "message queue at offset %"PRId64"\n",
+			       rd_kafka_topic_name(rkmessage->rkt),
+			       rkmessage->partition, rkmessage->offset);
+
+			if (exit_eof && --wait_eof == 0) {
+                                fprintf(stderr,
+                                        "%% All partition(s) reached EOF: "
+                                        "exiting\n");
+				run = 0;
+                        }
+
+			return;
+		}
+
+                if (rkmessage->rkt)
+                        fprintf(stderr, "%% Consume error for "
+                                "topic \"%s\" [%"PRId32"] "
+                                "offset %"PRId64": %s\n",
+                                rd_kafka_topic_name(rkmessage->rkt),
+                                rkmessage->partition,
+                                rkmessage->offset,
+                                rd_kafka_message_errstr(rkmessage));
+                else
+                        fprintf(stderr, "%% Consumer error: %s: %s\n",
+                                rd_kafka_err2str(rkmessage->err),
+                                rd_kafka_message_errstr(rkmessage));
+
+                if (rkmessage->err == RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION ||
+                    rkmessage->err == RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC)
+                        run = 0;
+		return;
+	}
+
+	if (!quiet)
+		fprintf(stdout, "%% Message (topic %s [%"PRId32"], "
+                        "offset %"PRId64", %zd bytes):\n",
+                        rd_kafka_topic_name(rkmessage->rkt),
+                        rkmessage->partition,
+			rkmessage->offset, rkmessage->len);
+
+	if (rkmessage->key_len) {
+		if (output == OUTPUT_HEXDUMP)
+			hexdump(stdout, "Message Key",
+				rkmessage->key, rkmessage->key_len);
+		else
+			printf("Key: %.*s\n",
+			       (int)rkmessage->key_len, (char *)rkmessage->key);
+	}
+
+	if (output == OUTPUT_HEXDUMP)
+		hexdump(stdout, "Message Payload",
+			rkmessage->payload, rkmessage->len);
+	else
+		printf("%.*s\n",
+		       (int)rkmessage->len, (char *)rkmessage->payload);
+}
+
+
+static void print_partition_list (FILE *fp,
+                                  const rd_kafka_topic_partition_list_t
+                                  *partitions) {
+        int i;
+        for (i = 0 ; i < partitions->cnt ; i++) {
+                fprintf(stderr, "%s %s [%"PRId32"] offset %"PRId64,
+                        i > 0 ? ",":"",
+                        partitions->elems[i].topic,
+                        partitions->elems[i].partition,
+			partitions->elems[i].offset);
+        }
+        fprintf(stderr, "\n");
+
+}
+static void rebalance_cb (rd_kafka_t *rk,
+                          rd_kafka_resp_err_t err,
+			  rd_kafka_topic_partition_list_t *partitions,
+                          void *opaque) {
+
+	fprintf(stderr, "%% Consumer group rebalanced: ");
+
+	switch (err)
+	{
+	case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS:
+		fprintf(stderr, "assigned:\n");
+		print_partition_list(stderr, partitions);
+		rd_kafka_assign(rk, partitions);
+		wait_eof += partitions->cnt;
+		break;
+
+	case RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS:
+		fprintf(stderr, "revoked:\n");
+		print_partition_list(stderr, partitions);
+		rd_kafka_assign(rk, NULL);
+		wait_eof = 0;
+		break;
+
+	default:
+		fprintf(stderr, "failed: %s\n",
+                        rd_kafka_err2str(err));
+                rd_kafka_assign(rk, NULL);
+		break;
+	}
+}
+
+
+static int describe_groups (rd_kafka_t *rk, const char *group) {
+        rd_kafka_resp_err_t err;
+        const struct rd_kafka_group_list *grplist;
+        int i;
+
+        err = rd_kafka_list_groups(rk, group, &grplist, 10000);
+
+        if (err) {
+                fprintf(stderr, "%% Failed to acquire group list: %s\n",
+                        rd_kafka_err2str(err));
+                return -1;
+        }
+
+        for (i = 0 ; i < grplist->group_cnt ; i++) {
+                const struct rd_kafka_group_info *gi = &grplist->groups[i];
+                int j;
+
+                printf("Group \"%s\" in state %s on broker %d (%s:%d)\n",
+                       gi->group, gi->state,
+                       gi->broker.id, gi->broker.host, gi->broker.port);
+                if (gi->err)
+                        printf(" Error: %s\n", rd_kafka_err2str(gi->err));
+                printf(" Protocol type \"%s\", protocol \"%s\", "
+                       "with %d member(s):\n",
+                       gi->protocol_type, gi->protocol, gi->member_cnt);
+
+                for (j = 0 ; j < gi->member_cnt ; j++) {
+                        const struct rd_kafka_group_member_info *mi;
+                        mi = &gi->members[j];
+
+                        printf("  \"%s\", client id \"%s\" on host %s\n",
+                               mi->member_id, mi->client_id, mi->client_host);
+                        printf("    metadata: %d bytes\n",
+                               mi->member_metadata_size);
+                        printf("    assignment: %d bytes\n",
+                               mi->member_assignment_size);
+                }
+                printf("\n");
+        }
+
+        if (group && !grplist->group_cnt)
+                fprintf(stderr, "%% No matching group (%s)\n", group);
+
+        rd_kafka_group_list_destroy(grplist);
+
+        return 0;
+}
+
+
+
+static void sig_usr1 (int sig) {
+	rd_kafka_dump(stdout, rk);
+}
+
+int main (int argc, char **argv) {
+        char mode = 'C';
+	char *brokers = "localhost:9092";
+	int opt;
+	rd_kafka_conf_t *conf;
+	rd_kafka_topic_conf_t *topic_conf;
+	char errstr[512];
+	const char *debug = NULL;
+	int do_conf_dump = 0;
+	char tmp[16];
+        rd_kafka_resp_err_t err;
+        char *group = NULL;
+        rd_kafka_topic_partition_list_t *topics;
+        int is_subscription;
+        int i;
+
+	quiet = !isatty(STDIN_FILENO);
+
+	/* Kafka configuration */
+	conf = rd_kafka_conf_new();
+
+        /* Set logger */
+        rd_kafka_conf_set_log_cb(conf, logger);
+
+	/* Quick termination */
+	snprintf(tmp, sizeof(tmp), "%i", SIGIO);
+	rd_kafka_conf_set(conf, "internal.termination.signal", tmp, NULL, 0);
+
+	/* Topic configuration */
+	topic_conf = rd_kafka_topic_conf_new();
+
+	while ((opt = getopt(argc, argv, "g:b:qd:eX:ADO")) != -1) {
+		switch (opt) {
+		case 'b':
+			brokers = optarg;
+			break;
+                case 'g':
+                        group = optarg;
+                        break;
+		case 'e':
+			exit_eof = 1;
+			break;
+		case 'd':
+			debug = optarg;
+			break;
+		case 'q':
+			quiet = 1;
+			break;
+		case 'A':
+			output = OUTPUT_RAW;
+			break;
+		case 'X':
+		{
+			char *name, *val;
+			rd_kafka_conf_res_t res;
+
+			if (!strcmp(optarg, "list") ||
+			    !strcmp(optarg, "help")) {
+				rd_kafka_conf_properties_show(stdout);
+				exit(0);
+			}
+
+			if (!strcmp(optarg, "dump")) {
+				do_conf_dump = 1;
+				continue;
+			}
+
+			name = optarg;
+			if (!(val = strchr(name, '='))) {
+				fprintf(stderr, "%% Expected "
+					"-X property=value, not %s\n", name);
+				exit(1);
+			}
+
+			*val = '\0';
+			val++;
+
+			res = RD_KAFKA_CONF_UNKNOWN;
+			/* Try "topic." prefixed properties on topic
+			 * conf first, and then fall through to global if
+			 * it didn't match a topic configuration property. */
+			if (!strncmp(name, "topic.", strlen("topic.")))
+				res = rd_kafka_topic_conf_set(topic_conf,
+							      name+
+							      strlen("topic."),
+							      val,
+							      errstr,
+							      sizeof(errstr));
+
+			if (res == RD_KAFKA_CONF_UNKNOWN)
+				res = rd_kafka_conf_set(conf, name, val,
+							errstr, sizeof(errstr));
+
+			if (res != RD_KAFKA_CONF_OK) {
+				fprintf(stderr, "%% %s\n", errstr);
+				exit(1);
+			}
+		}
+		break;
+
+                case 'D':
+                case 'O':
+                        mode = opt;
+                        break;
+
+		default:
+			goto usage;
+		}
+	}
+
+
+	if (do_conf_dump) {
+		const char **arr;
+		size_t cnt;
+		int pass;
+
+		for (pass = 0 ; pass < 2 ; pass++) {
+			if (pass == 0) {
+				arr = rd_kafka_conf_dump(conf, &cnt);
+				printf("# Global config\n");
+			} else {
+				printf("# Topic config\n");
+				arr = rd_kafka_topic_conf_dump(topic_conf,
+							       &cnt);
+			}
+
+			for (i = 0 ; i < (int)cnt ; i += 2)
+				printf("%s = %s\n",
+				       arr[i], arr[i+1]);
+
+			printf("\n");
+
+			rd_kafka_conf_dump_free(arr, cnt);
+		}
+
+		exit(0);
+	}
+
+
+	if (strchr("OC", mode) && optind == argc) {
+	usage:
+		fprintf(stderr,
+			"Usage: %s [options] <topic[:part]> <topic[:part]>..\n"
+			"\n"
+			"librdkafka version %s (0x%08x)\n"
+			"\n"
+			" Options:\n"
+                        "  -g <group>      Consumer group (%s)\n"
+			"  -b <brokers>    Broker address (%s)\n"
+			"  -e              Exit consumer when last message\n"
+			"                  in partition has been received.\n"
+                        "  -D              Describe group.\n"
+                        "  -O              Get committed offset(s)\n"
+			"  -d [facs..]     Enable debugging contexts:\n"
+			"                  %s\n"
+			"  -q              Be quiet\n"
+			"  -A              Raw payload output (consumer)\n"
+			"  -X <prop=value> Set arbitrary librdkafka "
+			"configuration property\n"
+			"               Properties prefixed with \"topic.\" "
+			"will be set on topic object.\n"
+			"               Use '-X list' to see the full list\n"
+			"               of supported properties.\n"
+			"\n"
+                        "For balanced consumer groups use the 'topic1 topic2..'"
+                        " format\n"
+                        "and for static assignment use "
+                        "'topic1:part1 topic1:part2 topic2:part1..'\n"
+			"\n",
+			argv[0],
+			rd_kafka_version_str(), rd_kafka_version(),
+                        group, brokers,
+			RD_KAFKA_DEBUG_CONTEXTS);
+		exit(1);
+	}
+
+
+	signal(SIGINT, stop);
+	signal(SIGUSR1, sig_usr1);
+
+	if (debug &&
+	    rd_kafka_conf_set(conf, "debug", debug, errstr, sizeof(errstr)) !=
+	    RD_KAFKA_CONF_OK) {
+		fprintf(stderr, "%% Debug configuration failed: %s: %s\n",
+			errstr, debug);
+		exit(1);
+	}
+
+        /*
+         * Client/Consumer group
+         */
+
+        if (strchr("CO", mode)) {
+                /* Consumer groups require a group id */
+                if (!group)
+                        group = "rdkafka_consumer_example";
+                if (rd_kafka_conf_set(conf, "group.id", group,
+                                      errstr, sizeof(errstr)) !=
+                    RD_KAFKA_CONF_OK) {
+                        fprintf(stderr, "%% %s\n", errstr);
+                        exit(1);
+                }
+
+                /* Consumer groups always use broker based offset storage */
+                if (rd_kafka_topic_conf_set(topic_conf, "offset.store.method",
+                                            "broker",
+                                            errstr, sizeof(errstr)) !=
+                    RD_KAFKA_CONF_OK) {
+                        fprintf(stderr, "%% %s\n", errstr);
+                        exit(1);
+                }
+
+                /* Set default topic config for pattern-matched topics. */
+                rd_kafka_conf_set_default_topic_conf(conf, topic_conf);
+
+                /* Callback called on partition assignment changes */
+                rd_kafka_conf_set_rebalance_cb(conf, rebalance_cb);
+        }
+
+        /* Create Kafka handle */
+        if (!(rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf,
+                                errstr, sizeof(errstr)))) {
+                fprintf(stderr,
+                        "%% Failed to create new consumer: %s\n",
+                        errstr);
+                exit(1);
+        }
+
+        /* Add brokers */
+        if (rd_kafka_brokers_add(rk, brokers) == 0) {
+                fprintf(stderr, "%% No valid brokers specified\n");
+                exit(1);
+        }
+
+
+        if (mode == 'D') {
+                int r;
+                /* Describe groups */
+                r = describe_groups(rk, group);
+
+                rd_kafka_destroy(rk);
+                exit(r == -1 ? 1 : 0);
+        }
+
+        /* Redirect rd_kafka_poll() to consumer_poll() */
+        rd_kafka_poll_set_consumer(rk);
+
+        topics = rd_kafka_topic_partition_list_new(argc - optind);
+        is_subscription = 1;
+        for (i = optind ; i < argc ; i++) {
+                /* Parse "topic[:part]" */
+                char *topic = argv[i];
+                char *t;
+                int32_t partition = -1;
+
+                if ((t = strstr(topic, ":"))) {
+                        *t = '\0';
+                        partition = atoi(t+1);
+                        is_subscription = 0; /* is assignment */
+                        wait_eof++;
+                }
+
+                rd_kafka_topic_partition_list_add(topics, topic, partition);
+        }
+
+        if (mode == 'O') {
+                /* Offset query */
+
+                err = rd_kafka_committed(rk, topics, 5000);
+                if (err) {
+                        fprintf(stderr, "%% Failed to fetch offsets: %s\n",
+                                rd_kafka_err2str(err));
+                        exit(1);
+                }
+
+                for (i = 0 ; i < topics->cnt ; i++) {
+                        rd_kafka_topic_partition_t *p = &topics->elems[i];
+                        printf("Topic \"%s\" partition %"PRId32,
+                               p->topic, p->partition);
+                        if (p->err)
+                                printf(" error %s",
+                                       rd_kafka_err2str(p->err));
+                        else {
+                                printf(" offset %"PRId64"",
+                                       p->offset);
+
+                                if (p->metadata_size)
+                                        printf(" (%d bytes of metadata)",
+                                               (int)p->metadata_size);
+                        }
+                        printf("\n");
+                }
+
+                goto done;
+        }
+
+
+        if (is_subscription) {
+                fprintf(stderr, "%% Subscribing to %d topics\n", topics->cnt);
+
+                if ((err = rd_kafka_subscribe(rk, topics))) {
+                        fprintf(stderr,
+                                "%% Failed to start consuming topics: %s\n",
+                                rd_kafka_err2str(err));
+                        exit(1);
+                }
+        } else {
+                fprintf(stderr, "%% Assigning %d partitions\n", topics->cnt);
+
+                if ((err = rd_kafka_assign(rk, topics))) {
+                        fprintf(stderr,
+                                "%% Failed to assign partitions: %s\n",
+                                rd_kafka_err2str(err));
+                }
+        }
+
+        while (run) {
+                rd_kafka_message_t *rkmessage;
+
+                rkmessage = rd_kafka_consumer_poll(rk, 1000);
+                if (rkmessage) {
+                        msg_consume(rkmessage);
+                        rd_kafka_message_destroy(rkmessage);
+                }
+        }
+
+done:
+        err = rd_kafka_consumer_close(rk);
+        if (err)
+                fprintf(stderr, "%% Failed to close consumer: %s\n",
+                        rd_kafka_err2str(err));
+        else
+                fprintf(stderr, "%% Consumer closed\n");
+
+        rd_kafka_topic_partition_list_destroy(topics);
+
+        /* Destroy handle */
+        rd_kafka_destroy(rk);
+
+	/* Let background threads clean up and terminate cleanly. */
+	run = 5;
+	while (run-- > 0 && rd_kafka_wait_destroyed(1000) == -1)
+		printf("Waiting for librdkafka to decommission\n");
+	if (run <= 0)
+		rd_kafka_dump(stdout, rk);
+
+	return 0;
+}
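
The C example above chooses between a balanced subscription ('topic1 topic2')
and a static assignment ('topic1:part1 topic2:part1') depending on whether any
argument carries an explicit partition. A rough equivalent of that decision
using the C++ API, given an already created RdKafka::KafkaConsumer (function
and variable names are illustrative, not part of this commit):

#include <cstdlib>
#include <string>
#include <vector>
#include "rdkafkacpp.h"

/* Parse "topic[:partition]" arguments; use assign() if any argument
 * pins a partition, otherwise subscribe() for a balanced group. */
static RdKafka::ErrorCode start_consuming(RdKafka::KafkaConsumer *consumer,
                                          const std::vector<std::string> &args) {
  std::vector<std::string> topics;
  std::vector<RdKafka::TopicPartition *> assignment;
  bool is_subscription = true;

  for (size_t i = 0; i < args.size(); i++) {
    std::string topic = args[i];
    int32_t partition = -1;
    size_t colon = topic.find(':');
    if (colon != std::string::npos) {
      partition = std::atoi(topic.c_str() + colon + 1);
      topic.erase(colon);
      is_subscription = false;   /* at least one explicit partition */
    }
    topics.push_back(topic);
    assignment.push_back(RdKafka::TopicPartition::create(topic, partition));
  }

  RdKafka::ErrorCode err = is_subscription ?
      consumer->subscribe(topics) : consumer->assign(assignment);

  for (size_t i = 0; i < assignment.size(); i++)
    delete assignment[i];        /* TopicPartition objects are caller-owned */
  return err;
}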

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/examples/rdkafka_consumer_example.cpp
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/examples/rdkafka_consumer_example.cpp b/thirdparty/librdkafka-0.11.4/examples/rdkafka_consumer_example.cpp
new file mode 100644
index 0000000..83da691
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/examples/rdkafka_consumer_example.cpp
@@ -0,0 +1,485 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2014, Magnus Edenhill
+ * All rights reserved.
+ * 
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met: 
+ * 
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer. 
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution. 
+ * 
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * Apache Kafka consumer & producer example programs
+ * using the Kafka driver from librdkafka
+ * (https://github.com/edenhill/librdkafka)
+ */
+
+#include <iostream>
+#include <string>
+#include <cstdlib>
+#include <cstdio>
+#include <csignal>
+#include <cstring>
+
+#ifndef _MSC_VER
+#include <sys/time.h>
+#endif
+
+#ifdef _MSC_VER
+#include "../win32/wingetopt.h"
+#include <atltime.h>
+#elif _AIX
+#include <unistd.h>
+#else
+#include <getopt.h>
+#include <unistd.h>
+#endif
+
+/*
+ * Typically the include path in a real application would be
+ * #include <librdkafka/rdkafkacpp.h>
+ */
+#include "rdkafkacpp.h"
+
+
+
+static bool run = true;
+static bool exit_eof = false;
+static int eof_cnt = 0;
+static int partition_cnt = 0;
+static int verbosity = 1;
+static long msg_cnt = 0;
+static int64_t msg_bytes = 0;
+static void sigterm (int sig) {
+  run = false;
+}
+
+
+/**
+ * @brief format a string timestamp from the current time
+ */
+static void print_time () {
+#ifndef _MSC_VER
+        struct timeval tv;
+        char buf[64];
+        gettimeofday(&tv, NULL);
+        strftime(buf, sizeof(buf) - 1, "%Y-%m-%d %H:%M:%S", localtime(&tv.tv_sec));
+        fprintf(stderr, "%s.%03d: ", buf, (int)(tv.tv_usec / 1000));
+#else
+        std::wcerr << CTime::GetCurrentTime().Format(_T("%Y-%m-%d %H:%M:%S")).GetString()
+                << ": ";
+#endif
+}
+class ExampleEventCb : public RdKafka::EventCb {
+ public:
+  void event_cb (RdKafka::Event &event) {
+
+    print_time();
+
+    switch (event.type())
+    {
+      case RdKafka::Event::EVENT_ERROR:
+        std::cerr << "ERROR (" << RdKafka::err2str(event.err()) << "): " <<
+            event.str() << std::endl;
+        if (event.err() == RdKafka::ERR__ALL_BROKERS_DOWN)
+          run = false;
+        break;
+
+      case RdKafka::Event::EVENT_STATS:
+        std::cerr << "\"STATS\": " << event.str() << std::endl;
+        break;
+
+      case RdKafka::Event::EVENT_LOG:
+        fprintf(stderr, "LOG-%i-%s: %s\n",
+                event.severity(), event.fac().c_str(), event.str().c_str());
+        break;
+
+      case RdKafka::Event::EVENT_THROTTLE:
+	std::cerr << "THROTTLED: " << event.throttle_time() << "ms by " <<
+	  event.broker_name() << " id " << (int)event.broker_id() << std::endl;
+	break;
+
+      default:
+        std::cerr << "EVENT " << event.type() <<
+            " (" << RdKafka::err2str(event.err()) << "): " <<
+            event.str() << std::endl;
+        break;
+    }
+  }
+};
+
+
+class ExampleRebalanceCb : public RdKafka::RebalanceCb {
+private:
+  static void part_list_print (const std::vector<RdKafka::TopicPartition*>&partitions){
+    for (unsigned int i = 0 ; i < partitions.size() ; i++)
+      std::cerr << partitions[i]->topic() <<
+	"[" << partitions[i]->partition() << "], ";
+    std::cerr << "\n";
+  }
+
+public:
+  void rebalance_cb (RdKafka::KafkaConsumer *consumer,
+		     RdKafka::ErrorCode err,
+                     std::vector<RdKafka::TopicPartition*> &partitions) {
+    std::cerr << "RebalanceCb: " << RdKafka::err2str(err) << ": ";
+
+    part_list_print(partitions);
+
+    if (err == RdKafka::ERR__ASSIGN_PARTITIONS) {
+      consumer->assign(partitions);
+      partition_cnt = (int)partitions.size();
+    } else {
+      consumer->unassign();
+      partition_cnt = 0;
+    }
+    eof_cnt = 0;
+  }
+};
+
+
+void msg_consume(RdKafka::Message* message, void* opaque) {
+  switch (message->err()) {
+    case RdKafka::ERR__TIMED_OUT:
+      break;
+
+    case RdKafka::ERR_NO_ERROR:
+      /* Real message */
+      msg_cnt++;
+      msg_bytes += message->len();
+      if (verbosity >= 3)
+        std::cerr << "Read msg at offset " << message->offset() << std::endl;
+      RdKafka::MessageTimestamp ts;
+      ts = message->timestamp();
+      if (verbosity >= 2 &&
+	  ts.type != RdKafka::MessageTimestamp::MSG_TIMESTAMP_NOT_AVAILABLE) {
+	std::string tsname = "?";
+	if (ts.type == RdKafka::MessageTimestamp::MSG_TIMESTAMP_CREATE_TIME)
+	  tsname = "create time";
+        else if (ts.type == RdKafka::MessageTimestamp::MSG_TIMESTAMP_LOG_APPEND_TIME)
+          tsname = "log append time";
+        std::cout << "Timestamp: " << tsname << " " << ts.timestamp << std::endl;
+      }
+      if (verbosity >= 2 && message->key()) {
+        std::cout << "Key: " << *message->key() << std::endl;
+      }
+      if (verbosity >= 1) {
+        printf("%.*s\n",
+               static_cast<int>(message->len()),
+               static_cast<const char *>(message->payload()));
+      }
+      break;
+
+    case RdKafka::ERR__PARTITION_EOF:
+      /* Last message */
+      if (exit_eof && ++eof_cnt == partition_cnt) {
+        std::cerr << "%% EOF reached for all " << partition_cnt <<
+            " partition(s)" << std::endl;
+        run = false;
+      }
+      break;
+
+    case RdKafka::ERR__UNKNOWN_TOPIC:
+    case RdKafka::ERR__UNKNOWN_PARTITION:
+      std::cerr << "Consume failed: " << message->errstr() << std::endl;
+      run = false;
+      break;
+
+    default:
+      /* Errors */
+      std::cerr << "Consume failed: " << message->errstr() << std::endl;
+      run = false;
+  }
+}
+
+
+class ExampleConsumeCb : public RdKafka::ConsumeCb {
+ public:
+  void consume_cb (RdKafka::Message &msg, void *opaque) {
+    msg_consume(&msg, opaque);
+  }
+};
+
+
+
+int main (int argc, char **argv) {
+  std::string brokers = "localhost";
+  std::string errstr;
+  std::string topic_str;
+  std::string mode;
+  std::string debug;
+  std::vector<std::string> topics;
+  bool do_conf_dump = false;
+  int opt;
+  int use_ccb = 0;
+
+  /*
+   * Create configuration objects
+   */
+  RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
+  RdKafka::Conf *tconf = RdKafka::Conf::create(RdKafka::Conf::CONF_TOPIC);
+
+  ExampleRebalanceCb ex_rebalance_cb;
+  conf->set("rebalance_cb", &ex_rebalance_cb, errstr);
+
+  while ((opt = getopt(argc, argv, "g:b:z:qd:eX:AM:f:qv")) != -1) {
+    switch (opt) {
+    case 'g':
+      if (conf->set("group.id",  optarg, errstr) != RdKafka::Conf::CONF_OK) {
+        std::cerr << errstr << std::endl;
+        exit(1);
+      }
+      break;
+    case 'b':
+      brokers = optarg;
+      break;
+    case 'z':
+      if (conf->set("compression.codec", optarg, errstr) !=
+	  RdKafka::Conf::CONF_OK) {
+	std::cerr << errstr << std::endl;
+	exit(1);
+      }
+      break;
+    case 'e':
+      exit_eof = true;
+      break;
+    case 'd':
+      debug = optarg;
+      break;
+    case 'M':
+      if (conf->set("statistics.interval.ms", optarg, errstr) !=
+          RdKafka::Conf::CONF_OK) {
+        std::cerr << errstr << std::endl;
+        exit(1);
+      }
+      break;
+    case 'X':
+      {
+	char *name, *val;
+
+	if (!strcmp(optarg, "dump")) {
+	  do_conf_dump = true;
+	  continue;
+	}
+
+	name = optarg;
+	if (!(val = strchr(name, '='))) {
+          std::cerr << "%% Expected -X property=value, not " <<
+              name << std::endl;
+	  exit(1);
+	}
+
+	*val = '\0';
+	val++;
+
+	/* Try "topic." prefixed properties on topic
+	 * conf first, and then fall through to global if
+	 * it didn't match a topic configuration property. */
+        RdKafka::Conf::ConfResult res = RdKafka::Conf::CONF_UNKNOWN;
+	if (!strncmp(name, "topic.", strlen("topic.")))
+          res = tconf->set(name+strlen("topic."), val, errstr);
+        if (res == RdKafka::Conf::CONF_UNKNOWN)
+	  res = conf->set(name, val, errstr);
+
+	if (res != RdKafka::Conf::CONF_OK) {
+          std::cerr << errstr << std::endl;
+	  exit(1);
+	}
+      }
+      break;
+
+      case 'f':
+        if (!strcmp(optarg, "ccb"))
+          use_ccb = 1;
+        else {
+          std::cerr << "Unknown option: " << optarg << std::endl;
+          exit(1);
+        }
+        break;
+
+      case 'q':
+        verbosity--;
+        break;
+
+      case 'v':
+        verbosity++;
+        break;
+
+    default:
+      goto usage;
+    }
+  }
+
+  for (; optind < argc ; optind++)
+    topics.push_back(std::string(argv[optind]));
+
+  if (topics.empty() || optind != argc) {
+  usage:
+    fprintf(stderr,
+            "Usage: %s -g <group-id> [options] topic1 topic2..\n"
+            "\n"
+            "librdkafka version %s (0x%08x)\n"
+            "\n"
+            " Options:\n"
+            "  -g <group-id>   Consumer group id\n"
+            "  -b <brokers>    Broker address (localhost:9092)\n"
+            "  -z <codec>      Enable compression:\n"
+            "                  none|gzip|snappy\n"
+            "  -e              Exit consumer when last message\n"
+            "                  in partition has been received.\n"
+            "  -d [facs..]     Enable debugging contexts:\n"
+            "                  %s\n"
+            "  -M <intervalms> Enable statistics\n"
+            "  -X <prop=value> Set arbitrary librdkafka "
+            "configuration property\n"
+            "                  Properties prefixed with \"topic.\" "
+            "will be set on topic object.\n"
+            "                  Use '-X list' to see the full list\n"
+            "                  of supported properties.\n"
+            "  -f <flag>       Set option:\n"
+            "                     ccb - use consume_callback\n"
+            "  -q              Quiet / Decrease verbosity\n"
+            "  -v              Increase verbosity\n"
+            "\n"
+            "\n",
+	    argv[0],
+	    RdKafka::version_str().c_str(), RdKafka::version(),
+	    RdKafka::get_debug_contexts().c_str());
+	exit(1);
+  }
+
+
+  /*
+   * Set configuration properties
+   */
+  conf->set("metadata.broker.list", brokers, errstr);
+
+  if (!debug.empty()) {
+    if (conf->set("debug", debug, errstr) != RdKafka::Conf::CONF_OK) {
+      std::cerr << errstr << std::endl;
+      exit(1);
+    }
+  }
+
+  ExampleConsumeCb ex_consume_cb;
+
+  if(use_ccb) {
+    conf->set("consume_cb", &ex_consume_cb, errstr);
+  }
+
+  ExampleEventCb ex_event_cb;
+  conf->set("event_cb", &ex_event_cb, errstr);
+
+  if (do_conf_dump) {
+    int pass;
+
+    for (pass = 0 ; pass < 2 ; pass++) {
+      std::list<std::string> *dump;
+      if (pass == 0) {
+        dump = conf->dump();
+        std::cout << "# Global config" << std::endl;
+      } else {
+        dump = tconf->dump();
+        std::cout << "# Topic config" << std::endl;
+      }
+
+      for (std::list<std::string>::iterator it = dump->begin();
+           it != dump->end(); ) {
+        std::cout << *it << " = ";
+        it++;
+        std::cout << *it << std::endl;
+        it++;
+      }
+      std::cout << std::endl;
+    }
+    exit(0);
+  }
+
+  conf->set("default_topic_conf", tconf, errstr);
+  delete tconf;
+
+  signal(SIGINT, sigterm);
+  signal(SIGTERM, sigterm);
+
+
+  /*
+   * Consumer mode
+   */
+
+  /*
+   * Create consumer using accumulated global configuration.
+   */
+  RdKafka::KafkaConsumer *consumer = RdKafka::KafkaConsumer::create(conf, errstr);
+  if (!consumer) {
+    std::cerr << "Failed to create consumer: " << errstr << std::endl;
+    exit(1);
+  }
+
+  delete conf;
+
+  std::cout << "% Created consumer " << consumer->name() << std::endl;
+
+
+  /*
+   * Subscribe to topics
+   */
+  RdKafka::ErrorCode err = consumer->subscribe(topics);
+  if (err) {
+    std::cerr << "Failed to subscribe to " << topics.size() << " topics: "
+              << RdKafka::err2str(err) << std::endl;
+    exit(1);
+  }
+
+  /*
+   * Consume messages
+   */
+  while (run) {
+    RdKafka::Message *msg = consumer->consume(1000);
+    if (!use_ccb) {
+      msg_consume(msg, NULL);
+    }
+    delete msg;
+  }
+
+#ifndef _MSC_VER
+  alarm(10);
+#endif
+
+  /*
+   * Stop consumer
+   */
+  consumer->close();
+  delete consumer;
+
+  std::cerr << "% Consumed " << msg_cnt << " messages ("
+            << msg_bytes << " bytes)" << std::endl;
+
+  /*
+   * Wait for RdKafka to decommission.
+   * This is not strictly needed (given the outq_len() check above), but
+   * allows RdKafka to clean up all its resources before the application
+   * exits so that memory profilers such as valgrind won't complain about
+   * memory leaks.
+   */
+  RdKafka::wait_destroyed(5000);
+
+  return 0;
+}
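
Boiled down, the consumer flow in the example above is: configure, create,
subscribe, poll, close, wait for destruction. A minimal self-contained sketch
of that flow (broker address, group id, topic name and the bounded demo loop
are placeholders, not part of this commit):

#include <iostream>
#include <string>
#include <vector>
#include "rdkafkacpp.h"

int main() {
  std::string errstr;
  RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
  conf->set("metadata.broker.list", "localhost:9092", errstr);
  conf->set("group.id", "example-group", errstr);

  RdKafka::KafkaConsumer *consumer =
      RdKafka::KafkaConsumer::create(conf, errstr);
  delete conf;
  if (!consumer) {
    std::cerr << "Failed to create consumer: " << errstr << std::endl;
    return 1;
  }

  std::vector<std::string> topics;
  topics.push_back("example-topic");
  if (consumer->subscribe(topics) != RdKafka::ERR_NO_ERROR) {
    std::cerr << "Subscribe failed" << std::endl;
    return 1;
  }

  for (int i = 0; i < 100; i++) {           /* bounded demo loop */
    RdKafka::Message *msg = consumer->consume(1000);
    if (msg->err() == RdKafka::ERR_NO_ERROR) {
      std::cout.write(static_cast<const char *>(msg->payload()),
                      (std::streamsize)msg->len());
      std::cout << std::endl;
    }
    delete msg;                             /* always owned by the caller */
  }

  consumer->close();
  delete consumer;
  RdKafka::wait_destroyed(5000);            /* let background threads finish */
  return 0;
}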


http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/examples/rdkafka_example.cpp
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/examples/rdkafka_example.cpp b/thirdparty/librdkafka-0.11.1/examples/rdkafka_example.cpp
deleted file mode 100644
index 30d0d0e..0000000
--- a/thirdparty/librdkafka-0.11.1/examples/rdkafka_example.cpp
+++ /dev/null
@@ -1,645 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2014, Magnus Edenhill
- * All rights reserved.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met: 
- * 
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer. 
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution. 
- * 
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/**
- * Apache Kafka consumer & producer example programs
- * using the Kafka driver from librdkafka
- * (https://github.com/edenhill/librdkafka)
- */
-
-#include <iostream>
-#include <string>
-#include <cstdlib>
-#include <cstdio>
-#include <csignal>
-#include <cstring>
-
-#ifdef _MSC_VER
-#include "../win32/wingetopt.h"
-#elif _AIX
-#include <unistd.h>
-#else
-#include <getopt.h>
-#endif
-
-/*
- * Typically include path in a real application would be
- * #include <librdkafka/rdkafkacpp.h>
- */
-#include "rdkafkacpp.h"
-
-
-static void metadata_print (const std::string &topic,
-                            const RdKafka::Metadata *metadata) {
-  std::cout << "Metadata for " << (topic.empty() ? "" : "all topics")
-           << "(from broker "  << metadata->orig_broker_id()
-           << ":" << metadata->orig_broker_name() << std::endl;
-
-  /* Iterate brokers */
-  std::cout << " " << metadata->brokers()->size() << " brokers:" << std::endl;
-  RdKafka::Metadata::BrokerMetadataIterator ib;
-  for (ib = metadata->brokers()->begin();
-       ib != metadata->brokers()->end();
-       ++ib) {
-    std::cout << "  broker " << (*ib)->id() << " at "
-              << (*ib)->host() << ":" << (*ib)->port() << std::endl;
-  }
-  /* Iterate topics */
-  std::cout << metadata->topics()->size() << " topics:" << std::endl;
-  RdKafka::Metadata::TopicMetadataIterator it;
-  for (it = metadata->topics()->begin();
-       it != metadata->topics()->end();
-       ++it) {
-    std::cout << "  topic \""<< (*it)->topic() << "\" with "
-              << (*it)->partitions()->size() << " partitions:";
-
-    if ((*it)->err() != RdKafka::ERR_NO_ERROR) {
-      std::cout << " " << err2str((*it)->err());
-      if ((*it)->err() == RdKafka::ERR_LEADER_NOT_AVAILABLE)
-        std::cout << " (try again)";
-    }
-    std::cout << std::endl;
-
-    /* Iterate topic's partitions */
-    RdKafka::TopicMetadata::PartitionMetadataIterator ip;
-    for (ip = (*it)->partitions()->begin();
-         ip != (*it)->partitions()->end();
-         ++ip) {
-      std::cout << "    partition " << (*ip)->id()
-                << ", leader " << (*ip)->leader()
-                << ", replicas: ";
-
-      /* Iterate partition's replicas */
-      RdKafka::PartitionMetadata::ReplicasIterator ir;
-      for (ir = (*ip)->replicas()->begin();
-           ir != (*ip)->replicas()->end();
-           ++ir) {
-        std::cout << (ir == (*ip)->replicas()->begin() ? "":",") << *ir;
-      }
-
-      /* Iterate partition's ISRs */
-      std::cout << ", isrs: ";
-      RdKafka::PartitionMetadata::ISRSIterator iis;
-      for (iis = (*ip)->isrs()->begin(); iis != (*ip)->isrs()->end() ; ++iis)
-        std::cout << (iis == (*ip)->isrs()->begin() ? "":",") << *iis;
-
-      if ((*ip)->err() != RdKafka::ERR_NO_ERROR)
-        std::cout << ", " << RdKafka::err2str((*ip)->err()) << std::endl;
-      else
-        std::cout << std::endl;
-    }
-  }
-}
-
-static bool run = true;
-static bool exit_eof = false;
-
-static void sigterm (int sig) {
-  run = false;
-}
-
-
-class ExampleDeliveryReportCb : public RdKafka::DeliveryReportCb {
- public:
-  void dr_cb (RdKafka::Message &message) {
-    std::cout << "Message delivery for (" << message.len() << " bytes): " <<
-        message.errstr() << std::endl;
-    if (message.key())
-      std::cout << "Key: " << *(message.key()) << ";" << std::endl;
-  }
-};
-
-
-class ExampleEventCb : public RdKafka::EventCb {
- public:
-  void event_cb (RdKafka::Event &event) {
-    switch (event.type())
-    {
-      case RdKafka::Event::EVENT_ERROR:
-        std::cerr << "ERROR (" << RdKafka::err2str(event.err()) << "): " <<
-            event.str() << std::endl;
-        if (event.err() == RdKafka::ERR__ALL_BROKERS_DOWN)
-          run = false;
-        break;
-
-      case RdKafka::Event::EVENT_STATS:
-        std::cerr << "\"STATS\": " << event.str() << std::endl;
-        break;
-
-      case RdKafka::Event::EVENT_LOG:
-        fprintf(stderr, "LOG-%i-%s: %s\n",
-                event.severity(), event.fac().c_str(), event.str().c_str());
-        break;
-
-      default:
-        std::cerr << "EVENT " << event.type() <<
-            " (" << RdKafka::err2str(event.err()) << "): " <<
-            event.str() << std::endl;
-        break;
-    }
-  }
-};
-
-
-/* Use of this partitioner is pretty pointless since no key is provided
- * in the produce() call. */
-class MyHashPartitionerCb : public RdKafka::PartitionerCb {
- public:
-  int32_t partitioner_cb (const RdKafka::Topic *topic, const std::string *key,
-                          int32_t partition_cnt, void *msg_opaque) {
-    return djb_hash(key->c_str(), key->size()) % partition_cnt;
-  }
- private:
-
-  static inline unsigned int djb_hash (const char *str, size_t len) {
-    unsigned int hash = 5381;
-    for (size_t i = 0 ; i < len ; i++)
-      hash = ((hash << 5) + hash) + str[i];
-    return hash;
-  }
-};
-
-void msg_consume(RdKafka::Message* message, void* opaque) {
-  switch (message->err()) {
-    case RdKafka::ERR__TIMED_OUT:
-      break;
-
-    case RdKafka::ERR_NO_ERROR:
-      /* Real message */
-      std::cout << "Read msg at offset " << message->offset() << std::endl;
-      if (message->key()) {
-        std::cout << "Key: " << *message->key() << std::endl;
-      }
-      printf("%.*s\n",
-        static_cast<int>(message->len()),
-        static_cast<const char *>(message->payload()));
-      break;
-
-    case RdKafka::ERR__PARTITION_EOF:
-      /* Last message */
-      if (exit_eof) {
-        run = false;
-      }
-      break;
-
-    case RdKafka::ERR__UNKNOWN_TOPIC:
-    case RdKafka::ERR__UNKNOWN_PARTITION:
-      std::cerr << "Consume failed: " << message->errstr() << std::endl;
-      run = false;
-      break;
-
-    default:
-      /* Errors */
-      std::cerr << "Consume failed: " << message->errstr() << std::endl;
-      run = false;
-  }
-}
-
-
-class ExampleConsumeCb : public RdKafka::ConsumeCb {
- public:
-  void consume_cb (RdKafka::Message &msg, void *opaque) {
-    msg_consume(&msg, opaque);
-  }
-};
-
-
-
-int main (int argc, char **argv) {
-  std::string brokers = "localhost";
-  std::string errstr;
-  std::string topic_str;
-  std::string mode;
-  std::string debug;
-  int32_t partition = RdKafka::Topic::PARTITION_UA;
-  int64_t start_offset = RdKafka::Topic::OFFSET_BEGINNING;
-  bool do_conf_dump = false;
-  int opt;
-  MyHashPartitionerCb hash_partitioner;
-  int use_ccb = 0;
-
-  /*
-   * Create configuration objects
-   */
-  RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
-  RdKafka::Conf *tconf = RdKafka::Conf::create(RdKafka::Conf::CONF_TOPIC);
-
-
-  while ((opt = getopt(argc, argv, "PCLt:p:b:z:qd:o:eX:AM:f:")) != -1) {
-    switch (opt) {
-    case 'P':
-    case 'C':
-    case 'L':
-      mode = opt;
-      break;
-    case 't':
-      topic_str = optarg;
-      break;
-    case 'p':
-      if (!strcmp(optarg, "random"))
-        /* default */;
-      else if (!strcmp(optarg, "hash")) {
-        if (tconf->set("partitioner_cb", &hash_partitioner, errstr) !=
-            RdKafka::Conf::CONF_OK) {
-          std::cerr << errstr << std::endl;
-          exit(1);
-        }
-      } else
-        partition = std::atoi(optarg);
-      break;
-    case 'b':
-      brokers = optarg;
-      break;
-    case 'z':
-      if (conf->set("compression.codec", optarg, errstr) !=
-	  RdKafka::Conf::CONF_OK) {
-	std::cerr << errstr << std::endl;
-	exit(1);
-      }
-      break;
-    case 'o':
-      if (!strcmp(optarg, "end"))
-	start_offset = RdKafka::Topic::OFFSET_END;
-      else if (!strcmp(optarg, "beginning"))
-	start_offset = RdKafka::Topic::OFFSET_BEGINNING;
-      else if (!strcmp(optarg, "stored"))
-	start_offset = RdKafka::Topic::OFFSET_STORED;
-      else
-	start_offset = strtoll(optarg, NULL, 10);
-      break;
-    case 'e':
-      exit_eof = true;
-      break;
-    case 'd':
-      debug = optarg;
-      break;
-    case 'M':
-      if (conf->set("statistics.interval.ms", optarg, errstr) !=
-          RdKafka::Conf::CONF_OK) {
-        std::cerr << errstr << std::endl;
-        exit(1);
-      }
-      break;
-    case 'X':
-      {
-	char *name, *val;
-
-	if (!strcmp(optarg, "dump")) {
-	  do_conf_dump = true;
-	  continue;
-	}
-
-	name = optarg;
-	if (!(val = strchr(name, '='))) {
-          std::cerr << "%% Expected -X property=value, not " <<
-              name << std::endl;
-	  exit(1);
-	}
-
-	*val = '\0';
-	val++;
-
-	/* Try "topic." prefixed properties on topic
-	 * conf first, and then fall through to global if
-	 * it didnt match a topic configuration property. */
-        RdKafka::Conf::ConfResult res;
-	if (!strncmp(name, "topic.", strlen("topic.")))
-          res = tconf->set(name+strlen("topic."), val, errstr);
-        else
-	  res = conf->set(name, val, errstr);
-
-	if (res != RdKafka::Conf::CONF_OK) {
-          std::cerr << errstr << std::endl;
-	  exit(1);
-	}
-      }
-      break;
-
-      case 'f':
-        if (!strcmp(optarg, "ccb"))
-          use_ccb = 1;
-        else {
-          std::cerr << "Unknown option: " << optarg << std::endl;
-          exit(1);
-        }
-        break;
-
-    default:
-      goto usage;
-    }
-  }
-
-  if (mode.empty() || (topic_str.empty() && mode != "L") || optind != argc) {
-  usage:
-	  std::string features;
-	  conf->get("builtin.features", features);
-    fprintf(stderr,
-            "Usage: %s [-C|-P] -t <topic> "
-            "[-p <partition>] [-b <host1:port1,host2:port2,..>]\n"
-            "\n"
-            "librdkafka version %s (0x%08x, builtin.features \"%s\")\n"
-            "\n"
-            " Options:\n"
-            "  -C | -P         Consumer or Producer mode\n"
-            "  -L              Metadata list mode\n"
-            "  -t <topic>      Topic to fetch / produce\n"
-            "  -p <num>        Partition (random partitioner)\n"
-            "  -p <func>       Use partitioner:\n"
-            "                  random (default), hash\n"
-            "  -b <brokers>    Broker address (localhost:9092)\n"
-            "  -z <codec>      Enable compression:\n"
-            "                  none|gzip|snappy\n"
-            "  -o <offset>     Start offset (consumer)\n"
-            "  -e              Exit consumer when last message\n"
-            "                  in partition has been received.\n"
-            "  -d [facs..]     Enable debugging contexts:\n"
-            "                  %s\n"
-            "  -M <intervalms> Enable statistics\n"
-            "  -X <prop=name>  Set arbitrary librdkafka "
-            "configuration property\n"
-            "                  Properties prefixed with \"topic.\" "
-            "will be set on topic object.\n"
-            "                  Use '-X list' to see the full list\n"
-            "                  of supported properties.\n"
-            "  -f <flag>       Set option:\n"
-            "                     ccb - use consume_callback\n"
-            "\n"
-            " In Consumer mode:\n"
-            "  writes fetched messages to stdout\n"
-            " In Producer mode:\n"
-            "  reads messages from stdin and sends to broker\n"
-            "\n"
-            "\n"
-            "\n",
-	    argv[0],
-	    RdKafka::version_str().c_str(), RdKafka::version(),
-		features.c_str(),
-	    RdKafka::get_debug_contexts().c_str());
-	exit(1);
-  }
-
-
-  /*
-   * Set configuration properties
-   */
-  conf->set("metadata.broker.list", brokers, errstr);
-
-  if (!debug.empty()) {
-    if (conf->set("debug", debug, errstr) != RdKafka::Conf::CONF_OK) {
-      std::cerr << errstr << std::endl;
-      exit(1);
-    }
-  }
-
-  ExampleEventCb ex_event_cb;
-  conf->set("event_cb", &ex_event_cb, errstr);
-
-  if (do_conf_dump) {
-    int pass;
-
-    for (pass = 0 ; pass < 2 ; pass++) {
-      std::list<std::string> *dump;
-      if (pass == 0) {
-        dump = conf->dump();
-        std::cout << "# Global config" << std::endl;
-      } else {
-        dump = tconf->dump();
-        std::cout << "# Topic config" << std::endl;
-      }
-
-      for (std::list<std::string>::iterator it = dump->begin();
-           it != dump->end(); ) {
-        std::cout << *it << " = ";
-        it++;
-        std::cout << *it << std::endl;
-        it++;
-      }
-      std::cout << std::endl;
-    }
-    exit(0);
-  }
-
-  signal(SIGINT, sigterm);
-  signal(SIGTERM, sigterm);
-
-
-  if (mode == "P") {
-    /*
-     * Producer mode
-     */
-
-    if(topic_str.empty())
-      goto usage;
-
-    ExampleDeliveryReportCb ex_dr_cb;
-
-    /* Set delivery report callback */
-    conf->set("dr_cb", &ex_dr_cb, errstr);
-
-    /*
-     * Create producer using accumulated global configuration.
-     */
-    RdKafka::Producer *producer = RdKafka::Producer::create(conf, errstr);
-    if (!producer) {
-      std::cerr << "Failed to create producer: " << errstr << std::endl;
-      exit(1);
-    }
-
-    std::cout << "% Created producer " << producer->name() << std::endl;
-
-    /*
-     * Create topic handle.
-     */
-    RdKafka::Topic *topic = RdKafka::Topic::create(producer, topic_str,
-						   tconf, errstr);
-    if (!topic) {
-      std::cerr << "Failed to create topic: " << errstr << std::endl;
-      exit(1);
-    }
-
-    /*
-     * Read messages from stdin and produce to broker.
-     */
-    for (std::string line; run && std::getline(std::cin, line);) {
-      if (line.empty()) {
-        producer->poll(0);
-	continue;
-      }
-
-      /*
-       * Produce message
-       */
-      RdKafka::ErrorCode resp =
-	producer->produce(topic, partition,
-			  RdKafka::Producer::RK_MSG_COPY /* Copy payload */,
-			  const_cast<char *>(line.c_str()), line.size(),
-			  NULL, NULL);
-      if (resp != RdKafka::ERR_NO_ERROR)
-	std::cerr << "% Produce failed: " <<
-	  RdKafka::err2str(resp) << std::endl;
-      else
-	std::cerr << "% Produced message (" << line.size() << " bytes)" <<
-	  std::endl;
-
-      producer->poll(0);
-    }
-    run = true;
-
-    while (run && producer->outq_len() > 0) {
-      std::cerr << "Waiting for " << producer->outq_len() << std::endl;
-      producer->poll(1000);
-    }
-
-    delete topic;
-    delete producer;
-
-
-  } else if (mode == "C") {
-    /*
-     * Consumer mode
-     */
-
-    if(topic_str.empty())
-      goto usage;
-
-    /*
-     * Create consumer using accumulated global configuration.
-     */
-    RdKafka::Consumer *consumer = RdKafka::Consumer::create(conf, errstr);
-    if (!consumer) {
-      std::cerr << "Failed to create consumer: " << errstr << std::endl;
-      exit(1);
-    }
-
-    std::cout << "% Created consumer " << consumer->name() << std::endl;
-
-    /*
-     * Create topic handle.
-     */
-    RdKafka::Topic *topic = RdKafka::Topic::create(consumer, topic_str,
-						   tconf, errstr);
-    if (!topic) {
-      std::cerr << "Failed to create topic: " << errstr << std::endl;
-      exit(1);
-    }
-
-    /*
-     * Start consumer for topic+partition at start offset
-     */
-    RdKafka::ErrorCode resp = consumer->start(topic, partition, start_offset);
-    if (resp != RdKafka::ERR_NO_ERROR) {
-      std::cerr << "Failed to start consumer: " <<
-	RdKafka::err2str(resp) << std::endl;
-      exit(1);
-    }
-
-    ExampleConsumeCb ex_consume_cb;
-
-    /*
-     * Consume messages
-     */
-    while (run) {
-      if (use_ccb) {
-        consumer->consume_callback(topic, partition, 1000,
-                                   &ex_consume_cb, &use_ccb);
-      } else {
-        RdKafka::Message *msg = consumer->consume(topic, partition, 1000);
-        msg_consume(msg, NULL);
-        delete msg;
-      }
-      consumer->poll(0);
-    }
-
-    /*
-     * Stop consumer
-     */
-    consumer->stop(topic, partition);
-
-    consumer->poll(1000);
-
-    delete topic;
-    delete consumer;
-  } else {
-    /* Metadata mode */
-
-    /*
-     * Create producer using accumulated global configuration.
-     */
-    RdKafka::Producer *producer = RdKafka::Producer::create(conf, errstr);
-    if (!producer) {
-      std::cerr << "Failed to create producer: " << errstr << std::endl;
-      exit(1);
-    }
-
-    std::cout << "% Created producer " << producer->name() << std::endl;
-
-    /*
-     * Create topic handle.
-     */
-    RdKafka::Topic *topic = NULL;
-    if(!topic_str.empty()) {
-      topic = RdKafka::Topic::create(producer, topic_str, tconf, errstr);
-      if (!topic) {
-        std::cerr << "Failed to create topic: " << errstr << std::endl;
-        exit(1);
-      }
-    }
-
-    while (run) {
-      class RdKafka::Metadata *metadata;
-
-      /* Fetch metadata */
-      RdKafka::ErrorCode err = producer->metadata(topic!=NULL, topic,
-                              &metadata, 5000);
-      if (err != RdKafka::ERR_NO_ERROR) {
-        std::cerr << "%% Failed to acquire metadata: " 
-                  << RdKafka::err2str(err) << std::endl;
-              run = 0;
-              break;
-      }
-
-      metadata_print(topic_str, metadata);
-
-      delete metadata;
-      run = 0;
-    }
-
-  }
-
-
-  /*
-   * Wait for RdKafka to decommission.
-   * This is not strictly needed (when checking outq_len() above), but
-   * allows RdKafka to clean up all its resources before the application
-   * exits so that memory profilers such as valgrind won't complain about
-   * memory leaks.
-   */
-  RdKafka::wait_destroyed(5000);
-
-  return 0;
-}
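
For quick reference, the core produce/poll pattern that the removed rdkafka_example.cpp demonstrates is captured in the minimal sketch below, written against the librdkafka C++ API (rdkafkacpp.h). The broker address "localhost:9092" and the topic name "test" are placeholder values and error handling is trimmed to the essentials; this is an illustrative sketch, not code from the repository.

// Minimal producer sketch against the librdkafka C++ API (rdkafkacpp.h).
// "localhost:9092" and "test" are placeholder values.
#include <iostream>
#include <string>
#include <librdkafka/rdkafkacpp.h>

int main() {
  std::string errstr;

  /* Global configuration: where to bootstrap the cluster from. */
  RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
  if (conf->set("metadata.broker.list", "localhost:9092", errstr) !=
      RdKafka::Conf::CONF_OK) {
    std::cerr << errstr << std::endl;
    return 1;
  }

  RdKafka::Producer *producer = RdKafka::Producer::create(conf, errstr);
  if (!producer) {
    std::cerr << "Failed to create producer: " << errstr << std::endl;
    return 1;
  }

  RdKafka::Conf *tconf = RdKafka::Conf::create(RdKafka::Conf::CONF_TOPIC);
  RdKafka::Topic *topic =
      RdKafka::Topic::create(producer, "test", tconf, errstr);
  if (!topic) {
    std::cerr << "Failed to create topic: " << errstr << std::endl;
    return 1;
  }

  /* produce() is asynchronous; RK_MSG_COPY makes librdkafka copy the
   * payload so the local buffer may go out of scope immediately. */
  std::string line = "hello kafka";
  RdKafka::ErrorCode resp = producer->produce(
      topic, RdKafka::Topic::PARTITION_UA, RdKafka::Producer::RK_MSG_COPY,
      const_cast<char *>(line.c_str()), line.size(), NULL, NULL);
  if (resp != RdKafka::ERR_NO_ERROR)
    std::cerr << "Produce failed: " << RdKafka::err2str(resp) << std::endl;

  /* poll() serves delivery-report callbacks; drain the queue before exit. */
  while (producer->outq_len() > 0)
    producer->poll(100);

  delete topic;
  delete producer;
  RdKafka::wait_destroyed(5000);
  return 0;
}

The point the removed example makes is that produce() is asynchronous: poll() must be called regularly to serve delivery-report callbacks, and the outq_len() loop drains messages still in flight before the handle is destroyed.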

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/examples/rdkafka_performance.c
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/examples/rdkafka_performance.c b/thirdparty/librdkafka-0.11.1/examples/rdkafka_performance.c
deleted file mode 100644
index 34294d8..0000000
--- a/thirdparty/librdkafka-0.11.1/examples/rdkafka_performance.c
+++ /dev/null
@@ -1,1561 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012, Magnus Edenhill
- * All rights reserved.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met: 
- * 
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer. 
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution. 
- * 
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/**
- * Apache Kafka consumer & producer performance tester
- * using the Kafka driver from librdkafka
- * (https://github.com/edenhill/librdkafka)
- */
-
-#ifdef _MSC_VER
-#define  _CRT_SECURE_NO_WARNINGS /* Silence nonsense on MSVC */
-#endif
-
-#include "../src/rd.h"
-
-#define _GNU_SOURCE /* for strndup() */
-#include <ctype.h>
-#include <signal.h>
-#include <string.h>
-#include <errno.h>
-
-/* Typical include path would be <librdkafka/rdkafka.h>, but this program
- * is built from within the librdkafka source tree and thus differs. */
-#include "rdkafka.h"  /* for Kafka driver */
-/* Do not include these defines from your program, they will not be
- * provided by librdkafka. */
-#include "rd.h"
-#include "rdtime.h"
-
-#ifdef _MSC_VER
-#include "../win32/wingetopt.h"
-#include "../win32/wintime.h"
-#endif
-
-
-static int run = 1;
-static int forever = 1;
-static rd_ts_t dispintvl = 1000;
-static int do_seq = 0;
-static int exit_after = 0;
-static int exit_eof = 0;
-static FILE *stats_fp;
-static int dr_disp_div;
-static int verbosity = 1;
-static int latency_mode = 0;
-static int report_offset = 0;
-static FILE *latency_fp = NULL;
-static int msgcnt = -1;
-static int incremental_mode = 0;
-static int partition_cnt = 0;
-static int eof_cnt = 0;
-static int with_dr = 1;
-
-static void stop (int sig) {
-        if (!run)
-                exit(0);
-	run = 0;
-}
-
-static long int msgs_wait_cnt = 0;
-static long int msgs_wait_produce_cnt = 0;
-static rd_ts_t t_end;
-static rd_kafka_t *global_rk;
-
-struct avg {
-        int64_t  val;
-        int      cnt;
-        uint64_t ts_start;
-};
-
-static struct {
-	rd_ts_t  t_start;
-	rd_ts_t  t_end;
-	rd_ts_t  t_end_send;
-	uint64_t msgs;
-	uint64_t msgs_last;
-        uint64_t msgs_dr_ok;
-        uint64_t msgs_dr_err;
-        uint64_t bytes_dr_ok;
-	uint64_t bytes;
-	uint64_t bytes_last;
-	uint64_t tx;
-	uint64_t tx_err;
-        uint64_t avg_rtt;
-        uint64_t offset;
-	rd_ts_t  t_fetch_latency;
-	rd_ts_t  t_last;
-        rd_ts_t  t_enobufs_last;
-	rd_ts_t  t_total;
-        rd_ts_t  latency_last;
-        rd_ts_t  latency_lo;
-        rd_ts_t  latency_hi;
-        rd_ts_t  latency_sum;
-        int      latency_cnt;
-        int64_t  last_offset;
-} cnt;
-
-
-uint64_t wall_clock (void) {
-        struct timeval tv;
-        gettimeofday(&tv, NULL);
-        return ((uint64_t)tv.tv_sec * 1000000LLU) +
-		((uint64_t)tv.tv_usec);
-}
-
-static void err_cb (rd_kafka_t *rk, int err, const char *reason, void *opaque) {
-	printf("%% ERROR CALLBACK: %s: %s: %s\n",
-	       rd_kafka_name(rk), rd_kafka_err2str(err), reason);
-}
-
-static void throttle_cb (rd_kafka_t *rk, const char *broker_name,
-			 int32_t broker_id, int throttle_time_ms,
-			 void *opaque) {
-	printf("%% THROTTLED %dms by %s (%"PRId32")\n", throttle_time_ms,
-	       broker_name, broker_id);
-}
-
-static void offset_commit_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err,
-                              rd_kafka_topic_partition_list_t *offsets,
-                              void *opaque) {
-        int i;
-
-        if (err || verbosity >= 2)
-                printf("%% Offset commit of %d partition(s): %s\n",
-                       offsets->cnt, rd_kafka_err2str(err));
-
-        for (i = 0 ; i < offsets->cnt ; i++) {
-                rd_kafka_topic_partition_t *rktpar = &offsets->elems[i];
-                if (rktpar->err || verbosity >= 2)
-                        printf("%%  %s [%"PRId32"] @ %"PRId64": %s\n",
-                               rktpar->topic, rktpar->partition,
-                               rktpar->offset, rd_kafka_err2str(err));
-        }
-}
-
-/**
- * @brief Add latency measurement
- */
-static void latency_add (int64_t ts, const char *who) {
-        if (ts > cnt.latency_hi)
-                cnt.latency_hi = ts;
-        if (!cnt.latency_lo || ts < cnt.latency_lo)
-                cnt.latency_lo = ts;
-        cnt.latency_last = ts;
-        cnt.latency_cnt++;
-        cnt.latency_sum += ts;
-        if (latency_fp)
-                fprintf(latency_fp, "%"PRIu64"\n", ts);
-}
-
-
-static void msg_delivered (rd_kafka_t *rk,
-                           const rd_kafka_message_t *rkmessage, void *opaque) {
-	static rd_ts_t last;
-	rd_ts_t now = rd_clock();
-	static int msgs;
-
-	msgs++;
-
-	msgs_wait_cnt--;
-
-	if (rkmessage->err)
-                cnt.msgs_dr_err++;
-        else {
-                cnt.msgs_dr_ok++;
-                cnt.bytes_dr_ok += rkmessage->len;
-        }
-
-        if (latency_mode) {
-                /* Extract latency */
-                int64_t source_ts;
-                if (sscanf(rkmessage->payload, "LATENCY:%"SCNd64,
-                           &source_ts) == 1)
-                        latency_add(wall_clock() - source_ts, "producer");
-        }
-
-
-	if ((rkmessage->err &&
-	     (cnt.msgs_dr_err < 50 ||
-              !(cnt.msgs_dr_err % (dispintvl / 1000)))) ||
-	    !last || msgs_wait_cnt < 5 ||
-	    !(msgs_wait_cnt % dr_disp_div) || 
-	    (now - last) >= dispintvl * 1000 ||
-            verbosity >= 3) {
-		if (rkmessage->err && verbosity >= 2)
-			printf("%% Message delivery failed: %s [%"PRId32"]: "
-			       "%s (%li remain)\n",
-			       rd_kafka_topic_name(rkmessage->rkt),
-			       rkmessage->partition,
-			       rd_kafka_err2str(rkmessage->err),
-			       msgs_wait_cnt);
-		else if (verbosity > 2)
-			printf("%% Message delivered (offset %"PRId64"): "
-                               "%li remain\n",
-                               rkmessage->offset, msgs_wait_cnt);
-		if (verbosity >= 3 && do_seq)
-			printf(" --> \"%.*s\"\n",
-                               (int)rkmessage->len,
-                               (const char *)rkmessage->payload);
-		last = now;
-	}
-
-        if (report_offset)
-                cnt.last_offset = rkmessage->offset;
-
-	if (msgs_wait_produce_cnt == 0 && msgs_wait_cnt == 0 && !forever) {
-		if (verbosity >= 2)
-			printf("All messages delivered!\n");
-		t_end = rd_clock();
-		run = 0;
-	}
-
-	if (exit_after && exit_after <= msgs) {
-		printf("%% Hard exit after %i messages, as requested\n",
-		       exit_after);
-		exit(0);
-	}
-}
-
-
-static void msg_consume (rd_kafka_message_t *rkmessage, void *opaque) {
-
-	if (rkmessage->err) {
-		if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) {
-                        cnt.offset = rkmessage->offset;
-
-                        if (verbosity >= 1)
-                                printf("%% Consumer reached end of "
-                                       "%s [%"PRId32"] "
-                                       "message queue at offset %"PRId64"\n",
-                                       rd_kafka_topic_name(rkmessage->rkt),
-                                       rkmessage->partition, rkmessage->offset);
-
-			if (exit_eof && ++eof_cnt == partition_cnt)
-				run = 0;
-
-			return;
-		}
-
-		printf("%% Consume error for topic \"%s\" [%"PRId32"] "
-		       "offset %"PRId64": %s\n",
-		       rkmessage->rkt ? rd_kafka_topic_name(rkmessage->rkt):"",
-		       rkmessage->partition,
-		       rkmessage->offset,
-		       rd_kafka_message_errstr(rkmessage));
-
-                if (rkmessage->err == RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION ||
-                    rkmessage->err == RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC)
-                        run = 0;
-
-                cnt.msgs_dr_err++;
-		return;
-	}
-
-	/* Start measuring from first message received */
-	if (!cnt.t_start)
-		cnt.t_start = cnt.t_last = rd_clock();
-
-        cnt.offset = rkmessage->offset;
-	cnt.msgs++;
-	cnt.bytes += rkmessage->len;
-
-	if (verbosity >= 3 ||
-            (verbosity >= 2 && !(cnt.msgs % 1000000)))
-		printf("@%"PRId64": %.*s: %.*s\n",
-		       rkmessage->offset,
-                       (int)rkmessage->key_len, (char *)rkmessage->key,
-		       (int)rkmessage->len, (char *)rkmessage->payload);
-
-
-        if (latency_mode) {
-                int64_t remote_ts, ts;
-
-                if (rkmessage->len > 8 &&
-                    !memcmp(rkmessage->payload, "LATENCY:", 8) &&
-                    sscanf(rkmessage->payload, "LATENCY:%"SCNd64,
-                           &remote_ts) == 1) {
-                        ts = wall_clock() - remote_ts;
-                        if (ts > 0 && ts < (1000000 * 60 * 5)) {
-                                latency_add(ts, "consumer");
-                        } else {
-                                if (verbosity >= 1)
-                                        printf("Received latency timestamp is too far off: %"PRId64"us (message offset %"PRId64"): ignored\n",
-                                               ts, rkmessage->offset);
-                        }
-                } else if (verbosity > 1)
-                        printf("not a LATENCY payload: %.*s\n",
-                               (int)rkmessage->len,
-                               (char *)rkmessage->payload);
-
-        }
-
-        if (msgcnt != -1 && (int)cnt.msgs >= msgcnt)
-                run = 0;
-}
-
-
-static void rebalance_cb (rd_kafka_t *rk,
-			  rd_kafka_resp_err_t err,
-			  rd_kafka_topic_partition_list_t *partitions,
-			  void *opaque) {
-
-	switch (err)
-	{
-	case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS:
-		fprintf(stderr,
-			"%% Group rebalanced: %d partition(s) assigned\n",
-			partitions->cnt);
-		eof_cnt = 0;
-		partition_cnt = partitions->cnt;
-		rd_kafka_assign(rk, partitions);
-		break;
-
-	case RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS:
-		fprintf(stderr,
-			"%% Group rebalanced: %d partition(s) revoked\n",
-			partitions->cnt);
-		eof_cnt = 0;
-		partition_cnt = 0;
-		rd_kafka_assign(rk, NULL);
-		break;
-
-	default:
-		break;
-	}
-}
-
-
-/**
- * Find and extract single value from a two-level search.
- * First find 'field1', then find 'field2' and extract its value.
- * Returns 0 on miss, else the value.
- */
-static uint64_t json_parse_fields (const char *json, const char **end,
-                                   const char *field1, const char *field2) {
-        const char *t = json;
-        const char *t2;
-        int len1 = (int)strlen(field1);
-        int len2 = (int)strlen(field2);
-
-        while ((t2 = strstr(t, field1))) {
-                uint64_t v;
-
-                t = t2;
-                t += len1;
-
-                /* Find field */
-                if (!(t2 = strstr(t, field2)))
-                        continue;
-                t2 += len2;
-
-                while (isspace((int)*t2))
-                        t2++;
-
-                v = strtoull(t2, (char **)&t, 10);
-                if (t2 == t)
-                        continue;
-
-                *end = t;
-                return v;
-        }
-
-        *end = t + strlen(t);
-        return 0;
-}
-
-/**
- * Parse various values from rdkafka stats
- */
-static void json_parse_stats (const char *json) {
-        const char *t;
-#define MAX_AVGS 100 /* max number of brokers to scan for rtt */
-        uint64_t avg_rtt[MAX_AVGS+1];
-        int avg_rtt_i     = 0;
-
-        /* Store totals at end of array */
-        avg_rtt[MAX_AVGS]     = 0;
-
-        /* Extract all broker RTTs */
-        t = json;
-        while (avg_rtt_i < MAX_AVGS && *t) {
-                avg_rtt[avg_rtt_i] = json_parse_fields(t, &t,
-                                                       "\"rtt\":",
-                                                       "\"avg\":");
-
-                /* Skip low RTT values; they mean no messages are passing */
-                if (avg_rtt[avg_rtt_i] < 100 /*0.1ms*/)
-                        continue;
-
-
-                avg_rtt[MAX_AVGS] += avg_rtt[avg_rtt_i];
-                avg_rtt_i++;
-        }
-
-        if (avg_rtt_i > 0)
-                avg_rtt[MAX_AVGS] /= avg_rtt_i;
-
-        cnt.avg_rtt = avg_rtt[MAX_AVGS];
-}
-
-
-static int stats_cb (rd_kafka_t *rk, char *json, size_t json_len,
-		     void *opaque) {
-
-        /* Extract values for our own stats */
-        json_parse_stats(json);
-
-        if (stats_fp)
-                fprintf(stats_fp, "%s\n", json);
-	return 0;
-}
-
-#define _OTYPE_TAB      0x1  /* tabular format */
-#define _OTYPE_SUMMARY  0x2  /* summary format */
-#define _OTYPE_FORCE    0x4  /* force output regardless of interval timing */
-static void print_stats (rd_kafka_t *rk,
-                         int mode, int otype, const char *compression) {
-	rd_ts_t now = rd_clock();
-	rd_ts_t t_total;
-        static int rows_written = 0;
-        int print_header;
-        double latency_avg = 0.0f;
-        char extra[512];
-        int extra_of = 0;
-        *extra = '\0';
-
-	if (!(otype & _OTYPE_FORCE) &&
-            (((otype & _OTYPE_SUMMARY) && verbosity == 0) ||
-             cnt.t_last + dispintvl > now))
-		return;
-
-        print_header = !rows_written ||(verbosity > 0 && !(rows_written % 20));
-
-	if (cnt.t_end_send)
-		t_total = cnt.t_end_send - cnt.t_start;
-	else if (cnt.t_end)
-		t_total = cnt.t_end - cnt.t_start;
-	else if (cnt.t_start)
-		t_total = now - cnt.t_start;
-	else
-		t_total = 1;
-
-        if (latency_mode && cnt.latency_cnt)
-                latency_avg = (double)cnt.latency_sum /
-                        (double)cnt.latency_cnt;
-
-        if (mode == 'P') {
-
-                if (otype & _OTYPE_TAB) {
-#define ROW_START()        do {} while (0)
-#define COL_HDR(NAME)      printf("| %10.10s ", (NAME))
-#define COL_PR64(NAME,VAL) printf("| %10"PRIu64" ", (VAL))
-#define COL_PRF(NAME,VAL)  printf("| %10.2f ", (VAL))
-#define ROW_END()          do {                 \
-                                printf("\n");   \
-                                rows_written++; \
-                        } while (0)
-
-                        if (print_header) {
-                                /* First time, print header */
-                                ROW_START();
-                                COL_HDR("elapsed");
-                                COL_HDR("msgs");
-                                COL_HDR("bytes");
-                                COL_HDR("rtt");
-                                COL_HDR("dr");
-                                COL_HDR("dr_m/s");
-                                COL_HDR("dr_MB/s");
-                                COL_HDR("dr_err");
-                                COL_HDR("tx_err");
-                                COL_HDR("outq");
-                                if (report_offset)
-                                        COL_HDR("offset");
-                                if (latency_mode) {
-                                        COL_HDR("lat_curr");
-                                        COL_HDR("lat_avg");
-                                        COL_HDR("lat_lo");
-                                        COL_HDR("lat_hi");
-                                }
-
-                                ROW_END();
-                        }
-
-                        ROW_START();
-                        COL_PR64("elapsed", t_total / 1000);
-                        COL_PR64("msgs", cnt.msgs);
-                        COL_PR64("bytes", cnt.bytes);
-                        COL_PR64("rtt", cnt.avg_rtt / 1000);
-                        COL_PR64("dr", cnt.msgs_dr_ok);
-                        COL_PR64("dr_m/s",
-                                 ((cnt.msgs_dr_ok * 1000000) / t_total));
-                        COL_PRF("dr_MB/s",
-                                (float)((cnt.bytes_dr_ok) / (float)t_total));
-                        COL_PR64("dr_err", cnt.msgs_dr_err);
-                        COL_PR64("tx_err", cnt.tx_err);
-                        COL_PR64("outq",
-                                 rk ? (uint64_t)rd_kafka_outq_len(rk) : 0);
-                        if (report_offset)
-                                COL_PR64("offset", (uint64_t)cnt.last_offset);
-                        if (latency_mode) {
-                                COL_PRF("lat_curr", cnt.latency_last / 1000.0f);
-                                COL_PRF("lat_avg", latency_avg / 1000.0f);
-                                COL_PRF("lat_lo", cnt.latency_lo / 1000.0f);
-                                COL_PRF("lat_hi", cnt.latency_hi / 1000.0f);
-                        }
-                        ROW_END();
-                }
-
-                if (otype & _OTYPE_SUMMARY) {
-                        printf("%% %"PRIu64" messages produced "
-                               "(%"PRIu64" bytes), "
-                               "%"PRIu64" delivered "
-                               "(offset %"PRId64", %"PRIu64" failed) "
-                               "in %"PRIu64"ms: %"PRIu64" msgs/s and "
-                               "%.02f MB/s, "
-                               "%"PRIu64" produce failures, %i in queue, "
-                               "%s compression\n",
-                               cnt.msgs, cnt.bytes,
-                               cnt.msgs_dr_ok, cnt.last_offset, cnt.msgs_dr_err,
-                               t_total / 1000,
-                               ((cnt.msgs_dr_ok * 1000000) / t_total),
-                               (float)((cnt.bytes_dr_ok) / (float)t_total),
-                               cnt.tx_err,
-                               rk ? rd_kafka_outq_len(rk) : 0,
-                               compression);
-                }
-
-        } else {
-
-                if (otype & _OTYPE_TAB) {
-                        if (print_header) {
-                                /* First time, print header */
-                                ROW_START();
-                                COL_HDR("elapsed");
-                                COL_HDR("msgs");
-                                COL_HDR("bytes");
-                                COL_HDR("rtt");
-                                COL_HDR("m/s");
-                                COL_HDR("MB/s");
-                                COL_HDR("rx_err");
-                                COL_HDR("offset");
-                                if (latency_mode) {
-                                        COL_HDR("lat_curr");
-                                        COL_HDR("lat_avg");
-                                        COL_HDR("lat_lo");
-                                        COL_HDR("lat_hi");
-                                }
-                                ROW_END();
-                        }
-
-                        ROW_START();
-                        COL_PR64("elapsed", t_total / 1000);
-                        COL_PR64("msgs", cnt.msgs);
-                        COL_PR64("bytes", cnt.bytes);
-                        COL_PR64("rtt", cnt.avg_rtt / 1000);
-                        COL_PR64("m/s",
-                                 ((cnt.msgs * 1000000) / t_total));
-                        COL_PRF("MB/s",
-                                (float)((cnt.bytes) / (float)t_total));
-                        COL_PR64("rx_err", cnt.msgs_dr_err);
-                        COL_PR64("offset", cnt.offset);
-                        if (latency_mode) {
-                                COL_PRF("lat_curr", cnt.latency_last / 1000.0f);
-                                COL_PRF("lat_avg", latency_avg / 1000.0f);
-                                COL_PRF("lat_lo", cnt.latency_lo / 1000.0f);
-                                COL_PRF("lat_hi", cnt.latency_hi / 1000.0f);
-                        }
-                        ROW_END();
-
-                }
-
-                if (otype & _OTYPE_SUMMARY) {
-                        if (latency_avg >= 1.0f)
-                                extra_of += rd_snprintf(extra+extra_of,
-                                                     sizeof(extra)-extra_of,
-                                                     ", latency "
-                                                     "curr/avg/lo/hi "
-                                                     "%.2f/%.2f/%.2f/%.2fms",
-                                                     cnt.latency_last / 1000.0f,
-                                                     latency_avg  / 1000.0f,
-                                                     cnt.latency_lo / 1000.0f,
-                                                     cnt.latency_hi / 1000.0f)
-;
-                        printf("%% %"PRIu64" messages (%"PRIu64" bytes) "
-                               "consumed in %"PRIu64"ms: %"PRIu64" msgs/s "
-                               "(%.02f MB/s)"
-                               "%s\n",
-                               cnt.msgs, cnt.bytes,
-                               t_total / 1000,
-                               ((cnt.msgs * 1000000) / t_total),
-                               (float)((cnt.bytes) / (float)t_total),
-                               extra);
-                }
-
-                if (incremental_mode && now > cnt.t_last) {
-                        uint64_t i_msgs = cnt.msgs - cnt.msgs_last;
-                        uint64_t i_bytes = cnt.bytes - cnt.bytes_last;
-                        uint64_t i_time = cnt.t_last ? now - cnt.t_last : 0;
-
-                        printf("%% INTERVAL: %"PRIu64" messages "
-                               "(%"PRIu64" bytes) "
-                               "consumed in %"PRIu64"ms: %"PRIu64" msgs/s "
-                               "(%.02f MB/s)"
-                               "%s\n",
-                               i_msgs, i_bytes,
-                               i_time / 1000,
-                               ((i_msgs * 1000000) / i_time),
-                               (float)((i_bytes) / (float)i_time),
-                               extra);
-
-                }
-        }
-
-	cnt.t_last = now;
-	cnt.msgs_last = cnt.msgs;
-	cnt.bytes_last = cnt.bytes;
-}
-
-
-static void sig_usr1 (int sig) {
-	rd_kafka_dump(stdout, global_rk);
-}
-
-
-/**
- * @brief Read config from file
- * @returns -1 on error, else 0.
- */
-static int read_conf_file (rd_kafka_conf_t *conf,
-                           rd_kafka_topic_conf_t *tconf, const char *path) {
-        FILE *fp;
-        char buf[512];
-        int line = 0;
-        char errstr[512];
-
-        if (!(fp = fopen(path, "r"))) {
-                fprintf(stderr, "%% Failed to open %s: %s\n",
-                        path, strerror(errno));
-                return -1;
-        }
-
-        while (fgets(buf, sizeof(buf), fp)) {
-                char *s = buf;
-                char *t;
-                rd_kafka_conf_res_t r = RD_KAFKA_CONF_UNKNOWN;
-
-                line++;
-
-                while (isspace((int)*s))
-                        s++;
-
-                if (!*s || *s == '#')
-                        continue;
-
-                if ((t = strchr(buf, '\n')))
-                        *t = '\0';
-
-                t = strchr(buf, '=');
-                if (!t || t == s || !*(t+1)) {
-                        fprintf(stderr, "%% %s:%d: expected key=value\n",
-                                path, line);
-                        fclose(fp);
-                        return -1;
-                }
-
-                *(t++) = '\0';
-
-                /* Try property on topic config first */
-                if (tconf)
-                        r = rd_kafka_topic_conf_set(tconf, s, t,
-                                                    errstr, sizeof(errstr));
-
-                /* Try global config */
-                if (r == RD_KAFKA_CONF_UNKNOWN)
-                        r = rd_kafka_conf_set(conf, s, t,
-                                              errstr, sizeof(errstr));
-
-                if (r == RD_KAFKA_CONF_OK)
-                        continue;
-
-                fprintf(stderr, "%% %s:%d: %s=%s: %s\n",
-                        path, line, s, t, errstr);
-                fclose(fp);
-                return -1;
-        }
-
-        fclose(fp);
-
-        return 0;
-}
-
-
-
-int main (int argc, char **argv) {
-	char *brokers = NULL;
-	char mode = 'C';
-	char *topic = NULL;
-	const char *key = NULL;
-        int *partitions = NULL;
-	int opt;
-	int sendflags = 0;
-	char *msgpattern = "librdkafka_performance testing!";
-	int msgsize = (int)strlen(msgpattern);
-	const char *debug = NULL;
-	rd_ts_t now;
-	char errstr[512];
-	uint64_t seq = 0;
-	int seed = (int)time(NULL);
-        rd_kafka_t *rk;
-	rd_kafka_topic_t *rkt;
-	rd_kafka_conf_t *conf;
-	rd_kafka_topic_conf_t *topic_conf;
-	rd_kafka_queue_t *rkqu = NULL;
-	const char *compression = "no";
-	int64_t start_offset = 0;
-	int batch_size = 0;
-	int idle = 0;
-        const char *stats_cmd = NULL;
-        char *stats_intvlstr = NULL;
-        char tmp[128];
-        char *tmp2;
-        int otype = _OTYPE_SUMMARY;
-        double dtmp;
-        int rate_sleep = 0;
-	rd_kafka_topic_partition_list_t *topics;
-        int exitcode = 0;
-
-	/* Kafka configuration */
-	conf = rd_kafka_conf_new();
-	rd_kafka_conf_set_error_cb(conf, err_cb);
-	rd_kafka_conf_set_throttle_cb(conf, throttle_cb);
-        rd_kafka_conf_set_offset_commit_cb(conf, offset_commit_cb);
-
-#ifdef SIGIO
-        /* Quick termination */
-	rd_snprintf(tmp, sizeof(tmp), "%i", SIGIO);
-	rd_kafka_conf_set(conf, "internal.termination.signal", tmp, NULL, 0);
-#endif
-
-	/* Producer config */
-	rd_kafka_conf_set(conf, "queue.buffering.max.messages", "500000",
-			  NULL, 0);
-	rd_kafka_conf_set(conf, "message.send.max.retries", "3", NULL, 0);
-	rd_kafka_conf_set(conf, "retry.backoff.ms", "500", NULL, 0);
-
-	/* Consumer config */
-	/* Tell rdkafka to (try to) maintain 1M messages
-	 * in its internal receive buffers. This is to avoid
-	 * application -> rdkafka -> broker  per-message ping-pong
-	 * latency.
-	 * The larger the local queue, the higher the performance.
-	 * Try other values with: ... -X queued.min.messages=1000
-	 */
-	rd_kafka_conf_set(conf, "queued.min.messages", "1000000", NULL, 0);
-	rd_kafka_conf_set(conf, "session.timeout.ms", "6000", NULL, 0);
-
-	/* Kafka topic configuration */
-	topic_conf = rd_kafka_topic_conf_new();
-	rd_kafka_topic_conf_set(topic_conf, "auto.offset.reset", "earliest",
-				NULL, 0);
-
-	topics = rd_kafka_topic_partition_list_new(1);
-
-	while ((opt =
-		getopt(argc, argv,
-		       "PCG:t:p:b:s:k:c:fi:MDd:m:S:x:"
-                       "R:a:z:o:X:B:eT:Y:qvIur:lA:OwN")) != -1) {
-		switch (opt) {
-		case 'G':
-			if (rd_kafka_conf_set(conf, "group.id", optarg,
-					      errstr, sizeof(errstr)) !=
-			    RD_KAFKA_CONF_OK) {
-				fprintf(stderr, "%% %s\n", errstr);
-				exit(1);
-			}
-			/* FALLTHRU */
-		case 'P':
-		case 'C':
-			mode = opt;
-			break;
-		case 't':
-			rd_kafka_topic_partition_list_add(topics, optarg,
-							  RD_KAFKA_PARTITION_UA);
-			break;
-		case 'p':
-                        partition_cnt++;
-			partitions = realloc(partitions, sizeof(*partitions) * partition_cnt);
-			partitions[partition_cnt-1] = atoi(optarg);
-			break;
-
-		case 'b':
-			brokers = optarg;
-			break;
-		case 's':
-			msgsize = atoi(optarg);
-			break;
-		case 'k':
-			key = optarg;
-			break;
-		case 'c':
-			msgcnt = atoi(optarg);
-			break;
-		case 'D':
-			sendflags |= RD_KAFKA_MSG_F_FREE;
-			break;
-		case 'i':
-			dispintvl = atoi(optarg);
-			break;
-		case 'm':
-			msgpattern = optarg;
-			break;
-		case 'S':
-			seq = strtoull(optarg, NULL, 10);
-			do_seq = 1;
-			break;
-		case 'x':
-			exit_after = atoi(optarg);
-			break;
-		case 'R':
-			seed = atoi(optarg);
-			break;
-		case 'a':
-			if (rd_kafka_topic_conf_set(topic_conf,
-						    "request.required.acks",
-						    optarg,
-						    errstr, sizeof(errstr)) !=
-			    RD_KAFKA_CONF_OK) {
-				fprintf(stderr, "%% %s\n", errstr);
-				exit(1);
-			}
-			break;
-		case 'B':
-			batch_size = atoi(optarg);
-			break;
-		case 'z':
-			if (rd_kafka_conf_set(conf, "compression.codec",
-					      optarg,
-					      errstr, sizeof(errstr)) !=
-			    RD_KAFKA_CONF_OK) {
-				fprintf(stderr, "%% %s\n", errstr);
-				exit(1);
-			}
-			compression = optarg;
-			break;
-		case 'o':
-			if (!strcmp(optarg, "end"))
-				start_offset = RD_KAFKA_OFFSET_END;
-			else if (!strcmp(optarg, "beginning"))
-				start_offset = RD_KAFKA_OFFSET_BEGINNING;
-			else if (!strcmp(optarg, "stored"))
-				start_offset = RD_KAFKA_OFFSET_STORED;
-			else {
-				start_offset = strtoll(optarg, NULL, 10);
-
-				if (start_offset < 0)
-					start_offset = RD_KAFKA_OFFSET_TAIL(-start_offset);
-			}
-
-			break;
-		case 'e':
-			exit_eof = 1;
-			break;
-		case 'd':
-			debug = optarg;
-			break;
-		case 'X':
-		{
-			char *name, *val;
-			rd_kafka_conf_res_t res;
-
-			if (!strcmp(optarg, "list") ||
-			    !strcmp(optarg, "help")) {
-				rd_kafka_conf_properties_show(stdout);
-				exit(0);
-			}
-
-			name = optarg;
-			if (!(val = strchr(name, '='))) {
-				fprintf(stderr, "%% Expected "
-					"-X property=value, not %s\n", name);
-				exit(1);
-			}
-
-			*val = '\0';
-			val++;
-
-                        if (!strcmp(name, "file")) {
-                                if (read_conf_file(conf, topic_conf, val) == -1)
-                                        exit(1);
-                                break;
-                        }
-
-			res = RD_KAFKA_CONF_UNKNOWN;
-			/* Try "topic." prefixed properties on topic
-			 * conf first, and then fall through to global if
-			 * it didn't match a topic configuration property. */
-			if (!strncmp(name, "topic.", strlen("topic.")))
-				res = rd_kafka_topic_conf_set(topic_conf,
-							      name+
-							      strlen("topic."),
-							      val,
-							      errstr,
-							      sizeof(errstr));
-
-			if (res == RD_KAFKA_CONF_UNKNOWN)
-				res = rd_kafka_conf_set(conf, name, val,
-							errstr, sizeof(errstr));
-
-			if (res != RD_KAFKA_CONF_OK) {
-				fprintf(stderr, "%% %s\n", errstr);
-				exit(1);
-			}
-		}
-		break;
-
-		case 'T':
-                        stats_intvlstr = optarg;
-			break;
-                case 'Y':
-                        stats_cmd = optarg;
-                        break;
-
-		case 'q':
-                        verbosity--;
-			break;
-
-		case 'v':
-                        verbosity++;
-			break;
-
-		case 'I':
-			idle = 1;
-			break;
-
-                case 'u':
-                        otype = _OTYPE_TAB;
-                        verbosity--; /* remove some fluff */
-                        break;
-
-                case 'r':
-                        dtmp = strtod(optarg, &tmp2);
-                        if (tmp2 == optarg ||
-                            (dtmp >= -0.001 && dtmp <= 0.001)) {
-                                fprintf(stderr, "%% Invalid rate: %s\n",
-                                        optarg);
-                                exit(1);
-                        }
-
-                        rate_sleep = (int)(1000000.0 / dtmp);
-                        break;
-
-                case 'l':
-                        latency_mode = 1;
-			break;
-
-		case 'A':
-			if (!(latency_fp = fopen(optarg, "w"))) {
-				fprintf(stderr,
-					"%% Can't open %s: %s\n",
-					optarg, strerror(errno));
-				exit(1);
-			}
-                        break;
-
-                case 'O':
-                        if (rd_kafka_topic_conf_set(topic_conf,
-                                                    "produce.offset.report",
-                                                    "true",
-                                                    errstr, sizeof(errstr)) !=
-                            RD_KAFKA_CONF_OK) {
-                                fprintf(stderr, "%% %s\n", errstr);
-                                exit(1);
-                        }
-                        report_offset = 1;
-                        break;
-
-		case 'M':
-			incremental_mode = 1;
-			break;
-
-		case 'N':
-			with_dr = 0;
-			break;
-
-		default:
-                        fprintf(stderr, "Unknown option: %c\n", opt);
-			goto usage;
-		}
-	}
-
-	if (topics->cnt == 0 || optind != argc) {
-                if (optind < argc)
-                        fprintf(stderr, "Unknown argument: %s\n", argv[optind]);
-	usage:
-		fprintf(stderr,
-			"Usage: %s [-C|-P] -t <topic> "
-			"[-p <partition>] [-b <broker,broker..>] [options..]\n"
-			"\n"
-			"librdkafka version %s (0x%08x)\n"
-			"\n"
-			" Options:\n"
-			"  -C | -P |    Consumer or Producer mode\n"
-			"  -G <groupid> High-level Kafka Consumer mode\n"
-			"  -t <topic>   Topic to consume / produce\n"
-			"  -p <num>     Partition (defaults to random). "
-			"Multiple partitions are allowed in -C consumer mode.\n"
-			"  -M           Print consumer interval stats\n"
-			"  -b <brokers> Broker address list (host[:port],..)\n"
-			"  -s <size>    Message size (producer)\n"
-			"  -k <key>     Message key (producer)\n"
-			"  -c <cnt>     Messages to transmit/receive\n"
-			"  -x <cnt>     Hard exit after transmitting <cnt> messages (producer)\n"
-			"  -D           Copy/Duplicate data buffer (producer)\n"
-			"  -i <ms>      Display interval\n"
-			"  -m <msg>     Message payload pattern\n"
-			"  -S <start>   Send a sequence number starting at "
-			"<start> as payload\n"
-			"  -R <seed>    Random seed value (defaults to time)\n"
-			"  -a <acks>    Required acks (producer): "
-			"-1, 0, 1, >1\n"
-			"  -B <size>    Consume batch size (# of msgs)\n"
-			"  -z <codec>   Enable compression:\n"
-			"               none|gzip|snappy\n"
-			"  -o <offset>  Start offset (consumer)\n"
-			"               beginning, end, NNNNN or -NNNNN\n"
-			"  -d [facs..]  Enable debugging contexts:\n"
-			"               %s\n"
-			"  -X <prop=name> Set arbitrary librdkafka "
-			"configuration property\n"
-			"               Properties prefixed with \"topic.\" "
-			"will be set on topic object.\n"
-			"               Use '-X list' to see the full list\n"
-			"               of supported properties.\n"
-                        "  -X file=<path> Read config from file.\n"
-			"  -T <intvl>   Enable statistics from librdkafka at "
-			"specified interval (ms)\n"
-                        "  -Y <command> Pipe statistics to <command>\n"
-			"  -I           Idle: don't produce any messages\n"
-			"  -q           Decrease verbosity\n"
-                        "  -v           Increase verbosity (default 1)\n"
-                        "  -u           Output stats in table format\n"
-                        "  -r <rate>    Producer msg/s limit\n"
-                        "  -l           Latency measurement.\n"
-                        "               Needs two matching instances, one\n"
-                        "               consumer and one producer, both\n"
-                        "               running with the -l switch.\n"
-                        "  -l           Producer: per-message latency stats\n"
-			"  -A <file>    Write per-message latency stats to "
-			"<file>. Requires -l\n"
-                        "  -O           Report produced offset (producer)\n"
-			"  -N           No delivery reports (producer)\n"
-			"\n"
-			" In Consumer mode:\n"
-			"  consumes messages and prints throughput\n"
-			"  If -B <..> is supplied the batch consumer\n"
-			"  mode is used, else the callback mode is used.\n"
-			"\n"
-			" In Producer mode:\n"
-			"  writes messages of size -s <..> and prints throughput\n"
-			"\n",
-			argv[0],
-			rd_kafka_version_str(), rd_kafka_version(),
-			RD_KAFKA_DEBUG_CONTEXTS);
-		exit(1);
-	}
-
-
-	dispintvl *= 1000; /* us */
-
-        if (verbosity > 1)
-                printf("%% Using random seed %i, verbosity level %i\n",
-                       seed, verbosity);
-	srand(seed);
-	signal(SIGINT, stop);
-#ifdef SIGUSR1
-	signal(SIGUSR1, sig_usr1);
-#endif
-
-
-	if (debug &&
-	    rd_kafka_conf_set(conf, "debug", debug, errstr, sizeof(errstr)) !=
-	    RD_KAFKA_CONF_OK) {
-		printf("%% Debug configuration failed: %s: %s\n",
-		       errstr, debug);
-		exit(1);
-	}
-
-        /* Always enable stats (for RTT extraction), and if the user supplied
-         * the -T <intvl> option we let them see the stats as well. */
-        rd_kafka_conf_set_stats_cb(conf, stats_cb);
-
-        if (!stats_intvlstr) {
-                /* if no user-desired stats, adjust stats interval
-                 * to the display interval. */
-                rd_snprintf(tmp, sizeof(tmp), "%"PRId64, dispintvl / 1000);
-        }
-
-        if (rd_kafka_conf_set(conf, "statistics.interval.ms",
-                              stats_intvlstr ? stats_intvlstr : tmp,
-                              errstr, sizeof(errstr)) !=
-            RD_KAFKA_CONF_OK) {
-                fprintf(stderr, "%% %s\n", errstr);
-                exit(1);
-        }
-
-        if (latency_mode)
-                do_seq = 0;
-
-        if (stats_intvlstr) {
-                /* User enabled stats (-T) */
-
-#ifndef _MSC_VER
-                if (stats_cmd) {
-                        if (!(stats_fp = popen(stats_cmd, "we"))) {
-                                fprintf(stderr,
-                                        "%% Failed to start stats command: "
-                                        "%s: %s", stats_cmd, strerror(errno));
-                                exit(1);
-                        }
-                } else
-#endif
-                        stats_fp = stdout;
-        }
-
-	if (msgcnt != -1)
-		forever = 0;
-
-	topic = topics->elems[0].topic;
-
-	if (mode == 'P') {
-		/*
-		 * Producer
-		 */
-		char *sbuf;
-		char *pbuf;
-		int outq;
-		int keylen = key ? (int)strlen(key) : 0;
-		off_t rof = 0;
-		size_t plen = strlen(msgpattern);
-		int partition = partitions ? partitions[0] :
-			RD_KAFKA_PARTITION_UA;
-
-                if (latency_mode) {
-                        msgsize = (int)(strlen("LATENCY:") +
-                                strlen("18446744073709551615 ")+1);
-                        sendflags |= RD_KAFKA_MSG_F_COPY;
-		} else if (do_seq) {
-                        int minlen = (int)strlen("18446744073709551615 ")+1;
-                        if (msgsize < minlen)
-                                msgsize = minlen;
-
-			/* Force duplication of payload */
-                        sendflags |= RD_KAFKA_MSG_F_FREE;
-		}
-
-		sbuf = malloc(msgsize);
-
-		/* Copy payload content to new buffer */
-		while (rof < msgsize) {
-			size_t xlen = RD_MIN((size_t)msgsize-rof, plen);
-			memcpy(sbuf+rof, msgpattern, xlen);
-			rof += (off_t)xlen;
-		}
-
-		if (msgcnt == -1)
-			printf("%% Sending messages of size %i bytes\n",
-			       msgsize);
-		else
-			printf("%% Sending %i messages of size %i bytes\n",
-			       msgcnt, msgsize);
-
-		if (with_dr)
-			rd_kafka_conf_set_dr_msg_cb(conf, msg_delivered);
-
-		/* Create Kafka handle */
-		if (!(rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf,
- 					errstr, sizeof(errstr)))) {
-			fprintf(stderr,
-				"%% Failed to create Kafka producer: %s\n",
-				errstr);
-			exit(1);
-		}
-
-                global_rk = rk;
-
-		/* Add broker(s) */
-		if (brokers && rd_kafka_brokers_add(rk, brokers) < 1) {
-			fprintf(stderr, "%% No valid brokers specified\n");
-			exit(1);
-		}
-
-		/* Explicitly create topic to avoid per-msg lookups. */
-		rkt = rd_kafka_topic_new(rk, topic, topic_conf);
-
-
-                if (rate_sleep && verbosity >= 2)
-                        fprintf(stderr,
-                                "%% Inter message rate limiter sleep %ius\n",
-                                rate_sleep);
-
-                dr_disp_div = msgcnt / 50;
-                if (dr_disp_div == 0)
-                        dr_disp_div = 10;
-
-		cnt.t_start = cnt.t_last = rd_clock();
-
-		msgs_wait_produce_cnt = msgcnt;
-
-		while (run && (msgcnt == -1 || (int)cnt.msgs < msgcnt)) {
-			/* Send/Produce message. */
-
-			if (idle) {
-				rd_kafka_poll(rk, 1000);
-				continue;
-			}
-
-                        if (latency_mode) {
-                                rd_snprintf(sbuf, msgsize-1,
-                                         "LATENCY:%"PRIu64,  wall_clock());
-                        } else if (do_seq) {
-                                rd_snprintf(sbuf,
-                                         msgsize-1, "%"PRIu64": ", seq);
-                                seq++;
-			}
-
-			if (sendflags & RD_KAFKA_MSG_F_FREE) {
-				/* Duplicate memory */
-				pbuf = malloc(msgsize);
-				memcpy(pbuf, sbuf, msgsize);
-			} else
-				pbuf = sbuf;
-
-                        if (msgsize == 0)
-                                pbuf = NULL;
-
-			cnt.tx++;
-			while (run &&
-			       rd_kafka_produce(rkt, partition,
-						sendflags, pbuf, msgsize,
-						key, keylen, NULL) == -1) {
-				rd_kafka_resp_err_t err = rd_kafka_last_error();
-				if (err == RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION)
-					printf("%% No such partition: "
-						   "%"PRId32"\n", partition);
-				else if (verbosity >= 3 ||
-					(err != RD_KAFKA_RESP_ERR__QUEUE_FULL && verbosity >= 1))
-					printf("%% produce error: %s%s\n",
-						   rd_kafka_err2str(err),
-						   err == RD_KAFKA_RESP_ERR__QUEUE_FULL ?
-						   " (backpressure)" : "");
-
-				cnt.tx_err++;
-				if (err != RD_KAFKA_RESP_ERR__QUEUE_FULL) {
-					run = 0;
-					break;
-				}
-				now = rd_clock();
-				if (verbosity >= 2 &&
-                                    cnt.t_enobufs_last + dispintvl <= now) {
-					printf("%% Backpressure %i "
-					       "(tx %"PRIu64", "
-					       "txerr %"PRIu64")\n",
-					       rd_kafka_outq_len(rk),
-					       cnt.tx, cnt.tx_err);
-					cnt.t_enobufs_last = now;
-				}
-
-				/* Poll to handle delivery reports */
-				rd_kafka_poll(rk, 10);
-
-                                print_stats(rk, mode, otype, compression);
-			}
-
-			msgs_wait_cnt++;
-			if (msgs_wait_produce_cnt != -1)
-				msgs_wait_produce_cnt--;
-			cnt.msgs++;
-			cnt.bytes += msgsize;
-
-                        if (rate_sleep) {
-#ifdef _MSC_VER
-                                Sleep(rate_sleep / 1000);
-#else
-                                usleep(rate_sleep);
-#endif
-                        }
-
-			/* Must poll to handle delivery reports */
-			rd_kafka_poll(rk, 0);
-
-			print_stats(rk, mode, otype, compression);
-		}
-
-		forever = 0;
-                if (verbosity >= 2)
-                        printf("%% All messages produced, "
-                               "now waiting for %li deliveries\n",
-                               msgs_wait_cnt);
-
-		/* Wait for messages to be delivered */
-                while (run && rd_kafka_poll(rk, 1000) != -1)
-			print_stats(rk, mode, otype, compression);
-
-
-		outq = rd_kafka_outq_len(rk);
-                if (verbosity >= 2)
-                        printf("%% %i messages in outq\n", outq);
-		cnt.msgs -= outq;
-		cnt.bytes -= msgsize * outq;
-
-		cnt.t_end = t_end;
-
-		if (cnt.tx_err > 0)
-			printf("%% %"PRIu64" backpressures for %"PRIu64
-			       " produce calls: %.3f%% backpressure rate\n",
-			       cnt.tx_err, cnt.tx,
-			       ((double)cnt.tx_err / (double)cnt.tx) * 100.0);
-
-		/* Destroy topic */
-		rd_kafka_topic_destroy(rkt);
-
-		/* Destroy the handle */
-		rd_kafka_destroy(rk);
-                global_rk = rk = NULL;
-
-		free(sbuf);
-
-                exitcode = cnt.msgs == cnt.msgs_dr_ok ? 0 : 1;
-
-	} else if (mode == 'C') {
-		/*
-		 * Consumer
-		 */
-
-		rd_kafka_message_t **rkmessages = NULL;
-		size_t i = 0;
-
-		/* Create Kafka handle */
-		if (!(rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf,
-					errstr, sizeof(errstr)))) {
-			fprintf(stderr,
-				"%% Failed to create Kafka consumer: %s\n",
-				errstr);
-			exit(1);
-		}
-
-                global_rk = rk;
-
-		/* Add broker(s) */
-		if (brokers && rd_kafka_brokers_add(rk, brokers) < 1) {
-			fprintf(stderr, "%% No valid brokers specified\n");
-			exit(1);
-		}
-
-		/* Create topic to consume from */
-		rkt = rd_kafka_topic_new(rk, topic, topic_conf);
-
-		/* Batch consumer */
-		if (batch_size)
-			rkmessages = malloc(sizeof(*rkmessages) * batch_size);
-
-		/* Start consuming */
-		rkqu = rd_kafka_queue_new(rk);
-		for (i=0 ; i<(size_t)partition_cnt ; ++i) {
-			const int r = rd_kafka_consume_start_queue(rkt,
-				partitions[i], start_offset, rkqu);
-
-			if (r == -1) {
-                                fprintf(stderr, "%% Error creating queue: %s\n",
-                                        rd_kafka_err2str(rd_kafka_last_error()));
-				exit(1);
-			}
-		}
-
-		while (run && (msgcnt == -1 || msgcnt > (int)cnt.msgs)) {
-			/* Consume messages.
-			 * A message may either be a real message, or
-			 * an error signaling (if rkmessage->err is set).
-			 */
-			uint64_t fetch_latency;
-			ssize_t r;
-
-			fetch_latency = rd_clock();
-
-			if (batch_size) {
-				int i;
-				int partition = partitions ? partitions[0] :
-				    RD_KAFKA_PARTITION_UA;
-
-				/* Batch fetch mode */
-				r = rd_kafka_consume_batch(rkt, partition,
-							   1000,
-							   rkmessages,
-							   batch_size);
-				if (r != -1) {
-					for (i = 0 ; i < r ; i++) {
-						msg_consume(rkmessages[i],
-							NULL);
-						rd_kafka_message_destroy(
-							rkmessages[i]);
-					}
-				}
-			} else {
-				/* Queue mode */
-				r = rd_kafka_consume_callback_queue(rkqu, 1000,
-							msg_consume,
-							NULL);
-			}
-
-			cnt.t_fetch_latency += rd_clock() - fetch_latency;
-                        if (r == -1)
-                                fprintf(stderr, "%% Error: %s\n",
-                                        rd_kafka_err2str(rd_kafka_last_error()));
-
-			print_stats(rk, mode, otype, compression);
-
-			/* Poll to handle stats callbacks */
-			rd_kafka_poll(rk, 0);
-		}
-		cnt.t_end = rd_clock();
-
-		/* Stop consuming */
-		for (i=0 ; i<(size_t)partition_cnt ; ++i) {
-			int r = rd_kafka_consume_stop(rkt, (int32_t)i);
-			if (r == -1) {
-                                fprintf(stderr,
-                                        "%% Error in consume_stop: %s\n",
-                                        rd_kafka_err2str(rd_kafka_last_error()));
-			}
-		}
-		rd_kafka_queue_destroy(rkqu);
-
-		/* Destroy topic */
-		rd_kafka_topic_destroy(rkt);
-
-		if (batch_size)
-			free(rkmessages);
-
-		/* Destroy the handle */
-		rd_kafka_destroy(rk);
-
-                global_rk = rk = NULL;
-
-	} else if (mode == 'G') {
-		/*
-		 * High-level balanced Consumer
-		 */
-		rd_kafka_resp_err_t err;
-
-		rd_kafka_conf_set_rebalance_cb(conf, rebalance_cb);
-		rd_kafka_conf_set_default_topic_conf(conf, topic_conf);
-
-		/* Create Kafka handle */
-		if (!(rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf,
-					errstr, sizeof(errstr)))) {
-			fprintf(stderr,
-				"%% Failed to create Kafka consumer: %s\n",
-				errstr);
-			exit(1);
-		}
-
-		/* Forward all events to consumer queue */
-		rd_kafka_poll_set_consumer(rk);
-
-                global_rk = rk;
-
-		/* Add broker(s) */
-		if (brokers && rd_kafka_brokers_add(rk, brokers) < 1) {
-			fprintf(stderr, "%% No valid brokers specified\n");
-			exit(1);
-		}
-
-		err = rd_kafka_subscribe(rk, topics);
-		if (err) {
-			fprintf(stderr, "%% Subscribe failed: %s\n",
-				rd_kafka_err2str(err));
-			exit(1);
-		}
-		fprintf(stderr, "%% Waiting for group rebalance..\n");
-
-		while (run && (msgcnt == -1 || msgcnt > (int)cnt.msgs)) {
-			/* Consume messages.
-			 * A message may either be a real message, or
-			 * an event (if rkmessage->err is set).
-			 */
-			rd_kafka_message_t *rkmessage;
-			uint64_t fetch_latency;
-
-			fetch_latency = rd_clock();
-
-			rkmessage = rd_kafka_consumer_poll(rk, 1000);
-			if (rkmessage) {
-				msg_consume(rkmessage, NULL);
-				rd_kafka_message_destroy(rkmessage);
-			}
-
-			cnt.t_fetch_latency += rd_clock() - fetch_latency;
-
-			print_stats(rk, mode, otype, compression);
-		}
-		cnt.t_end = rd_clock();
-
-		err = rd_kafka_consumer_close(rk);
-		if (err)
-			fprintf(stderr, "%% Failed to close consumer: %s\n",
-				rd_kafka_err2str(err));
-
-		rd_kafka_destroy(rk);
-	}
-
-	print_stats(NULL, mode, otype|_OTYPE_FORCE, compression);
-
-	if (cnt.t_fetch_latency && cnt.msgs)
-		printf("%% Average application fetch latency: %"PRIu64"us\n",
-		       cnt.t_fetch_latency / cnt.msgs);
-
-	if (latency_fp)
-		fclose(latency_fp);
-
-        if (stats_fp) {
-#ifndef _MSC_VER
-                pclose(stats_fp);
-#endif
-                stats_fp = NULL;
-        }
-
-        if (partitions)
-                free(partitions);
-
-	rd_kafka_topic_partition_list_destroy(topics);
-
-	/* Let background threads clean up and terminate cleanly. */
-	rd_kafka_wait_destroyed(2000);
-
-	return exitcode;
-}
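
The -l latency mode of the removed rdkafka_performance.c measures end-to-end latency by embedding the producer's wall-clock time in each payload as "LATENCY:<usec>" and letting the consumer subtract that value from its own clock on receipt. The sketch below isolates that encode/decode step; it assumes a POSIX gettimeofday(), stands in for the produce/consume round trip with a local string, and the function names (make_latency_payload, extract_latency_us) are illustrative only, not part of the deleted tool.

// Sketch of the payload-embedded latency measurement used by the removed
// rdkafka_performance.c -l mode. Function names here are illustrative;
// assumes a POSIX gettimeofday().
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/time.h>
#include <string>

// Microsecond wall clock, same idea as the tool's wall_clock() helper.
static uint64_t wall_clock_us(void) {
        struct timeval tv;
        gettimeofday(&tv, NULL);
        return (uint64_t)tv.tv_sec * 1000000ULL + (uint64_t)tv.tv_usec;
}

// Producer side: encode the send timestamp into the message payload.
static std::string make_latency_payload(void) {
        char buf[64];
        snprintf(buf, sizeof(buf), "LATENCY:%llu",
                 (unsigned long long)wall_clock_us());
        return std::string(buf);
}

// Consumer side: parse the timestamp back out and compute the delta.
// Returns -1 if the payload is not a LATENCY message.
static int64_t extract_latency_us(const char *payload, size_t len) {
        unsigned long long sent = 0;
        if (len <= 8 || memcmp(payload, "LATENCY:", 8) != 0)
                return -1;
        if (sscanf(payload, "LATENCY:%llu", &sent) != 1)
                return -1;
        return (int64_t)(wall_clock_us() - sent);
}

int main(void) {
        // A local string stands in for the produce/consume round trip here.
        std::string msg = make_latency_payload();
        int64_t lat = extract_latency_us(msg.c_str(), msg.size());
        printf("end-to-end latency: %lld us\n", (long long)lat);
        return 0;
}

Both ends must run on hosts with reasonably synchronized clocks; the deleted tool guards against skew by ignoring deltas that are negative or larger than five minutes.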

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/examples/rdkafka_simple_producer.c
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/examples/rdkafka_simple_producer.c b/thirdparty/librdkafka-0.11.1/examples/rdkafka_simple_producer.c
deleted file mode 100644
index a353d01..0000000
--- a/thirdparty/librdkafka-0.11.1/examples/rdkafka_simple_producer.c
+++ /dev/null
@@ -1,260 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2017, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/**
- * Simple Apache Kafka producer
- * using the Kafka driver from librdkafka
- * (https://github.com/edenhill/librdkafka)
- */
-
-#include <stdio.h>
-#include <signal.h>
-#include <string.h>
-
-
-/* Typical include path would be <librdkafka/rdkafka.h>, but this program
- * is built from within the librdkafka source tree and thus differs. */
-#include "rdkafka.h"
-
-
-static int run = 1;
-
-/**
- * @brief Signal termination of program
- */
-static void stop (int sig) {
-        run = 0;
-        fclose(stdin); /* abort fgets() */
-}
-
-
-/**
- * @brief Message delivery report callback.
- *
- * This callback is called exactly once per message, indicating if
- * the message was successfully delivered
- * (rkmessage->err == RD_KAFKA_RESP_ERR_NO_ERROR) or permanently
- * failed delivery (rkmessage->err != RD_KAFKA_RESP_ERR_NO_ERROR).
- *
- * The callback is triggered from rd_kafka_poll() and executes on
- * the application's thread.
- */
-static void dr_msg_cb (rd_kafka_t *rk,
-                       const rd_kafka_message_t *rkmessage, void *opaque) {
-        if (rkmessage->err)
-                fprintf(stderr, "%% Message delivery failed: %s\n",
-                        rd_kafka_err2str(rkmessage->err));
-        else
-                fprintf(stderr,
-                        "%% Message delivered (%zd bytes, "
-                        "partition %"PRId32")\n",
-                        rkmessage->len, rkmessage->partition);
-
-        /* The rkmessage is destroyed automatically by librdkafka */
-}
-
-
-
-int main (int argc, char **argv) {
-        rd_kafka_t *rk;         /* Producer instance handle */
-        rd_kafka_topic_t *rkt;  /* Topic object */
-        rd_kafka_conf_t *conf;  /* Temporary configuration object */
-        char errstr[512];       /* librdkafka API error reporting buffer */
-        char buf[512];          /* Message value temporary buffer */
-        const char *brokers;    /* Argument: broker list */
-        const char *topic;      /* Argument: topic to produce to */
-
-        /*
-         * Argument validation
-         */
-        if (argc != 3) {
-                fprintf(stderr, "%% Usage: %s <broker> <topic>\n", argv[0]);
-                return 1;
-        }
-
-        brokers = argv[1];
-        topic   = argv[2];
-
-
-        /*
-         * Create Kafka client configuration place-holder
-         */
-        conf = rd_kafka_conf_new();
-
-        /* Set bootstrap broker(s) as a comma-separated list of
-         * host or host:port (default port 9092).
-         * librdkafka will use the bootstrap brokers to acquire the full
-         * set of brokers from the cluster. */
-        if (rd_kafka_conf_set(conf, "bootstrap.servers", brokers,
-                              errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) {
-                fprintf(stderr, "%s\n", errstr);
-                return 1;
-        }
-
-        /* Set the delivery report callback.
-         * This callback will be called once per message to inform
-         * the application if delivery succeeded or failed.
-         * See dr_msg_cb() above. */
-        rd_kafka_conf_set_dr_msg_cb(conf, dr_msg_cb);
-
-
-        /*
-         * Create producer instance.
-         *
-         * NOTE: rd_kafka_new() takes ownership of the conf object
-         *       and the application must not reference it again after
-         *       this call.
-         */
-        rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
-        if (!rk) {
-                fprintf(stderr,
-                        "%% Failed to create new producer: %s\n", errstr);
-                return 1;
-        }
-
-
-        /* Create topic object that will be reused for each message
-         * produced.
-         *
-         * Both the producer instance (rd_kafka_t) and topic objects (topic_t)
-         * are long-lived objects that should be reused as much as possible.
-         */
-        rkt = rd_kafka_topic_new(rk, topic, NULL);
-        if (!rkt) {
-                fprintf(stderr, "%% Failed to create topic object: %s\n",
-                        rd_kafka_err2str(rd_kafka_last_error()));
-                rd_kafka_destroy(rk);
-                return 1;
-        }
-
-        /* Signal handler for clean shutdown */
-        signal(SIGINT, stop);
-
-        fprintf(stderr,
-                "%% Type some text and hit enter to produce message\n"
-                "%% Or just hit enter to only serve delivery reports\n"
-                "%% Press Ctrl-C or Ctrl-D to exit\n");
-
-        while (run && fgets(buf, sizeof(buf), stdin)) {
-                size_t len = strlen(buf);
-
-                if (buf[len-1] == '\n') /* Remove newline */
-                        buf[--len] = '\0';
-
-                if (len == 0) {
-                        /* Empty line: only serve delivery reports */
-                        rd_kafka_poll(rk, 0/*non-blocking */);
-                        continue;
-                }
-
-                /*
-                 * Send/Produce message.
-                 * This is an asynchronous call, on success it will only
-                 * enqueue the message on the internal producer queue.
-                 * The actual delivery attempts to the broker are handled
-                 * by background threads.
-                 * The previously registered delivery report callback
-                 * (dr_msg_cb) is used to signal back to the application
-                 * when the message has been delivered (or failed).
-                 */
-        retry:
-                if (rd_kafka_produce(
-                            /* Topic object */
-                            rkt,
-                            /* Use builtin partitioner to select partition*/
-                            RD_KAFKA_PARTITION_UA,
-                            /* Make a copy of the payload. */
-                            RD_KAFKA_MSG_F_COPY,
-                            /* Message payload (value) and length */
-                            buf, len,
-                            /* Optional key and its length */
-                            NULL, 0,
-                            /* Message opaque, provided in
-                             * delivery report callback as
-                             * msg_opaque. */
-                            NULL) == -1) {
-                        /**
-                         * Failed to *enqueue* message for producing.
-                         */
-                        fprintf(stderr,
-                                "%% Failed to produce to topic %s: %s\n",
-                                rd_kafka_topic_name(rkt),
-                                rd_kafka_err2str(rd_kafka_last_error()));
-
-                        /* Poll to handle delivery reports */
-                        if (rd_kafka_last_error() ==
-                            RD_KAFKA_RESP_ERR__QUEUE_FULL) {
-                                /* If the internal queue is full, wait for
-                                 * messages to be delivered and then retry.
-                                 * The internal queue represents both
-                                 * messages to be sent and messages that have
-                                 * been sent or failed, awaiting their
-                                 * delivery report callback to be called.
-                                 *
-                                 * The internal queue is limited by the
-                                 * configuration property
-                                 * queue.buffering.max.messages */
-                                rd_kafka_poll(rk, 1000/*block for max 1000ms*/);
-                                goto retry;
-                        }
-                } else {
-                        fprintf(stderr, "%% Enqueued message (%zd bytes) "
-                                "for topic %s\n",
-                                len, rd_kafka_topic_name(rkt));
-                }
-
-
-                /* A producer application should continually serve
-                 * the delivery report queue by calling rd_kafka_poll()
-                 * at frequent intervals.
-                 * Either put the poll call in your main loop, or in a
-                 * dedicated thread, or call it after every
-                 * rd_kafka_produce() call.
-                 * Just make sure that rd_kafka_poll() is still called
-                 * during periods where you are not producing any messages
-                 * to make sure previously produced messages have their
-                 * delivery report callback served (and any other callbacks
-                 * you register). */
-                rd_kafka_poll(rk, 0/*non-blocking*/);
-        }
-
-
-        /* Wait for final messages to be delivered or fail.
-         * rd_kafka_flush() is an abstraction over rd_kafka_poll() which
-         * waits for all messages to be delivered. */
-        fprintf(stderr, "%% Flushing final messages..\n");
-        rd_kafka_flush(rk, 10*1000 /* wait for max 10 seconds */);
-
-        /* Destroy topic object */
-        rd_kafka_topic_destroy(rkt);
-
-        /* Destroy the producer instance */
-        rd_kafka_destroy(rk);
-
-        return 0;
-}
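
The retry: label above implements back-pressure: when the local queue (bounded by queue.buffering.max.messages) is full, the producer blocks in rd_kafka_poll() so delivery reports drain the queue, then retries the enqueue. The same idea can be written without goto; a minimal sketch reusing the rk, rkt, buf and len variables from the example above:

        while (rd_kafka_produce(rkt, RD_KAFKA_PARTITION_UA, RD_KAFKA_MSG_F_COPY,
                                buf, len, NULL, 0, NULL) == -1) {
                if (rd_kafka_last_error() != RD_KAFKA_RESP_ERR__QUEUE_FULL)
                        break;           /* permanent enqueue failure */
                rd_kafka_poll(rk, 1000); /* serve delivery reports to free queue space */
        }
        rd_kafka_poll(rk, 0);            /* serve any completed delivery reports */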


[23/51] [partial] nifi-minifi-cpp git commit: MINIFICPP-512 - upgrade to librdkafka 0.11.4

Posted by ph...@apache.org.
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdkafka_op.c
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdkafka_op.c b/thirdparty/librdkafka-0.11.1/src/rdkafka_op.c
deleted file mode 100644
index a761e7a..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdkafka_op.c
+++ /dev/null
@@ -1,662 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2015, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <stdarg.h>
-
-#include "rdkafka_int.h"
-#include "rdkafka_op.h"
-#include "rdkafka_topic.h"
-#include "rdkafka_partition.h"
-#include "rdkafka_offset.h"
-
-/* Current number of rd_kafka_op_t */
-rd_atomic32_t rd_kafka_op_cnt;
-
-
-const char *rd_kafka_op2str (rd_kafka_op_type_t type) {
-        int skiplen = 6;
-        static const char *names[] = {
-                [RD_KAFKA_OP_NONE] = "REPLY:NONE",
-                [RD_KAFKA_OP_FETCH] = "REPLY:FETCH",
-                [RD_KAFKA_OP_ERR] = "REPLY:ERR",
-                [RD_KAFKA_OP_CONSUMER_ERR] = "REPLY:CONSUMER_ERR",
-                [RD_KAFKA_OP_DR] = "REPLY:DR",
-                [RD_KAFKA_OP_STATS] = "REPLY:STATS",
-                [RD_KAFKA_OP_OFFSET_COMMIT] = "REPLY:OFFSET_COMMIT",
-                [RD_KAFKA_OP_NODE_UPDATE] = "REPLY:NODE_UPDATE",
-                [RD_KAFKA_OP_XMIT_BUF] = "REPLY:XMIT_BUF",
-                [RD_KAFKA_OP_RECV_BUF] = "REPLY:RECV_BUF",
-                [RD_KAFKA_OP_XMIT_RETRY] = "REPLY:XMIT_RETRY",
-                [RD_KAFKA_OP_FETCH_START] = "REPLY:FETCH_START",
-                [RD_KAFKA_OP_FETCH_STOP] = "REPLY:FETCH_STOP",
-                [RD_KAFKA_OP_SEEK] = "REPLY:SEEK",
-                [RD_KAFKA_OP_PAUSE] = "REPLY:PAUSE",
-                [RD_KAFKA_OP_OFFSET_FETCH] = "REPLY:OFFSET_FETCH",
-                [RD_KAFKA_OP_PARTITION_JOIN] = "REPLY:PARTITION_JOIN",
-                [RD_KAFKA_OP_PARTITION_LEAVE] = "REPLY:PARTITION_LEAVE",
-                [RD_KAFKA_OP_REBALANCE] = "REPLY:REBALANCE",
-                [RD_KAFKA_OP_TERMINATE] = "REPLY:TERMINATE",
-                [RD_KAFKA_OP_COORD_QUERY] = "REPLY:COORD_QUERY",
-                [RD_KAFKA_OP_SUBSCRIBE] = "REPLY:SUBSCRIBE",
-                [RD_KAFKA_OP_ASSIGN] = "REPLY:ASSIGN",
-                [RD_KAFKA_OP_GET_SUBSCRIPTION] = "REPLY:GET_SUBSCRIPTION",
-                [RD_KAFKA_OP_GET_ASSIGNMENT] = "REPLY:GET_ASSIGNMENT",
-                [RD_KAFKA_OP_THROTTLE] = "REPLY:THROTTLE",
-                [RD_KAFKA_OP_NAME] = "REPLY:NAME",
-                [RD_KAFKA_OP_OFFSET_RESET] = "REPLY:OFFSET_RESET",
-                [RD_KAFKA_OP_METADATA] = "REPLY:METADATA",
-                [RD_KAFKA_OP_LOG] = "REPLY:LOG",
-                [RD_KAFKA_OP_WAKEUP] = "REPLY:WAKEUP",
-        };
-
-        if (type & RD_KAFKA_OP_REPLY)
-                skiplen = 0;
-
-        return names[type & ~RD_KAFKA_OP_FLAGMASK]+skiplen;
-}
-
-
-void rd_kafka_op_print (FILE *fp, const char *prefix, rd_kafka_op_t *rko) {
-	fprintf(fp,
-		"%s((rd_kafka_op_t*)%p)\n"
-		"%s Type: %s (0x%x), Version: %"PRId32"\n",
-		prefix, rko,
-		prefix, rd_kafka_op2str(rko->rko_type), rko->rko_type,
-		rko->rko_version);
-	if (rko->rko_err)
-		fprintf(fp, "%s Error: %s\n",
-			prefix, rd_kafka_err2str(rko->rko_err));
-	if (rko->rko_replyq.q)
-		fprintf(fp, "%s Replyq %p v%d (%s)\n",
-			prefix, rko->rko_replyq.q, rko->rko_replyq.version,
-#if ENABLE_DEVEL
-			rko->rko_replyq._id
-#else
-			""
-#endif
-			);
-	if (rko->rko_rktp) {
-		rd_kafka_toppar_t *rktp = rd_kafka_toppar_s2i(rko->rko_rktp);
-		fprintf(fp, "%s ((rd_kafka_toppar_t*)%p) "
-			"%s [%"PRId32"] v%d (shptr %p)\n",
-			prefix, rktp, rktp->rktp_rkt->rkt_topic->str,
-			rktp->rktp_partition,
-			rd_atomic32_get(&rktp->rktp_version), rko->rko_rktp);
-	}
-
-	switch (rko->rko_type & ~RD_KAFKA_OP_FLAGMASK)
-	{
-	case RD_KAFKA_OP_FETCH:
-		fprintf(fp,  "%s Offset: %"PRId64"\n",
-			prefix, rko->rko_u.fetch.rkm.rkm_offset);
-		break;
-	case RD_KAFKA_OP_CONSUMER_ERR:
-		fprintf(fp,  "%s Offset: %"PRId64"\n",
-			prefix, rko->rko_u.err.offset);
-		/* FALLTHRU */
-	case RD_KAFKA_OP_ERR:
-		fprintf(fp, "%s Reason: %s\n", prefix, rko->rko_u.err.errstr);
-		break;
-	case RD_KAFKA_OP_DR:
-		fprintf(fp, "%s %"PRId32" messages on %s\n", prefix,
-			rd_atomic32_get(&rko->rko_u.dr.msgq.rkmq_msg_cnt),
-			rko->rko_u.dr.s_rkt ?
-			rd_kafka_topic_s2i(rko->rko_u.dr.s_rkt)->
-			rkt_topic->str : "(n/a)");
-		break;
-	case RD_KAFKA_OP_OFFSET_COMMIT:
-		fprintf(fp, "%s Callback: %p (opaque %p)\n",
-			prefix, rko->rko_u.offset_commit.cb,
-			rko->rko_u.offset_commit.opaque);
-		fprintf(fp, "%s %d partitions\n",
-			prefix,
-			rko->rko_u.offset_commit.partitions ?
-			rko->rko_u.offset_commit.partitions->cnt : 0);
-		break;
-
-        case RD_KAFKA_OP_LOG:
-                fprintf(fp, "%s Log: %%%d %s: %s\n",
-                        prefix, rko->rko_u.log.level,
-                        rko->rko_u.log.fac,
-                        rko->rko_u.log.str);
-                break;
-
-	default:
-		break;
-	}
-}
-
-
-rd_kafka_op_t *rd_kafka_op_new0 (const char *source, rd_kafka_op_type_t type) {
-	rd_kafka_op_t *rko;
-        static const size_t op2size[RD_KAFKA_OP__END] = {
-                [RD_KAFKA_OP_FETCH] = sizeof(rko->rko_u.fetch),
-                [RD_KAFKA_OP_ERR] = sizeof(rko->rko_u.err),
-                [RD_KAFKA_OP_CONSUMER_ERR] = sizeof(rko->rko_u.err),
-                [RD_KAFKA_OP_DR] = sizeof(rko->rko_u.dr),
-                [RD_KAFKA_OP_STATS] = sizeof(rko->rko_u.stats),
-                [RD_KAFKA_OP_OFFSET_COMMIT] = sizeof(rko->rko_u.offset_commit),
-                [RD_KAFKA_OP_NODE_UPDATE] = sizeof(rko->rko_u.node),
-                [RD_KAFKA_OP_XMIT_BUF] = sizeof(rko->rko_u.xbuf),
-                [RD_KAFKA_OP_RECV_BUF] = sizeof(rko->rko_u.xbuf),
-                [RD_KAFKA_OP_XMIT_RETRY] = sizeof(rko->rko_u.xbuf),
-                [RD_KAFKA_OP_FETCH_START] = sizeof(rko->rko_u.fetch_start),
-                [RD_KAFKA_OP_FETCH_STOP] = 0,
-                [RD_KAFKA_OP_SEEK] = sizeof(rko->rko_u.fetch_start),
-                [RD_KAFKA_OP_PAUSE] = sizeof(rko->rko_u.pause),
-                [RD_KAFKA_OP_OFFSET_FETCH] = sizeof(rko->rko_u.offset_fetch),
-                [RD_KAFKA_OP_PARTITION_JOIN] = 0,
-                [RD_KAFKA_OP_PARTITION_LEAVE] = 0,
-                [RD_KAFKA_OP_REBALANCE] = sizeof(rko->rko_u.rebalance),
-                [RD_KAFKA_OP_TERMINATE] = 0,
-                [RD_KAFKA_OP_COORD_QUERY] = 0,
-                [RD_KAFKA_OP_SUBSCRIBE] = sizeof(rko->rko_u.subscribe),
-                [RD_KAFKA_OP_ASSIGN] = sizeof(rko->rko_u.assign),
-                [RD_KAFKA_OP_GET_SUBSCRIPTION] = sizeof(rko->rko_u.subscribe),
-                [RD_KAFKA_OP_GET_ASSIGNMENT] = sizeof(rko->rko_u.assign),
-                [RD_KAFKA_OP_THROTTLE] = sizeof(rko->rko_u.throttle),
-                [RD_KAFKA_OP_NAME] = sizeof(rko->rko_u.name),
-                [RD_KAFKA_OP_OFFSET_RESET] = sizeof(rko->rko_u.offset_reset),
-                [RD_KAFKA_OP_METADATA] = sizeof(rko->rko_u.metadata),
-                [RD_KAFKA_OP_LOG] = sizeof(rko->rko_u.log),
-                [RD_KAFKA_OP_WAKEUP] = 0,
-	};
-	size_t tsize = op2size[type & ~RD_KAFKA_OP_FLAGMASK];
-
-	rko = rd_calloc(1, sizeof(*rko)-sizeof(rko->rko_u)+tsize);
-	rko->rko_type = type;
-
-#if ENABLE_DEVEL
-        rko->rko_source = source;
-        rd_atomic32_add(&rd_kafka_op_cnt, 1);
-#endif
-	return rko;
-}
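
rd_kafka_op_new0() sizes each allocation to the fixed part of rd_kafka_op_t plus only the union member that the op type actually uses, as recorded in the op2size table. A toy illustration of that idiom (not librdkafka API), assuming the union is the last member of the struct:

#include <stdlib.h>

struct msg {
        int type;                               /* discriminates the union */
        union {
                struct { char text[256]; } log; /* large member */
                struct { int  code;      } err; /* small member */
        } u;                                    /* must be the last member */
};

static struct msg *msg_new (int type, size_t usize) {
        /* Allocate the fixed header plus just the needed union member. */
        struct msg *m = calloc(1, sizeof(*m) - sizeof(m->u) + usize);
        if (m)
                m->type = type;
        return m;
}

/* e.g. msg_new(1 /* hypothetical "err" type */, sizeof(((struct msg *)0)->u.err))
 * allocates far less than sizeof(struct msg), yet m->u.err is still
 * fully within the allocated block. */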
-
-
-void rd_kafka_op_destroy (rd_kafka_op_t *rko) {
-
-	switch (rko->rko_type & ~RD_KAFKA_OP_FLAGMASK)
-	{
-	case RD_KAFKA_OP_FETCH:
-		rd_kafka_msg_destroy(NULL, &rko->rko_u.fetch.rkm);
-		/* Decrease refcount on rkbuf to eventually rd_free shared buf*/
-		if (rko->rko_u.fetch.rkbuf)
-			rd_kafka_buf_handle_op(rko, RD_KAFKA_RESP_ERR__DESTROY);
-
-		break;
-
-	case RD_KAFKA_OP_OFFSET_FETCH:
-		if (rko->rko_u.offset_fetch.partitions &&
-		    rko->rko_u.offset_fetch.do_free)
-			rd_kafka_topic_partition_list_destroy(
-				rko->rko_u.offset_fetch.partitions);
-		break;
-
-	case RD_KAFKA_OP_OFFSET_COMMIT:
-		RD_IF_FREE(rko->rko_u.offset_commit.partitions,
-			   rd_kafka_topic_partition_list_destroy);
-                RD_IF_FREE(rko->rko_u.offset_commit.reason, rd_free);
-		break;
-
-	case RD_KAFKA_OP_SUBSCRIBE:
-	case RD_KAFKA_OP_GET_SUBSCRIPTION:
-		RD_IF_FREE(rko->rko_u.subscribe.topics,
-			   rd_kafka_topic_partition_list_destroy);
-		break;
-
-	case RD_KAFKA_OP_ASSIGN:
-	case RD_KAFKA_OP_GET_ASSIGNMENT:
-		RD_IF_FREE(rko->rko_u.assign.partitions,
-			   rd_kafka_topic_partition_list_destroy);
-		break;
-
-	case RD_KAFKA_OP_REBALANCE:
-		RD_IF_FREE(rko->rko_u.rebalance.partitions,
-			   rd_kafka_topic_partition_list_destroy);
-		break;
-
-	case RD_KAFKA_OP_NAME:
-		RD_IF_FREE(rko->rko_u.name.str, rd_free);
-		break;
-
-	case RD_KAFKA_OP_ERR:
-	case RD_KAFKA_OP_CONSUMER_ERR:
-		RD_IF_FREE(rko->rko_u.err.errstr, rd_free);
-		rd_kafka_msg_destroy(NULL, &rko->rko_u.err.rkm);
-		break;
-
-	case RD_KAFKA_OP_THROTTLE:
-		RD_IF_FREE(rko->rko_u.throttle.nodename, rd_free);
-		break;
-
-	case RD_KAFKA_OP_STATS:
-		RD_IF_FREE(rko->rko_u.stats.json, rd_free);
-		break;
-
-	case RD_KAFKA_OP_XMIT_RETRY:
-	case RD_KAFKA_OP_XMIT_BUF:
-	case RD_KAFKA_OP_RECV_BUF:
-		if (rko->rko_u.xbuf.rkbuf)
-			rd_kafka_buf_handle_op(rko, RD_KAFKA_RESP_ERR__DESTROY);
-
-		RD_IF_FREE(rko->rko_u.xbuf.rkbuf, rd_kafka_buf_destroy);
-		break;
-
-	case RD_KAFKA_OP_DR:
-		rd_kafka_msgq_purge(rko->rko_rk, &rko->rko_u.dr.msgq);
-		if (rko->rko_u.dr.do_purge2)
-			rd_kafka_msgq_purge(rko->rko_rk, &rko->rko_u.dr.msgq2);
-
-		if (rko->rko_u.dr.s_rkt)
-			rd_kafka_topic_destroy0(rko->rko_u.dr.s_rkt);
-		break;
-
-	case RD_KAFKA_OP_OFFSET_RESET:
-		RD_IF_FREE(rko->rko_u.offset_reset.reason, rd_free);
-		break;
-
-        case RD_KAFKA_OP_METADATA:
-                RD_IF_FREE(rko->rko_u.metadata.md, rd_kafka_metadata_destroy);
-                break;
-
-        case RD_KAFKA_OP_LOG:
-                rd_free(rko->rko_u.log.str);
-                break;
-
-	default:
-		break;
-	}
-
-        if (rko->rko_type & RD_KAFKA_OP_CB && rko->rko_op_cb) {
-                rd_kafka_op_res_t res;
-                /* Let callback clean up */
-                rko->rko_err = RD_KAFKA_RESP_ERR__DESTROY;
-                res = rko->rko_op_cb(rko->rko_rk, NULL, rko);
-                assert(res != RD_KAFKA_OP_RES_YIELD);
-        }
-
-	RD_IF_FREE(rko->rko_rktp, rd_kafka_toppar_destroy);
-
-	rd_kafka_replyq_destroy(&rko->rko_replyq);
-
-#if ENABLE_DEVEL
-        if (rd_atomic32_sub(&rd_kafka_op_cnt, 1) < 0)
-                rd_kafka_assert(NULL, !*"rd_kafka_op_cnt < 0");
-#endif
-
-	rd_free(rko);
-}
-
-
-
-
-
-
-
-
-
-
-
-/**
- * Propagate an error event to the application on a specific queue.
- * \p optype should be RD_KAFKA_OP_ERR for generic errors and
- * RD_KAFKA_OP_CONSUMER_ERR for consumer errors.
- */
-void rd_kafka_q_op_err (rd_kafka_q_t *rkq, rd_kafka_op_type_t optype,
-                        rd_kafka_resp_err_t err, int32_t version,
-			rd_kafka_toppar_t *rktp, int64_t offset,
-                        const char *fmt, ...) {
-	va_list ap;
-	char buf[2048];
-	rd_kafka_op_t *rko;
-
-	va_start(ap, fmt);
-	rd_vsnprintf(buf, sizeof(buf), fmt, ap);
-	va_end(ap);
-
-	rko = rd_kafka_op_new(optype);
-	rko->rko_version = version;
-	rko->rko_err = err;
-	rko->rko_u.err.offset = offset;
-	rko->rko_u.err.errstr = rd_strdup(buf);
-	if (rktp)
-		rko->rko_rktp = rd_kafka_toppar_keep(rktp);
-
-	rd_kafka_q_enq(rkq, rko);
-}
-
-
-
-/**
- * Creates a reply op based on 'rko_orig'.
- * If 'rko_orig' has rko_op_cb set the reply op will be OR:ed with
- * RD_KAFKA_OP_CB, else the reply type will be the original rko_type OR:ed
- * with RD_KAFKA_OP_REPLY.
- */
-rd_kafka_op_t *rd_kafka_op_new_reply (rd_kafka_op_t *rko_orig,
-				      rd_kafka_resp_err_t err) {
-        rd_kafka_op_t *rko;
-
-        rko = rd_kafka_op_new(rko_orig->rko_type |
-			      (rko_orig->rko_op_cb ?
-			       RD_KAFKA_OP_CB : RD_KAFKA_OP_REPLY));
-	rd_kafka_op_get_reply_version(rko, rko_orig);
-	rko->rko_op_cb   = rko_orig->rko_op_cb;
-	rko->rko_err     = err;
-	if (rko_orig->rko_rktp)
-		rko->rko_rktp = rd_kafka_toppar_keep(
-			rd_kafka_toppar_s2i(rko_orig->rko_rktp));
-
-        return rko;
-}
-
-
-/**
- * @brief Create new callback op for type \p type
- */
-rd_kafka_op_t *rd_kafka_op_new_cb (rd_kafka_t *rk,
-                                   rd_kafka_op_type_t type,
-                                   rd_kafka_op_cb_t *cb) {
-        rd_kafka_op_t *rko;
-        rko = rd_kafka_op_new(type | RD_KAFKA_OP_CB);
-        rko->rko_op_cb = cb;
-        rko->rko_rk = rk;
-        return rko;
-}
-
-
-
-/**
- * @brief Reply to 'rko' re-using the same rko.
- * If there is no replyq the rko is destroyed.
- *
- * @returns 1 if op was enqueued, else 0 and rko is destroyed.
- */
-int rd_kafka_op_reply (rd_kafka_op_t *rko, rd_kafka_resp_err_t err) {
-
-        if (!rko->rko_replyq.q) {
-		rd_kafka_op_destroy(rko);
-                return 0;
-	}
-
-	rko->rko_type |= (rko->rko_op_cb ? RD_KAFKA_OP_CB : RD_KAFKA_OP_REPLY);
-        rko->rko_err   = err;
-
-	return rd_kafka_replyq_enq(&rko->rko_replyq, rko, 0);
-}
-
-
-/**
- * @brief Send request to queue, wait for response.
- *
- * @returns response on success or NULL if destq is disabled.
- */
-rd_kafka_op_t *rd_kafka_op_req0 (rd_kafka_q_t *destq,
-                                 rd_kafka_q_t *recvq,
-                                 rd_kafka_op_t *rko,
-                                 int timeout_ms) {
-        rd_kafka_op_t *reply;
-
-        /* Indicate to destination where to send reply. */
-        rd_kafka_op_set_replyq(rko, recvq, NULL);
-
-        /* Enqueue op */
-        if (!rd_kafka_q_enq(destq, rko))
-                return NULL;
-
-        /* Wait for reply */
-        reply = rd_kafka_q_pop(recvq, timeout_ms, 0);
-
-        /* May be NULL for timeout */
-        return reply;
-}
-
-/**
- * Send request to queue, wait for response.
- * Creates a temporary reply queue.
- */
-rd_kafka_op_t *rd_kafka_op_req (rd_kafka_q_t *destq,
-                                rd_kafka_op_t *rko,
-                                int timeout_ms) {
-        rd_kafka_q_t *recvq;
-        rd_kafka_op_t *reply;
-
-        recvq = rd_kafka_q_new(destq->rkq_rk);
-
-        reply = rd_kafka_op_req0(destq, recvq, rko, timeout_ms);
-
-        rd_kafka_q_destroy(recvq);
-
-        return reply;
-}
-
-
-/**
- * Send simple type-only request to queue, wait for response.
- */
-rd_kafka_op_t *rd_kafka_op_req2 (rd_kafka_q_t *destq, rd_kafka_op_type_t type) {
-        rd_kafka_op_t *rko;
-
-        rko = rd_kafka_op_new(type);
-        return rd_kafka_op_req(destq, rko, RD_POLL_INFINITE);
-}
-
-/**
- * Destroys the rko and returns its error.
- */
-rd_kafka_resp_err_t rd_kafka_op_err_destroy (rd_kafka_op_t *rko) {
-        rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR__TIMED_OUT;
-
-	if (rko) {
-		err = rko->rko_err;
-		rd_kafka_op_destroy(rko);
-	}
-        return err;
-}
-
-
-/**
- * Call op callback
- */
-rd_kafka_op_res_t rd_kafka_op_call (rd_kafka_t *rk, rd_kafka_q_t *rkq,
-                                    rd_kafka_op_t *rko) {
-        rd_kafka_op_res_t res;
-        res = rko->rko_op_cb(rk, rkq, rko);
-        if (unlikely(res == RD_KAFKA_OP_RES_YIELD || rd_kafka_yield_thread))
-                return RD_KAFKA_OP_RES_YIELD;
-        rko->rko_op_cb = NULL;
-        return res;
-}
-
-
-/**
- * @brief Creates a new RD_KAFKA_OP_FETCH op and sets up the
- *        embedded message according to the parameters.
- *
- * @param rkmp will be set to the embedded rkm in the rko (for convenience)
- * @param offset may be updated later if relative offset.
- */
-rd_kafka_op_t *
-rd_kafka_op_new_fetch_msg (rd_kafka_msg_t **rkmp,
-                           rd_kafka_toppar_t *rktp,
-                           int32_t version,
-                           rd_kafka_buf_t *rkbuf,
-                           int64_t offset,
-                           size_t key_len, const void *key,
-                           size_t val_len, const void *val) {
-        rd_kafka_msg_t *rkm;
-        rd_kafka_op_t *rko;
-
-        rko = rd_kafka_op_new(RD_KAFKA_OP_FETCH);
-        rko->rko_rktp    = rd_kafka_toppar_keep(rktp);
-        rko->rko_version = version;
-        rkm   = &rko->rko_u.fetch.rkm;
-        *rkmp = rkm;
-
-        /* Since all the ops share the same payload buffer,
-         * a refcnt on the rkbuf ensures that consume_cb() has been
-         * called for each of these ops before the rkbuf and its
-         * backing memory buffers are freed. */
-        rko->rko_u.fetch.rkbuf = rkbuf;
-        rd_kafka_buf_keep(rkbuf);
-
-        rkm->rkm_offset    = offset;
-
-        rkm->rkm_key       = (void *)key;
-        rkm->rkm_key_len   = key_len;
-
-        rkm->rkm_payload   = (void *)val;
-        rkm->rkm_len       = val_len;
-        rko->rko_len       = (int32_t)rkm->rkm_len;
-
-        rkm->rkm_partition = rktp->rktp_partition;
-
-        return rko;
-}
-
-
-/**
- * Enqueue ERR__THROTTLE op, if desired.
- */
-void rd_kafka_op_throttle_time (rd_kafka_broker_t *rkb,
-				rd_kafka_q_t *rkq,
-				int throttle_time) {
-	rd_kafka_op_t *rko;
-
-	rd_avg_add(&rkb->rkb_avg_throttle, throttle_time);
-
-	/* We send throttle events when:
-	 *  - throttle_time > 0
-	 *  - throttle_time == 0 and last throttle_time > 0
-	 */
-	if (!rkb->rkb_rk->rk_conf.throttle_cb ||
-	    (!throttle_time && !rd_atomic32_get(&rkb->rkb_rk->rk_last_throttle)))
-		return;
-
-	rd_atomic32_set(&rkb->rkb_rk->rk_last_throttle, throttle_time);
-
-	rko = rd_kafka_op_new(RD_KAFKA_OP_THROTTLE);
-        rd_kafka_op_set_prio(rko, RD_KAFKA_PRIO_HIGH);
-	rko->rko_u.throttle.nodename = rd_strdup(rkb->rkb_nodename);
-	rko->rko_u.throttle.nodeid   = rkb->rkb_nodeid;
-	rko->rko_u.throttle.throttle_time = throttle_time;
-	rd_kafka_q_enq(rkq, rko);
-}
-
-
-/**
- * @brief Handle standard op types.
- */
-rd_kafka_op_res_t
-rd_kafka_op_handle_std (rd_kafka_t *rk, rd_kafka_q_t *rkq,
-                        rd_kafka_op_t *rko, int cb_type) {
-        if (cb_type == RD_KAFKA_Q_CB_FORCE_RETURN)
-                return RD_KAFKA_OP_RES_PASS;
-        else if (cb_type != RD_KAFKA_Q_CB_EVENT &&
-                 rko->rko_type & RD_KAFKA_OP_CB)
-                return rd_kafka_op_call(rk, rkq, rko);
-        else if (rko->rko_type == RD_KAFKA_OP_RECV_BUF) /* Handle Response */
-                rd_kafka_buf_handle_op(rko, rko->rko_err);
-        else if (rko->rko_type == RD_KAFKA_OP_WAKEUP)
-                ;/* do nothing, wake up is a fact anyway */
-        else if (cb_type != RD_KAFKA_Q_CB_RETURN &&
-                 rko->rko_type & RD_KAFKA_OP_REPLY &&
-                 rko->rko_err == RD_KAFKA_RESP_ERR__DESTROY)
-                return RD_KAFKA_OP_RES_HANDLED; /* dest queue was
-                                                 * probably disabled. */
-        else
-                return RD_KAFKA_OP_RES_PASS;
-
-        return RD_KAFKA_OP_RES_HANDLED;
-}
-
-
-/**
- * @brief Attempt to handle op using its queue's serve callback,
- *        or the passed callback, or op_handle_std(), else do nothing.
- *
- * @param rkq is \p rko's queue (which it was unlinked from) with rkq_lock
- *            being held. Callback may re-enqueue the op on this queue
- *            and return YIELD.
- *
- * @returns HANDLED if op was handled (and destroyed), PASS if not,
- *          or YIELD if op was handled (maybe destroyed or re-enqueued)
- *          and caller must propagate yield upwards (cancel and return).
- */
-rd_kafka_op_res_t
-rd_kafka_op_handle (rd_kafka_t *rk, rd_kafka_q_t *rkq, rd_kafka_op_t *rko,
-                    rd_kafka_q_cb_type_t cb_type, void *opaque,
-                    rd_kafka_q_serve_cb_t *callback) {
-        rd_kafka_op_res_t res;
-
-        res = rd_kafka_op_handle_std(rk, rkq, rko, cb_type);
-        if (res == RD_KAFKA_OP_RES_HANDLED) {
-                rd_kafka_op_destroy(rko);
-                return res;
-        } else if (unlikely(res == RD_KAFKA_OP_RES_YIELD))
-                return res;
-
-        if (rko->rko_serve) {
-                callback = rko->rko_serve;
-                opaque   = rko->rko_serve_opaque;
-                rko->rko_serve        = NULL;
-                rko->rko_serve_opaque = NULL;
-        }
-
-        if (callback)
-                res = callback(rk, rkq, rko, cb_type, opaque);
-
-        return res;
-}
-
-
-/**
- * @brief Store offset for fetched message.
- */
-void rd_kafka_op_offset_store (rd_kafka_t *rk, rd_kafka_op_t *rko,
-			       const rd_kafka_message_t *rkmessage) {
-	rd_kafka_toppar_t *rktp;
-
-	if (unlikely(rko->rko_type != RD_KAFKA_OP_FETCH || rko->rko_err))
-		return;
-
-	rktp = rd_kafka_toppar_s2i(rko->rko_rktp);
-
-	if (unlikely(!rk))
-		rk = rktp->rktp_rkt->rkt_rk;
-
-	rd_kafka_toppar_lock(rktp);
-	rktp->rktp_app_offset = rkmessage->offset+1;
-	if (rk->rk_conf.enable_auto_offset_store)
-		rd_kafka_offset_store0(rktp, rkmessage->offset+1, 0/*no lock*/);
-	rd_kafka_toppar_unlock(rktp);
-}
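
rd_kafka_op_offset_store() records rkmessage->offset+1 (the next offset to consume) and, when enable.auto.offset.store is enabled, hands it to the offset store for the next commit. Applications that want at-least-once semantics typically set enable.auto.offset.store=false and store offsets themselves only after processing; a hedged sketch using the public API, where process() is a placeholder for application logic and rk is an existing consumer handle:

        rd_kafka_message_t *rkm = rd_kafka_consumer_poll(rk, 1000);
        if (rkm) {
                if (!rkm->err) {
                        process(rkm);   /* placeholder: application processing */
                        /* Store the processed message's offset; librdkafka
                         * stores offset+1 so the commit points at the next
                         * message to consume. */
                        rd_kafka_offset_store(rkm->rkt, rkm->partition,
                                              rkm->offset);
                }
                rd_kafka_message_destroy(rkm);
        }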

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdkafka_op.h
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdkafka_op.h b/thirdparty/librdkafka-0.11.1/src/rdkafka_op.h
deleted file mode 100644
index f0af481..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdkafka_op.h
+++ /dev/null
@@ -1,400 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2015, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-#pragma once
-
-
-#include "rdkafka_msg.h"
-
-/* Forward declarations */
-typedef struct rd_kafka_q_s rd_kafka_q_t;
-typedef struct rd_kafka_toppar_s rd_kafka_toppar_t;
-typedef struct rd_kafka_op_s rd_kafka_op_t;
-
-/* One-off reply queue + reply version.
- * All APIs that take a rd_kafka_replyq_t make a copy of the
- * struct as-is and grab hold of the existing .q refcount.
- * Think of replyq as a (Q,VERSION) tuple. */
-typedef struct rd_kafka_replyq_s {
-	rd_kafka_q_t *q;
-	int32_t       version;
-#if ENABLE_DEVEL
-	char *_id; /* Devel id used for debugging reference leaks.
-		    * Is a strdup() of the caller's function name,
-		    * which makes for easy debugging with valgrind. */
-#endif
-} rd_kafka_replyq_t;
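
The (Q,VERSION) tuple is what lets outdated replies be discarded: an op carries the version it was issued under, and a handler can ignore any reply whose version predates the latest barrier. A toy sketch of the idea (not librdkafka API):

#include <stdint.h>

struct reply {
        int32_t version;        /* version the request was issued under */
        int     result;
};

static int32_t barrier_version = 1;

static void handle_reply (const struct reply *r) {
        if (r->version < barrier_version)
                return;         /* outdated: a newer barrier was set since */
        /* ... act on r->result ... */
}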
-
-
-
-
-/**
- * Flags used by:
- *   - rd_kafka_op_t.rko_flags
- *   - rd_kafka_buf_t.rkbuf_flags
- */
-#define RD_KAFKA_OP_F_FREE        0x1  /* rd_free payload when done with it */
-#define RD_KAFKA_OP_F_FLASH       0x2  /* Internal: insert at head of queue */
-#define RD_KAFKA_OP_F_NO_RESPONSE 0x4  /* rkbuf: Not expecting a response */
-#define RD_KAFKA_OP_F_CRC         0x8  /* rkbuf: Perform CRC calculation */
-#define RD_KAFKA_OP_F_BLOCKING    0x10 /* rkbuf: blocking protocol request */
-#define RD_KAFKA_OP_F_REPROCESS   0x20 /* cgrp: Reprocess at a later time. */
-
-
-typedef enum {
-        RD_KAFKA_OP_NONE,     /* No specific type, use OP_CB */
-	RD_KAFKA_OP_FETCH,    /* Kafka thread -> Application */
-	RD_KAFKA_OP_ERR,      /* Kafka thread -> Application */
-        RD_KAFKA_OP_CONSUMER_ERR, /* Kafka thread -> Application */
-	RD_KAFKA_OP_DR,       /* Kafka thread -> Application
-			       * Produce message delivery report */
-	RD_KAFKA_OP_STATS,    /* Kafka thread -> Application */
-
-        RD_KAFKA_OP_OFFSET_COMMIT, /* any -> toppar's Broker thread */
-        RD_KAFKA_OP_NODE_UPDATE,   /* any -> Broker thread: node update */
-
-        RD_KAFKA_OP_XMIT_BUF, /* transmit buffer: any -> broker thread */
-        RD_KAFKA_OP_RECV_BUF, /* received response buffer: broker thr -> any */
-        RD_KAFKA_OP_XMIT_RETRY, /* retry buffer xmit: any -> broker thread */
-        RD_KAFKA_OP_FETCH_START, /* Application -> toppar's handler thread */
-        RD_KAFKA_OP_FETCH_STOP,  /* Application -> toppar's handler thread */
-        RD_KAFKA_OP_SEEK,        /* Application -> toppar's handler thread */
-	RD_KAFKA_OP_PAUSE,       /* Application -> toppar's handler thread */
-        RD_KAFKA_OP_OFFSET_FETCH, /* Broker -> broker thread: fetch offsets
-                                   * for topic. */
-
-        RD_KAFKA_OP_PARTITION_JOIN,  /* * -> cgrp op:   add toppar to cgrp
-                                      * * -> broker op: add toppar to broker */
-        RD_KAFKA_OP_PARTITION_LEAVE, /* * -> cgrp op:   remove toppar from cgrp
-                                      * * -> broker op: remove toppar from rkb*/
-        RD_KAFKA_OP_REBALANCE,       /* broker thread -> app:
-                                      * group rebalance */
-        RD_KAFKA_OP_TERMINATE,       /* For generic use */
-        RD_KAFKA_OP_COORD_QUERY,     /* Query for coordinator */
-        RD_KAFKA_OP_SUBSCRIBE,       /* New subscription */
-        RD_KAFKA_OP_ASSIGN,          /* New assignment */
-        RD_KAFKA_OP_GET_SUBSCRIPTION,/* Get current subscription.
-				      * Reuses u.subscribe */
-        RD_KAFKA_OP_GET_ASSIGNMENT,  /* Get current assignment.
-				      * Reuses u.assign */
-	RD_KAFKA_OP_THROTTLE,        /* Throttle info */
-	RD_KAFKA_OP_NAME,            /* Request name */
-	RD_KAFKA_OP_OFFSET_RESET,    /* Offset reset */
-        RD_KAFKA_OP_METADATA,        /* Metadata response */
-        RD_KAFKA_OP_LOG,             /* Log */
-        RD_KAFKA_OP_WAKEUP,          /* Wake-up signaling */
-        RD_KAFKA_OP__END
-} rd_kafka_op_type_t;
-
-/* Flags used with op_type_t */
-#define RD_KAFKA_OP_CB        (1 << 30)  /* Callback op. */
-#define RD_KAFKA_OP_REPLY     (1 << 31)  /* Reply op. */
-#define RD_KAFKA_OP_FLAGMASK  (RD_KAFKA_OP_CB | RD_KAFKA_OP_REPLY)
-
-
-/**
- * @brief Op/queue priority levels.
- * @remark Since priority levels alter the FIFO order, pay extra attention
- *         to preserve ordering as deemed necessary.
- * @remark Priority should only be set on ops destined for application
- *         facing queues (rk_rep, rkcg_q, etc).
- */
-typedef enum {
-        RD_KAFKA_PRIO_NORMAL = 0,   /* Normal bulk, messages, DRs, etc. */
-        RD_KAFKA_PRIO_MEDIUM,       /* Prioritize in front of bulk,
-                                     * still at some scale. e.g. logs, .. */
-        RD_KAFKA_PRIO_HIGH,         /* Small scale high priority */
-        RD_KAFKA_PRIO_FLASH         /* Micro scale, immediate delivery. */
-} rd_kafka_op_prio_t;
-
-
-/**
- * @brief Op handler result
- *
- * @remark When returning YIELD from a handler the handler will
- *         need to have made sure to either re-enqueue the op or destroy it
- *         since the caller will not touch the op anymore.
- */
-typedef enum {
-        RD_KAFKA_OP_RES_PASS,    /* Not handled, pass to caller */
-        RD_KAFKA_OP_RES_HANDLED, /* Op was handled (through callbacks) */
-        RD_KAFKA_OP_RES_YIELD    /* Callback called yield */
-} rd_kafka_op_res_t;
-
-
-/**
- * @brief Queue serve callback call type
- */
-typedef enum {
-        RD_KAFKA_Q_CB_INVALID, /* don't use */
-        RD_KAFKA_Q_CB_CALLBACK,/* trigger callback based on op */
-        RD_KAFKA_Q_CB_RETURN,  /* return op rather than trigger callback
-                                * (if possible)*/
-        RD_KAFKA_Q_CB_FORCE_RETURN, /* return op, regardless of callback. */
-        RD_KAFKA_Q_CB_EVENT    /* like _Q_CB_RETURN but return event_t:ed op */
-} rd_kafka_q_cb_type_t;
-
-/**
- * @brief Queue serve callback
- * @remark See rd_kafka_op_res_t docs for return semantics.
- */
-typedef rd_kafka_op_res_t
-(rd_kafka_q_serve_cb_t) (rd_kafka_t *rk,
-                         struct rd_kafka_q_s *rkq,
-                         struct rd_kafka_op_s *rko,
-                         rd_kafka_q_cb_type_t cb_type, void *opaque)
-        RD_WARN_UNUSED_RESULT;
-
-/**
- * @brief Op callback type
- */
-typedef rd_kafka_op_res_t (rd_kafka_op_cb_t) (rd_kafka_t *rk,
-                                              rd_kafka_q_t *rkq,
-                                              struct rd_kafka_op_s *rko)
-                RD_WARN_UNUSED_RESULT;
-
-
-#define RD_KAFKA_OP_TYPE_ASSERT(rko,type) \
-	rd_kafka_assert(NULL, (rko)->rko_type == (type) && # type)
-
-struct rd_kafka_op_s {
-	TAILQ_ENTRY(rd_kafka_op_s) rko_link;
-
-	rd_kafka_op_type_t    rko_type;   /* Internal op type */
-	rd_kafka_event_type_t rko_evtype;
-	int                   rko_flags;  /* See RD_KAFKA_OP_F_... above */
-	int32_t               rko_version;
-	rd_kafka_resp_err_t   rko_err;
-	int32_t               rko_len;    /* Depends on type, typically the
-					   * message length. */
-        rd_kafka_op_prio_t    rko_prio;   /* In-queue priority.
-                                           * Higher value means higher prio. */
-
-	shptr_rd_kafka_toppar_t *rko_rktp;
-
-        /*
-	 * Generic fields
-	 */
-
-	/* Indicates request: enqueue reply on rko_replyq.q with .version.
-	 * .q is refcounted. */
-	rd_kafka_replyq_t rko_replyq;
-
-        /* Original queue's op serve callback and opaque, if any.
-         * Mainly used for forwarded queues to use the original queue's
-         * serve function from the forwarded position. */
-        rd_kafka_q_serve_cb_t *rko_serve;
-        void *rko_serve_opaque;
-
-	rd_kafka_t     *rko_rk;
-
-#if ENABLE_DEVEL
-        const char *rko_source;  /**< Where op was created */
-#endif
-
-        /* RD_KAFKA_OP_CB */
-        rd_kafka_op_cb_t *rko_op_cb;
-
-	union {
-		struct {
-			rd_kafka_buf_t *rkbuf;
-			rd_kafka_msg_t  rkm;
-			int evidx;
-		} fetch;
-
-		struct {
-			rd_kafka_topic_partition_list_t *partitions;
-			int do_free; /* free .partitions on destroy() */
-		} offset_fetch;
-
-		struct {
-			rd_kafka_topic_partition_list_t *partitions;
-			void (*cb) (rd_kafka_t *rk,
-				    rd_kafka_resp_err_t err,
-				    rd_kafka_topic_partition_list_t *offsets,
-				    void *opaque);
-			void *opaque;
-			int silent_empty; /**< Fail silently if there are no
-					   *   offsets to commit. */
-                        rd_ts_t ts_timeout;
-                        char *reason;
-		} offset_commit;
-
-		struct {
-			rd_kafka_topic_partition_list_t *topics;
-		} subscribe; /* also used for GET_SUBSCRIPTION */
-
-		struct {
-			rd_kafka_topic_partition_list_t *partitions;
-		} assign; /* also used for GET_ASSIGNMENT */
-
-		struct {
-			rd_kafka_topic_partition_list_t *partitions;
-		} rebalance;
-
-		struct {
-			char *str;
-		} name;
-
-		struct {
-			int64_t offset;
-			char *errstr;
-			rd_kafka_msg_t rkm;
-		} err;  /* used for ERR and CONSUMER_ERR */
-
-		struct {
-			int throttle_time;
-			int32_t nodeid;
-			char *nodename;
-		} throttle;
-
-		struct {
-			char *json;
-			size_t json_len;
-		} stats;
-
-		struct {
-			rd_kafka_buf_t *rkbuf;
-		} xbuf; /* XMIT_BUF and RECV_BUF */
-
-                /* RD_KAFKA_OP_METADATA */
-                struct {
-                        rd_kafka_metadata_t *md;
-                        int force; /* force request regardless of outstanding
-                                    * metadata requests. */
-                } metadata;
-
-		struct {
-			shptr_rd_kafka_itopic_t *s_rkt;
-			rd_kafka_msgq_t msgq;
-			rd_kafka_msgq_t msgq2;
-			int do_purge2;
-		} dr;
-
-		struct {
-			int32_t nodeid;
-			char    nodename[RD_KAFKA_NODENAME_SIZE];
-		} node;
-
-		struct {
-			int64_t offset;
-			char *reason;
-		} offset_reset;
-
-		struct {
-			int64_t offset;
-			struct rd_kafka_cgrp_s *rkcg;
-		} fetch_start; /* reused for SEEK */
-
-		struct {
-			int pause;
-			int flag;
-		} pause;
-
-                struct {
-                        char fac[64];
-                        int  level;
-                        char *str;
-                } log;
-	} rko_u;
-};
-
-TAILQ_HEAD(rd_kafka_op_head_s, rd_kafka_op_s);
-
-
-
-
-const char *rd_kafka_op2str (rd_kafka_op_type_t type);
-void rd_kafka_op_destroy (rd_kafka_op_t *rko);
-rd_kafka_op_t *rd_kafka_op_new0 (const char *source, rd_kafka_op_type_t type);
-#if ENABLE_DEVEL
-#define _STRINGIFYX(A) #A
-#define _STRINGIFY(A) _STRINGIFYX(A)
-#define rd_kafka_op_new(type)                                   \
-        rd_kafka_op_new0(__FILE__ ":" _STRINGIFY(__LINE__), type)
-#else
-#define rd_kafka_op_new(type) rd_kafka_op_new0(NULL, type)
-#endif
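
The two-level _STRINGIFY above is the standard preprocessor trick used to embed the call site in rko_source: the outer macro expands __LINE__ to its numeric value before the inner # operator turns it into a string literal, which is then concatenated with __FILE__. A small self-contained illustration:

#include <stdio.h>

#define STRINGIFYX(A) #A
#define STRINGIFY(A)  STRINGIFYX(A)
#define HERE __FILE__ ":" STRINGIFY(__LINE__)

int main (void) {
        puts(HERE);     /* prints e.g. "example.c:9" */
        return 0;
}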
-rd_kafka_op_t *rd_kafka_op_new_reply (rd_kafka_op_t *rko_orig,
-                                      rd_kafka_resp_err_t err);
-rd_kafka_op_t *rd_kafka_op_new_cb (rd_kafka_t *rk,
-                                   rd_kafka_op_type_t type,
-                                   rd_kafka_op_cb_t *cb);
-int rd_kafka_op_reply (rd_kafka_op_t *rko, rd_kafka_resp_err_t err);
-
-#define rd_kafka_op_set_prio(rko,prio) ((rko)->rko_prio = prio)
-
-
-#define rd_kafka_op_err(rk,err,...) do {				\
-		if (!(rk)->rk_conf.error_cb) {				\
-			rd_kafka_log(rk, LOG_ERR, "ERROR", __VA_ARGS__); \
-			break;						\
-		}							\
-		rd_kafka_q_op_err((rk)->rk_rep, RD_KAFKA_OP_ERR, err, 0, \
-				  NULL, 0, __VA_ARGS__);		\
-	} while (0)
-
-void rd_kafka_q_op_err (rd_kafka_q_t *rkq, rd_kafka_op_type_t optype,
-                        rd_kafka_resp_err_t err, int32_t version,
-                        rd_kafka_toppar_t *rktp, int64_t offset,
-			const char *fmt, ...);
-rd_kafka_op_t *rd_kafka_op_req (rd_kafka_q_t *destq,
-                                rd_kafka_op_t *rko,
-                                int timeout_ms);
-rd_kafka_op_t *rd_kafka_op_req2 (rd_kafka_q_t *destq, rd_kafka_op_type_t type);
-rd_kafka_resp_err_t rd_kafka_op_err_destroy (rd_kafka_op_t *rko);
-
-rd_kafka_op_res_t rd_kafka_op_call (rd_kafka_t *rk,
-                                    rd_kafka_q_t *rkq, rd_kafka_op_t *rko)
-        RD_WARN_UNUSED_RESULT;
-
-rd_kafka_op_t *
-rd_kafka_op_new_fetch_msg (rd_kafka_msg_t **rkmp,
-                           rd_kafka_toppar_t *rktp,
-                           int32_t version,
-                           rd_kafka_buf_t *rkbuf,
-                           int64_t offset,
-                           size_t key_len, const void *key,
-                           size_t val_len, const void *val);
-
-void rd_kafka_op_throttle_time (struct rd_kafka_broker_s *rkb,
-				rd_kafka_q_t *rkq,
-				int throttle_time);
-
-
-rd_kafka_op_res_t
-rd_kafka_op_handle (rd_kafka_t *rk, rd_kafka_q_t *rkq, rd_kafka_op_t *rko,
-                    rd_kafka_q_cb_type_t cb_type, void *opaque,
-                    rd_kafka_q_serve_cb_t *callback) RD_WARN_UNUSED_RESULT;
-
-
-extern rd_atomic32_t rd_kafka_op_cnt;
-
-void rd_kafka_op_print (FILE *fp, const char *prefix, rd_kafka_op_t *rko);
-
-void rd_kafka_op_offset_store (rd_kafka_t *rk, rd_kafka_op_t *rko,
-			       const rd_kafka_message_t *rkmessage);


[30/51] [partial] nifi-minifi-cpp git commit: MINIFICPP-512 - upgrade to librdkafka 0.11.4

Posted by ph...@apache.org.
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdkafka_cgrp.c
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdkafka_cgrp.c b/thirdparty/librdkafka-0.11.1/src/rdkafka_cgrp.c
deleted file mode 100644
index 4f052ad..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdkafka_cgrp.c
+++ /dev/null
@@ -1,3204 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2015, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "rdkafka_int.h"
-#include "rdkafka_broker.h"
-#include "rdkafka_request.h"
-#include "rdkafka_topic.h"
-#include "rdkafka_partition.h"
-#include "rdkafka_assignor.h"
-#include "rdkafka_offset.h"
-#include "rdkafka_metadata.h"
-#include "rdkafka_cgrp.h"
-#include "rdkafka_interceptor.h"
-
-
-static void rd_kafka_cgrp_check_unassign_done (rd_kafka_cgrp_t *rkcg,
-                                               const char *reason);
-static void rd_kafka_cgrp_offset_commit_tmr_cb (rd_kafka_timers_t *rkts,
-                                                void *arg);
-static void rd_kafka_cgrp_assign (rd_kafka_cgrp_t *rkcg,
-				  rd_kafka_topic_partition_list_t *assignment);
-static rd_kafka_resp_err_t rd_kafka_cgrp_unassign (rd_kafka_cgrp_t *rkcg);
-static void
-rd_kafka_cgrp_partitions_fetch_start0 (rd_kafka_cgrp_t *rkcg,
-				       rd_kafka_topic_partition_list_t
-				       *assignment, int usable_offsets,
-				       int line);
-#define rd_kafka_cgrp_partitions_fetch_start(rkcg,assignment,usable_offsets) \
-	rd_kafka_cgrp_partitions_fetch_start0(rkcg,assignment,usable_offsets,\
-					      __LINE__)
-static rd_kafka_op_res_t
-rd_kafka_cgrp_op_serve (rd_kafka_t *rk, rd_kafka_q_t *rkq,
-                        rd_kafka_op_t *rko, rd_kafka_q_cb_type_t cb_type,
-                        void *opaque);
-
-static void rd_kafka_cgrp_group_leader_reset (rd_kafka_cgrp_t *rkcg,
-                                              const char *reason);
-
-/**
- * @returns true if cgrp can start partition fetchers, which is true if
- *          there is a subscription and the group is fully joined, or there
- *          is no subscription (in which case the join state is irrelevant)
- *          such as for an assign() without subscribe(). */
-#define RD_KAFKA_CGRP_CAN_FETCH_START(rkcg) \
-	((rkcg)->rkcg_join_state == RD_KAFKA_CGRP_JOIN_STATE_ASSIGNED)
-
-/**
- * @returns true if cgrp is waiting for a rebalance_cb to be handled by
- *          the application.
- */
-#define RD_KAFKA_CGRP_WAIT_REBALANCE_CB(rkcg)			\
-	((rkcg)->rkcg_join_state ==				\
-	 RD_KAFKA_CGRP_JOIN_STATE_WAIT_ASSIGN_REBALANCE_CB ||	\
-	 (rkcg)->rkcg_join_state ==				\
-	 RD_KAFKA_CGRP_JOIN_STATE_WAIT_REVOKE_REBALANCE_CB)
-
-
-const char *rd_kafka_cgrp_state_names[] = {
-        "init",
-        "term",
-        "query-coord",
-        "wait-coord",
-        "wait-broker",
-        "wait-broker-transport",
-        "up"
-};
-
-const char *rd_kafka_cgrp_join_state_names[] = {
-        "init",
-        "wait-join",
-        "wait-metadata",
-        "wait-sync",
-        "wait-unassign",
-        "wait-assign-rebalance_cb",
-	"wait-revoke-rebalance_cb",
-        "assigned",
-	"started"
-};
-
-
-static void rd_kafka_cgrp_set_state (rd_kafka_cgrp_t *rkcg, int state) {
-        if ((int)rkcg->rkcg_state == state)
-                return;
-
-        rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "CGRPSTATE",
-                     "Group \"%.*s\" changed state %s -> %s "
-                     "(v%d, join-state %s)",
-                     RD_KAFKAP_STR_PR(rkcg->rkcg_group_id),
-                     rd_kafka_cgrp_state_names[rkcg->rkcg_state],
-                     rd_kafka_cgrp_state_names[state],
-		     rkcg->rkcg_version,
-                     rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state]);
-        rkcg->rkcg_state = state;
-        rkcg->rkcg_ts_statechange = rd_clock();
-
-	rd_kafka_brokers_broadcast_state_change(rkcg->rkcg_rk);
-}
-
-
-void rd_kafka_cgrp_set_join_state (rd_kafka_cgrp_t *rkcg, int join_state) {
-        if ((int)rkcg->rkcg_join_state == join_state)
-                return;
-
-        rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "CGRPJOINSTATE",
-                     "Group \"%.*s\" changed join state %s -> %s "
-                     "(v%d, state %s)",
-                     RD_KAFKAP_STR_PR(rkcg->rkcg_group_id),
-                     rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state],
-                     rd_kafka_cgrp_join_state_names[join_state],
-		     rkcg->rkcg_version,
-                     rd_kafka_cgrp_state_names[rkcg->rkcg_state]);
-        rkcg->rkcg_join_state = join_state;
-}
-
-
-static RD_INLINE void
-rd_kafka_cgrp_version_new_barrier0 (rd_kafka_cgrp_t *rkcg,
-				    const char *func, int line) {
-	rkcg->rkcg_version++;
-	rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "BARRIER",
-		     "Group \"%.*s\": %s:%d: new version barrier v%d",
-		     RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), func, line,
-		     rkcg->rkcg_version);
-}
-
-#define rd_kafka_cgrp_version_new_barrier(rkcg) \
-	rd_kafka_cgrp_version_new_barrier0(rkcg, __FUNCTION__, __LINE__)
-
-
-void rd_kafka_cgrp_destroy_final (rd_kafka_cgrp_t *rkcg) {
-        rd_kafka_assert(rkcg->rkcg_rk, !rkcg->rkcg_assignment);
-        rd_kafka_assert(rkcg->rkcg_rk, !rkcg->rkcg_subscription);
-        rd_kafka_assert(rkcg->rkcg_rk, !rkcg->rkcg_group_leader.members);
-        rd_kafka_cgrp_set_member_id(rkcg, NULL);
-
-        rd_kafka_q_destroy(rkcg->rkcg_q);
-        rd_kafka_q_destroy(rkcg->rkcg_ops);
-	rd_kafka_q_destroy(rkcg->rkcg_wait_coord_q);
-        rd_kafka_assert(rkcg->rkcg_rk, TAILQ_EMPTY(&rkcg->rkcg_topics));
-        rd_kafka_assert(rkcg->rkcg_rk, rd_list_empty(&rkcg->rkcg_toppars));
-        rd_list_destroy(&rkcg->rkcg_toppars);
-        rd_list_destroy(rkcg->rkcg_subscribed_topics);
-        rd_free(rkcg);
-}
-
-
-
-
-rd_kafka_cgrp_t *rd_kafka_cgrp_new (rd_kafka_t *rk,
-                                    const rd_kafkap_str_t *group_id,
-                                    const rd_kafkap_str_t *client_id) {
-        rd_kafka_cgrp_t *rkcg;
-
-        rkcg = rd_calloc(1, sizeof(*rkcg));
-
-        rkcg->rkcg_rk = rk;
-        rkcg->rkcg_group_id = group_id;
-        rkcg->rkcg_client_id = client_id;
-        rkcg->rkcg_coord_id = -1;
-        rkcg->rkcg_generation_id = -1;
-	rkcg->rkcg_version = 1;
-
-        mtx_init(&rkcg->rkcg_lock, mtx_plain);
-        rkcg->rkcg_ops = rd_kafka_q_new(rk);
-        rkcg->rkcg_ops->rkq_serve = rd_kafka_cgrp_op_serve;
-        rkcg->rkcg_ops->rkq_opaque = rkcg;
-        rkcg->rkcg_wait_coord_q = rd_kafka_q_new(rk);
-        rkcg->rkcg_wait_coord_q->rkq_serve = rkcg->rkcg_ops->rkq_serve;
-        rkcg->rkcg_wait_coord_q->rkq_opaque = rkcg->rkcg_ops->rkq_opaque;
-        rkcg->rkcg_q = rd_kafka_q_new(rk);
-
-        TAILQ_INIT(&rkcg->rkcg_topics);
-        rd_list_init(&rkcg->rkcg_toppars, 32, NULL);
-        rd_kafka_cgrp_set_member_id(rkcg, "");
-        rkcg->rkcg_subscribed_topics =
-                rd_list_new(0, (void *)rd_kafka_topic_info_destroy);
-        rd_interval_init(&rkcg->rkcg_coord_query_intvl);
-        rd_interval_init(&rkcg->rkcg_heartbeat_intvl);
-        rd_interval_init(&rkcg->rkcg_join_intvl);
-        rd_interval_init(&rkcg->rkcg_timeout_scan_intvl);
-
-        if (RD_KAFKAP_STR_IS_NULL(group_id)) {
-                /* No group configured: Operate in legacy/SimpleConsumer mode */
-                rd_kafka_simple_consumer_add(rk);
-                /* no need to look up the group coordinator (no queries) */
-                rd_interval_disable(&rkcg->rkcg_coord_query_intvl);
-        }
-
-        if (rk->rk_conf.enable_auto_commit &&
-            rk->rk_conf.auto_commit_interval_ms > 0)
-                rd_kafka_timer_start(&rk->rk_timers,
-                                     &rkcg->rkcg_offset_commit_tmr,
-                                     rk->rk_conf.
-				     auto_commit_interval_ms * 1000ll,
-                                     rd_kafka_cgrp_offset_commit_tmr_cb,
-                                     rkcg);
-
-        return rkcg;
-}
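
The auto-commit timer set up above is driven entirely by ordinary consumer configuration. As a rough sketch, assuming the standard public API declared in rdkafka.h (the group id and interval value below are illustrative):

    #include <librdkafka/rdkafka.h>

    static rd_kafka_t *create_group_consumer (void) {
            char errstr[512];
            rd_kafka_conf_t *conf = rd_kafka_conf_new();

            /* Without a group.id the cgrp above runs in
             * legacy/SimpleConsumer mode. */
            rd_kafka_conf_set(conf, "group.id", "example-group",
                              errstr, sizeof(errstr));
            /* These two properties gate the offset-commit timer above. */
            rd_kafka_conf_set(conf, "enable.auto.commit", "true",
                              errstr, sizeof(errstr));
            rd_kafka_conf_set(conf, "auto.commit.interval.ms", "5000",
                              errstr, sizeof(errstr));

            /* rd_kafka_new() takes ownership of conf on success. */
            return rd_kafka_new(RD_KAFKA_CONSUMER, conf,
                                errstr, sizeof(errstr));
    }
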
-
-
-
-/**
- * Select a broker to handle this cgrp.
- * It prefers the coordinator broker, but if that is not available any
- * other broker in state Up is used, and if that also fails it falls
- * back to the internal broker handle.
- *
- * NOTE: The returned rkb will have had its refcnt increased.
- */
-static rd_kafka_broker_t *rd_kafka_cgrp_select_broker (rd_kafka_cgrp_t *rkcg) {
-        rd_kafka_broker_t *rkb = NULL;
-
-
-        /* No need for a managing broker when cgrp is terminated */
-        if (rkcg->rkcg_state == RD_KAFKA_CGRP_STATE_TERM)
-                return NULL;
-
-        rd_kafka_rdlock(rkcg->rkcg_rk);
-        /* Try to find the coordinator broker, if it isn't found
-         * move the cgrp to any other Up broker which will
-         * do further coord querying while waiting for the
-         * proper broker to materialise.
-         * If that also fails, go with the internal broker */
-        if (rkcg->rkcg_coord_id != -1)
-                rkb = rd_kafka_broker_find_by_nodeid(rkcg->rkcg_rk,
-                                                     rkcg->rkcg_coord_id);
-        if (!rkb)
-                rkb = rd_kafka_broker_prefer(rkcg->rkcg_rk,
-                                             rkcg->rkcg_coord_id,
-                                             RD_KAFKA_BROKER_STATE_UP);
-        if (!rkb)
-                rkb = rd_kafka_broker_internal(rkcg->rkcg_rk);
-
-        rd_kafka_rdunlock(rkcg->rkcg_rk);
-
-        /* Don't change the managing broker unless warranted.
-         * This means do not change to another non-coordinator broker
-         * while we are waiting for the proper coordinator broker to
-         * become available. */
-        if (rkb && rkcg->rkcg_rkb && rkb != rkcg->rkcg_rkb) {
-		int old_is_coord, new_is_coord;
-
-		rd_kafka_broker_lock(rkb);
-		new_is_coord = RD_KAFKA_CGRP_BROKER_IS_COORD(rkcg, rkb);
-		rd_kafka_broker_unlock(rkb);
-
-		rd_kafka_broker_lock(rkcg->rkcg_rkb);
-		old_is_coord = RD_KAFKA_CGRP_BROKER_IS_COORD(rkcg,
-							     rkcg->rkcg_rkb);
-		rd_kafka_broker_unlock(rkcg->rkcg_rkb);
-
-		if (!old_is_coord && !new_is_coord &&
-		    rkcg->rkcg_rkb->rkb_source != RD_KAFKA_INTERNAL) {
-			rd_kafka_broker_destroy(rkb);
-			rkb = rkcg->rkcg_rkb;
-			rd_kafka_broker_keep(rkb);
-		}
-        }
-
-        return rkb;
-}
-
-
-
-
-/**
- * Assign cgrp to broker.
- *
- * Locality: rdkafka main thread
- */
-static void rd_kafka_cgrp_assign_broker (rd_kafka_cgrp_t *rkcg,
-					 rd_kafka_broker_t *rkb) {
-
-	rd_kafka_assert(NULL, rkcg->rkcg_rkb == NULL);
-
-	rkcg->rkcg_rkb = rkb;
-	rd_kafka_broker_keep(rkb);
-
-        rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "BRKASSIGN",
-                     "Group \"%.*s\" management assigned to broker %s",
-                     RD_KAFKAP_STR_PR(rkcg->rkcg_group_id),
-                     rd_kafka_broker_name(rkb));
-
-        /* Reset query interval to trigger an immediate
-         * coord query if required */
-        if (!rd_interval_disabled(&rkcg->rkcg_coord_query_intvl))
-                rd_interval_reset(&rkcg->rkcg_coord_query_intvl);
-
-        if (RD_KAFKA_CGRP_BROKER_IS_COORD(rkcg, rkb))
-                rd_kafka_cgrp_set_state(rkcg, RD_KAFKA_CGRP_STATE_WAIT_BROKER_TRANSPORT);
-
-}
-
-
-/**
- * Unassign cgrp from current broker.
- *
- * Locality: main thread
- */
-static void rd_kafka_cgrp_unassign_broker (rd_kafka_cgrp_t *rkcg) {
-        rd_kafka_broker_t *rkb = rkcg->rkcg_rkb;
-
-	rd_kafka_assert(NULL, rkcg->rkcg_rkb);
-        rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "BRKUNASSIGN",
-                     "Group \"%.*s\" management unassigned "
-                     "from broker handle %s",
-                     RD_KAFKAP_STR_PR(rkcg->rkcg_group_id),
-                     rd_kafka_broker_name(rkb));
-
-        rkcg->rkcg_rkb = NULL;
-        rd_kafka_broker_destroy(rkb); /* from assign() */
-}
-
-
-/**
- * Assign cgrp to a broker to handle.
- * It prefers the coordinator broker, but if that is not available any
- * other broker in state Up is used, and if that also fails it falls
- * back to the internal broker handle.
- *
- * Returns 1 if the cgrp was reassigned, else 0.
- */
-int rd_kafka_cgrp_reassign_broker (rd_kafka_cgrp_t *rkcg) {
-        rd_kafka_broker_t *rkb;
-
-        rkb = rd_kafka_cgrp_select_broker(rkcg);
-
-        if (rkb == rkcg->rkcg_rkb) {
-		int is_coord = 0;
-
-		if (rkb) {
-			rd_kafka_broker_lock(rkb);
-			is_coord = RD_KAFKA_CGRP_BROKER_IS_COORD(rkcg, rkb);
-			rd_kafka_broker_unlock(rkb);
-		}
-		if (is_coord)
-                        rd_kafka_cgrp_set_state(rkcg, RD_KAFKA_CGRP_STATE_WAIT_BROKER_TRANSPORT);
-                else
-                        rd_kafka_cgrp_set_state(rkcg, RD_KAFKA_CGRP_STATE_WAIT_BROKER);
-
-                if (rkb)
-                        rd_kafka_broker_destroy(rkb);
-                return 0; /* No change */
-        }
-
-        rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "BRKREASSIGN",
-                     "Group \"%.*s\" management reassigned from "
-                     "broker %s to %s",
-                     RD_KAFKAP_STR_PR(rkcg->rkcg_group_id),
-                     rkcg->rkcg_rkb ?
-                     rd_kafka_broker_name(rkcg->rkcg_rkb) : "(none)",
-                     rkb ? rd_kafka_broker_name(rkb) : "(none)");
-
-
-        if (rkcg->rkcg_rkb)
-                rd_kafka_cgrp_unassign_broker(rkcg);
-
-        rd_kafka_cgrp_set_state(rkcg, RD_KAFKA_CGRP_STATE_WAIT_BROKER);
-
-        if (rkb) {
-		rd_kafka_cgrp_assign_broker(rkcg, rkb);
-		rd_kafka_broker_destroy(rkb); /* from select_broker() */
-	}
-
-        return 1;
-}
-
-
-/**
- * Update the cgrp's coordinator and move it to that broker.
- */
-void rd_kafka_cgrp_coord_update (rd_kafka_cgrp_t *rkcg, int32_t coord_id) {
-
-        if (rkcg->rkcg_coord_id == coord_id) {
-		if (rkcg->rkcg_state == RD_KAFKA_CGRP_STATE_WAIT_COORD)
-			rd_kafka_cgrp_set_state(rkcg,
-						RD_KAFKA_CGRP_STATE_WAIT_BROKER);
-                return;
-	}
-
-        rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "CGRPCOORD",
-                     "Group \"%.*s\" changing coordinator %"PRId32" -> %"PRId32,
-                     RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), rkcg->rkcg_coord_id,
-                     coord_id);
-        rkcg->rkcg_coord_id = coord_id;
-
-        rd_kafka_cgrp_set_state(rkcg, RD_KAFKA_CGRP_STATE_WAIT_BROKER);
-
-        rd_kafka_cgrp_reassign_broker(rkcg);
-}
-
-
-
-
-
-
-/**
- * Handle GroupCoordinator response
- */
-static void rd_kafka_cgrp_handle_GroupCoordinator (rd_kafka_t *rk,
-						   rd_kafka_broker_t *rkb,
-                                                   rd_kafka_resp_err_t err,
-                                                   rd_kafka_buf_t *rkbuf,
-                                                   rd_kafka_buf_t *request,
-                                                   void *opaque) {
-        const int log_decode_errors = LOG_ERR;
-        int16_t ErrorCode = 0;
-        int32_t CoordId;
-        rd_kafkap_str_t CoordHost = RD_ZERO_INIT;
-        int32_t CoordPort;
-        rd_kafka_cgrp_t *rkcg = opaque;
-        struct rd_kafka_metadata_broker mdb = RD_ZERO_INIT;
-
-        if (likely(!(ErrorCode = err))) {
-                rd_kafka_buf_read_i16(rkbuf, &ErrorCode);
-                rd_kafka_buf_read_i32(rkbuf, &CoordId);
-                rd_kafka_buf_read_str(rkbuf, &CoordHost);
-                rd_kafka_buf_read_i32(rkbuf, &CoordPort);
-        }
-
-        if (ErrorCode)
-                goto err2;
-
-
-        mdb.id = CoordId;
-	RD_KAFKAP_STR_DUPA(&mdb.host, &CoordHost);
-	mdb.port = CoordPort;
-
-        rd_rkb_dbg(rkb, CGRP, "CGRPCOORD",
-                   "Group \"%.*s\" coordinator is %s:%i id %"PRId32,
-                   RD_KAFKAP_STR_PR(rkcg->rkcg_group_id),
-                   mdb.host, mdb.port, mdb.id);
-        rd_kafka_broker_update(rkb->rkb_rk, rkb->rkb_proto, &mdb);
-
-        rd_kafka_cgrp_coord_update(rkcg, CoordId);
-        rd_kafka_cgrp_serve(rkcg); /* Serve updated state, if possible */
-        return;
-
-err_parse: /* Parse error */
-        ErrorCode = rkbuf->rkbuf_err;
-        /* FALLTHRU */
-
-err2:
-        rd_rkb_dbg(rkb, CGRP, "CGRPCOORD",
-                   "Group \"%.*s\" GroupCoordinator response error: %s",
-                   RD_KAFKAP_STR_PR(rkcg->rkcg_group_id),
-                   rd_kafka_err2str(ErrorCode));
-
-        if (ErrorCode == RD_KAFKA_RESP_ERR__DESTROY)
-                return;
-
-        if (ErrorCode == RD_KAFKA_RESP_ERR_GROUP_COORDINATOR_NOT_AVAILABLE)
-                rd_kafka_cgrp_coord_update(rkcg, -1);
-	else {
-                if (rkcg->rkcg_last_err != ErrorCode) {
-                        rd_kafka_q_op_err(rkcg->rkcg_q,
-                                          RD_KAFKA_OP_CONSUMER_ERR,
-                                          ErrorCode, 0, NULL, 0,
-                                          "GroupCoordinator response error: %s",
-                                          rd_kafka_err2str(ErrorCode));
-
-                        /* Suppress repeated errors */
-                        rkcg->rkcg_last_err = ErrorCode;
-                }
-
-		/* Continue querying */
-		rd_kafka_cgrp_set_state(rkcg, RD_KAFKA_CGRP_STATE_QUERY_COORD);
-        }
-
-        rd_kafka_cgrp_serve(rkcg); /* Serve updated state, if possible */
-}
-
-
-/**
- * Query for coordinator.
- * Ask any broker in state UP
- *
- * Locality: main thread
- */
-void rd_kafka_cgrp_coord_query (rd_kafka_cgrp_t *rkcg,
-				const char *reason) {
-	rd_kafka_broker_t *rkb;
-
-	rd_kafka_rdlock(rkcg->rkcg_rk);
-	rkb = rd_kafka_broker_any(rkcg->rkcg_rk, RD_KAFKA_BROKER_STATE_UP,
-				  rd_kafka_broker_filter_can_group_query, NULL);
-	rd_kafka_rdunlock(rkcg->rkcg_rk);
-
-	if (!rkb) {
-		rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "CGRPQUERY",
-			     "Group \"%.*s\": "
-			     "no broker available for coordinator query: %s",
-			     RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), reason);
-		return;
-	}
-
-        rd_rkb_dbg(rkb, CGRP, "CGRPQUERY",
-                   "Group \"%.*s\": querying for coordinator: %s",
-                   RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), reason);
-
-        rd_kafka_GroupCoordinatorRequest(rkb, rkcg->rkcg_group_id,
-                                         RD_KAFKA_REPLYQ(rkcg->rkcg_ops, 0),
-                                         rd_kafka_cgrp_handle_GroupCoordinator,
-                                         rkcg);
-
-        if (rkcg->rkcg_state == RD_KAFKA_CGRP_STATE_QUERY_COORD)
-                rd_kafka_cgrp_set_state(rkcg, RD_KAFKA_CGRP_STATE_WAIT_COORD);
-
-	rd_kafka_broker_destroy(rkb);
-}
-
-/**
- * @brief Mark the current coordinator as dead.
- *
- * @locality main thread
- */
-void rd_kafka_cgrp_coord_dead (rd_kafka_cgrp_t *rkcg, rd_kafka_resp_err_t err,
-			       const char *reason) {
-	rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "COORD",
-		     "Group \"%.*s\": marking the coordinator dead: %s: %s",
-		     RD_KAFKAP_STR_PR(rkcg->rkcg_group_id),
-		     rd_kafka_err2str(err), reason);
-
-	rd_kafka_cgrp_coord_update(rkcg, -1);
-
-	/* Re-query for coordinator */
-	rd_kafka_cgrp_set_state(rkcg, RD_KAFKA_CGRP_STATE_QUERY_COORD);
-	rd_kafka_cgrp_coord_query(rkcg, reason);
-}
-
-
-
-static void rd_kafka_cgrp_leave (rd_kafka_cgrp_t *rkcg, int ignore_response) {
-        rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "LEAVE",
-                   "Group \"%.*s\": leave",
-                   RD_KAFKAP_STR_PR(rkcg->rkcg_group_id));
-
-        if (rkcg->rkcg_state == RD_KAFKA_CGRP_STATE_UP)
-                rd_kafka_LeaveGroupRequest(rkcg->rkcg_rkb, rkcg->rkcg_group_id,
-                                           rkcg->rkcg_member_id,
-					   ignore_response ?
-					   RD_KAFKA_NO_REPLYQ :
-                                           RD_KAFKA_REPLYQ(rkcg->rkcg_ops, 0),
-                                           ignore_response ? NULL :
-                                           rd_kafka_handle_LeaveGroup, rkcg);
-        else if (!ignore_response)
-                rd_kafka_handle_LeaveGroup(rkcg->rkcg_rk, rkcg->rkcg_rkb,
-                                           RD_KAFKA_RESP_ERR__WAIT_COORD,
-                                           NULL, NULL, rkcg);
-}
-
-
-/**
- * Enqueue a rebalance op (if configured). 'assignment' is copied.
- * This delegates the responsibility of assign() and unassign() to the
- * application.
- *
- * Returns 1 if a rebalance op was enqueued, else 0.
- * Returns 0 if there was no rebalance_cb or 'assignment' is NULL,
- * in which case rd_kafka_cgrp_assign(rkcg,assignment) is called immediately.
- */
-static int
-rd_kafka_rebalance_op (rd_kafka_cgrp_t *rkcg,
-		       rd_kafka_resp_err_t err,
-		       rd_kafka_topic_partition_list_t *assignment,
-		       const char *reason) {
-	rd_kafka_op_t *rko;
-
-        rd_kafka_wrlock(rkcg->rkcg_rk);
-        rkcg->rkcg_c.ts_rebalance = rd_clock();
-        rkcg->rkcg_c.rebalance_cnt++;
-        rd_kafka_wrunlock(rkcg->rkcg_rk);
-
-	/* Pause current partition set consumers until new assign() is called */
-	if (rkcg->rkcg_assignment)
-		rd_kafka_toppars_pause_resume(rkcg->rkcg_rk, 1,
-					      RD_KAFKA_TOPPAR_F_LIB_PAUSE,
-					      rkcg->rkcg_assignment);
-
-	if (!(rkcg->rkcg_rk->rk_conf.enabled_events & RD_KAFKA_EVENT_REBALANCE)
-	    || !assignment) {
-	no_delegation:
-		if (err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS)
-			rd_kafka_cgrp_assign(rkcg, assignment);
-		else
-			rd_kafka_cgrp_unassign(rkcg);
-		return 0;
-	}
-
-	rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "ASSIGN",
-		     "Group \"%s\": delegating %s of %d partition(s) "
-		     "to application rebalance callback on queue %s: %s",
-		     rkcg->rkcg_group_id->str,
-		     err == RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS ?
-		     "revoke":"assign", assignment->cnt,
-		     rd_kafka_q_dest_name(rkcg->rkcg_q), reason);
-
-	rd_kafka_cgrp_set_join_state(
-		rkcg,
-		err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS ?
-		RD_KAFKA_CGRP_JOIN_STATE_WAIT_ASSIGN_REBALANCE_CB :
-		RD_KAFKA_CGRP_JOIN_STATE_WAIT_REVOKE_REBALANCE_CB);
-
-	rko = rd_kafka_op_new(RD_KAFKA_OP_REBALANCE);
-	rko->rko_err = err;
-	rko->rko_u.rebalance.partitions =
-		rd_kafka_topic_partition_list_copy(assignment);
-
-	if (rd_kafka_q_enq(rkcg->rkcg_q, rko) == 0) {
-		/* Queue disabled, handle assignment here. */
-		goto no_delegation;
-	}
-
-	return 1;
-}
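
The op enqueued above only hands the partition list to the application; the application has to answer it for the group to make progress. A minimal sketch of such a rebalance callback, assuming the standard public API (conf setup and error handling omitted):

    #include <librdkafka/rdkafka.h>

    /* Answers every rebalance op delegated by rd_kafka_rebalance_op():
     * assign() on _ASSIGN_PARTITIONS, assign(NULL) on _REVOKE_PARTITIONS,
     * which is what moves the cgrp out of the WAIT_*_REBALANCE_CB
     * join states. */
    static void rebalance_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err,
                              rd_kafka_topic_partition_list_t *partitions,
                              void *opaque) {
            if (err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS)
                    rd_kafka_assign(rk, partitions);
            else    /* __REVOKE_PARTITIONS or an error */
                    rd_kafka_assign(rk, NULL);
    }

    /* Registered on the conf object before rd_kafka_new():
     *   rd_kafka_conf_set_rebalance_cb(conf, rebalance_cb); */
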
-
-
-/**
- * @brief Run group assignment.
- */
-static void
-rd_kafka_cgrp_assignor_run (rd_kafka_cgrp_t *rkcg,
-                            const char *protocol_name,
-                            rd_kafka_resp_err_t err,
-                            rd_kafka_metadata_t *metadata,
-                            rd_kafka_group_member_t *members,
-                            int member_cnt) {
-        char errstr[512];
-
-        if (err) {
-                rd_snprintf(errstr, sizeof(errstr),
-                            "Failed to get cluster metadata: %s",
-                            rd_kafka_err2str(err));
-                goto err;
-        }
-
-        *errstr = '\0';
-
-        /* Run assignor */
-        err = rd_kafka_assignor_run(rkcg, protocol_name, metadata,
-                                    members, member_cnt,
-                                    errstr, sizeof(errstr));
-
-        if (err) {
-                if (!*errstr)
-                        rd_snprintf(errstr, sizeof(errstr), "%s",
-                                    rd_kafka_err2str(err));
-                goto err;
-        }
-
-        rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "ASSIGNOR",
-                     "Group \"%s\": \"%s\" assignor run for %d member(s)",
-                     rkcg->rkcg_group_id->str, protocol_name, member_cnt);
-
-        rd_kafka_cgrp_set_join_state(rkcg, RD_KAFKA_CGRP_JOIN_STATE_WAIT_SYNC);
-
-        /* Respond to broker with assignment set or error */
-        rd_kafka_SyncGroupRequest(rkcg->rkcg_rkb,
-                                  rkcg->rkcg_group_id, rkcg->rkcg_generation_id,
-                                  rkcg->rkcg_member_id,
-                                  members, err ? 0 : member_cnt,
-                                  RD_KAFKA_REPLYQ(rkcg->rkcg_ops, 0),
-                                  rd_kafka_handle_SyncGroup, rkcg);
-        return;
-
-err:
-        rd_kafka_log(rkcg->rkcg_rk, LOG_ERR, "ASSIGNOR",
-                     "Group \"%s\": failed to run assignor \"%s\" for "
-                     "%d member(s): %s",
-                     rkcg->rkcg_group_id->str, protocol_name,
-                     member_cnt, errstr);
-
-        rd_kafka_cgrp_set_join_state(rkcg, RD_KAFKA_CGRP_JOIN_STATE_INIT);
-
-}
-
-
-
-/**
- * @brief Op callback from handle_JoinGroup
- */
-static rd_kafka_op_res_t
-rd_kafka_cgrp_assignor_handle_Metadata_op (rd_kafka_t *rk,
-                                           rd_kafka_q_t *rkq,
-                                           rd_kafka_op_t *rko) {
-        rd_kafka_cgrp_t *rkcg = rk->rk_cgrp;
-
-        if (rko->rko_err == RD_KAFKA_RESP_ERR__DESTROY)
-                return RD_KAFKA_OP_RES_HANDLED; /* Terminating */
-
-        if (rkcg->rkcg_join_state != RD_KAFKA_CGRP_JOIN_STATE_WAIT_METADATA)
-                return RD_KAFKA_OP_RES_HANDLED; /* From outdated state */
-
-        if (!rkcg->rkcg_group_leader.protocol) {
-                rd_kafka_dbg(rk, CGRP, "GRPLEADER",
-                             "Group \"%.*s\": no longer leader: "
-                             "not running assignor",
-                             RD_KAFKAP_STR_PR(rkcg->rkcg_group_id));
-                return RD_KAFKA_OP_RES_HANDLED;
-        }
-
-        rd_kafka_cgrp_assignor_run(rkcg,
-                                   rkcg->rkcg_group_leader.protocol,
-                                   rko->rko_err, rko->rko_u.metadata.md,
-                                   rkcg->rkcg_group_leader.members,
-                                   rkcg->rkcg_group_leader.member_cnt);
-
-        return RD_KAFKA_OP_RES_HANDLED;
-}
-
-
-/**
- * Parse a single JoinGroup.Members.MemberMetadata for "consumer" ProtocolType
- *
- * Protocol definition:
- * https://cwiki.apache.org/confluence/display/KAFKA/Kafka+Client-side+Assignment+Proposal
- *
- * Returns 0 on success or -1 on error.
- */
-static int
-rd_kafka_group_MemberMetadata_consumer_read (
-        rd_kafka_broker_t *rkb, rd_kafka_group_member_t *rkgm,
-        const rd_kafkap_str_t *GroupProtocol,
-        const rd_kafkap_bytes_t *MemberMetadata) {
-
-        rd_kafka_buf_t *rkbuf;
-        int16_t Version;
-        int32_t subscription_cnt;
-        rd_kafkap_bytes_t UserData;
-        const int log_decode_errors = LOG_ERR;
-        rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR__BAD_MSG;
-
-        /* Create a shadow-buffer pointing to the metadata to ease parsing. */
-        rkbuf = rd_kafka_buf_new_shadow(MemberMetadata->data,
-                                        RD_KAFKAP_BYTES_LEN(MemberMetadata),
-                                        NULL);
-
-        rd_kafka_buf_read_i16(rkbuf, &Version);
-        rd_kafka_buf_read_i32(rkbuf, &subscription_cnt);
-
-        if (subscription_cnt > 10000 || subscription_cnt <= 0)
-                goto err;
-
-        rkgm->rkgm_subscription =
-                rd_kafka_topic_partition_list_new(subscription_cnt);
-
-        while (subscription_cnt-- > 0) {
-                rd_kafkap_str_t Topic;
-                char *topic_name;
-                rd_kafka_buf_read_str(rkbuf, &Topic);
-                RD_KAFKAP_STR_DUPA(&topic_name, &Topic);
-                rd_kafka_topic_partition_list_add(rkgm->rkgm_subscription,
-                                                  topic_name,
-                                                  RD_KAFKA_PARTITION_UA);
-        }
-
-        rd_kafka_buf_read_bytes(rkbuf, &UserData);
-        rkgm->rkgm_userdata = rd_kafkap_bytes_copy(&UserData);
-
-        rd_kafka_buf_destroy(rkbuf);
-
-        return 0;
-
- err_parse:
-        err = rkbuf->rkbuf_err;
-
- err:
-        rd_rkb_dbg(rkb, CGRP, "MEMBERMETA",
-                   "Failed to parse MemberMetadata for \"%.*s\": %s",
-                   RD_KAFKAP_STR_PR(rkgm->rkgm_member_id),
-                   rd_kafka_err2str(err));
-        if (rkgm->rkgm_subscription) {
-                rd_kafka_topic_partition_list_destroy(rkgm->
-                                                      rkgm_subscription);
-                rkgm->rkgm_subscription = NULL;
-        }
-
-        rd_kafka_buf_destroy(rkbuf);
-        return -1;
-}
-
-
-
-
-/**
- * @brief cgrp handler for JoinGroup responses
- * opaque must be the cgrp handle.
- *
- * @locality cgrp broker thread
- */
-static void rd_kafka_cgrp_handle_JoinGroup (rd_kafka_t *rk,
-                                            rd_kafka_broker_t *rkb,
-                                            rd_kafka_resp_err_t err,
-                                            rd_kafka_buf_t *rkbuf,
-                                            rd_kafka_buf_t *request,
-                                            void *opaque) {
-        rd_kafka_cgrp_t *rkcg = opaque;
-        const int log_decode_errors = LOG_ERR;
-        int16_t ErrorCode = 0;
-        int32_t GenerationId;
-        rd_kafkap_str_t Protocol, LeaderId, MyMemberId;
-        int32_t member_cnt;
-        int actions;
-        int i_am_leader = 0;
-
-        if (rkcg->rkcg_join_state != RD_KAFKA_CGRP_JOIN_STATE_WAIT_JOIN) {
-                rd_kafka_dbg(rkb->rkb_rk, CGRP, "JOINGROUP",
-                             "JoinGroup response: discarding outdated request "
-                             "(now in join-state %s)",
-                             rd_kafka_cgrp_join_state_names[rkcg->
-                                                            rkcg_join_state]);
-                return;
-        }
-
-        if (err) {
-                ErrorCode = err;
-                goto err;
-        }
-
-        rd_kafka_buf_read_i16(rkbuf, &ErrorCode);
-        rd_kafka_buf_read_i32(rkbuf, &GenerationId);
-        rd_kafka_buf_read_str(rkbuf, &Protocol);
-        rd_kafka_buf_read_str(rkbuf, &LeaderId);
-        rd_kafka_buf_read_str(rkbuf, &MyMemberId);
-        rd_kafka_buf_read_i32(rkbuf, &member_cnt);
-
-        if (!ErrorCode && RD_KAFKAP_STR_IS_NULL(&Protocol)) {
-                /* Protocol not set, we will not be able to find
-                 * a matching assignor so error out early. */
-                ErrorCode = RD_KAFKA_RESP_ERR__BAD_MSG;
-        }
-
-        rd_kafka_dbg(rkb->rkb_rk, CGRP, "JOINGROUP",
-                     "JoinGroup response: GenerationId %"PRId32", "
-                     "Protocol %.*s, LeaderId %.*s%s, my MemberId %.*s, "
-                     "%"PRId32" members in group: %s",
-                     GenerationId,
-                     RD_KAFKAP_STR_PR(&Protocol),
-                     RD_KAFKAP_STR_PR(&LeaderId),
-                     !rd_kafkap_str_cmp(&LeaderId, &MyMemberId) ? " (me)" : "",
-                     RD_KAFKAP_STR_PR(&MyMemberId),
-                     member_cnt,
-                     ErrorCode ? rd_kafka_err2str(ErrorCode) : "(no error)");
-
-        if (!ErrorCode) {
-                char *my_member_id;
-                RD_KAFKAP_STR_DUPA(&my_member_id, &MyMemberId);
-                rkcg->rkcg_generation_id = GenerationId;
-                rd_kafka_cgrp_set_member_id(rkcg, my_member_id);
-                i_am_leader = !rd_kafkap_str_cmp(&LeaderId, &MyMemberId);
-        } else {
-                rd_interval_backoff(&rkcg->rkcg_join_intvl, 1000*1000);
-                goto err;
-        }
-
-        if (i_am_leader) {
-                rd_kafka_group_member_t *members;
-                int i;
-                int sub_cnt = 0;
-                rd_list_t topics;
-                rd_kafka_op_t *rko;
-                rd_kafka_dbg(rkb->rkb_rk, CGRP, "JOINGROUP",
-                             "Elected leader for group \"%s\" "
-                             "with %"PRId32" member(s)",
-                             rkcg->rkcg_group_id->str, member_cnt);
-
-                if (member_cnt > 100000) {
-                        err = RD_KAFKA_RESP_ERR__BAD_MSG;
-                        goto err;
-                }
-
-                rd_list_init(&topics, member_cnt, rd_free);
-
-                members = rd_calloc(member_cnt, sizeof(*members));
-
-                for (i = 0 ; i < member_cnt ; i++) {
-                        rd_kafkap_str_t MemberId;
-                        rd_kafkap_bytes_t MemberMetadata;
-                        rd_kafka_group_member_t *rkgm;
-
-                        rd_kafka_buf_read_str(rkbuf, &MemberId);
-                        rd_kafka_buf_read_bytes(rkbuf, &MemberMetadata);
-
-                        rkgm = &members[sub_cnt];
-                        rkgm->rkgm_member_id = rd_kafkap_str_copy(&MemberId);
-                        rd_list_init(&rkgm->rkgm_eligible, 0, NULL);
-
-                        if (rd_kafka_group_MemberMetadata_consumer_read(
-                                    rkb, rkgm, &Protocol, &MemberMetadata)) {
-                                /* Failed to parse this member's metadata,
-                                 * ignore it. */
-                        } else {
-                                sub_cnt++;
-                                rkgm->rkgm_assignment =
-                                        rd_kafka_topic_partition_list_new(
-                                                rkgm->rkgm_subscription->size);
-                                rd_kafka_topic_partition_list_get_topic_names(
-                                        rkgm->rkgm_subscription, &topics,
-                                        0/*dont include regex*/);
-                        }
-
-                }
-
-                /* FIXME: What to do if parsing failed for some/all members?
-                 *        It is a sign of incompatibility. */
-
-
-                rd_kafka_cgrp_group_leader_reset(rkcg,
-                                                 "JoinGroup response clean-up");
-
-                rkcg->rkcg_group_leader.protocol = RD_KAFKAP_STR_DUP(&Protocol);
-                rd_kafka_assert(NULL, rkcg->rkcg_group_leader.members == NULL);
-                rkcg->rkcg_group_leader.members    = members;
-                rkcg->rkcg_group_leader.member_cnt = sub_cnt;
-
-                rd_kafka_cgrp_set_join_state(
-                        rkcg, RD_KAFKA_CGRP_JOIN_STATE_WAIT_METADATA);
-
-                /* The assignor will need metadata so fetch it asynchronously
-                 * and run the assignor when we get a reply.
-                 * Create a callback op that the generic metadata code
-                 * will trigger when metadata has been parsed. */
-                rko = rd_kafka_op_new_cb(
-                        rkcg->rkcg_rk, RD_KAFKA_OP_METADATA,
-                        rd_kafka_cgrp_assignor_handle_Metadata_op);
-                rd_kafka_op_set_replyq(rko, rkcg->rkcg_ops, NULL);
-
-                rd_kafka_MetadataRequest(rkb, &topics,
-                                         "partition assignor", rko);
-                rd_list_destroy(&topics);
-
-        } else {
-                rd_kafka_cgrp_set_join_state(
-                        rkcg, RD_KAFKA_CGRP_JOIN_STATE_WAIT_SYNC);
-
-                rd_kafka_SyncGroupRequest(rkb, rkcg->rkcg_group_id,
-                                          rkcg->rkcg_generation_id,
-                                          rkcg->rkcg_member_id,
-                                          NULL, 0,
-                                          RD_KAFKA_REPLYQ(rkcg->rkcg_ops, 0),
-                                          rd_kafka_handle_SyncGroup, rkcg);
-
-        }
-
-err:
-        actions = rd_kafka_err_action(rkb, ErrorCode, rkbuf, request,
-                                      RD_KAFKA_ERR_ACTION_IGNORE,
-                                      RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID,
-
-                                      RD_KAFKA_ERR_ACTION_END);
-
-        if (actions & RD_KAFKA_ERR_ACTION_REFRESH) {
-                /* Re-query for coordinator */
-                rd_kafka_cgrp_op(rkcg, NULL, RD_KAFKA_NO_REPLYQ,
-                                 RD_KAFKA_OP_COORD_QUERY, ErrorCode);
-        }
-
-        if (ErrorCode) {
-                if (ErrorCode == RD_KAFKA_RESP_ERR__DESTROY)
-                        return; /* Termination */
-
-                if (actions & RD_KAFKA_ERR_ACTION_PERMANENT)
-                        rd_kafka_q_op_err(rkcg->rkcg_q,
-                                          RD_KAFKA_OP_CONSUMER_ERR,
-                                          ErrorCode, 0, NULL, 0,
-                                          "JoinGroup failed: %s",
-                                          rd_kafka_err2str(ErrorCode));
-
-                if (ErrorCode == RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID)
-                        rd_kafka_cgrp_set_member_id(rkcg, "");
-                rd_kafka_cgrp_set_join_state(rkcg,
-                                             RD_KAFKA_CGRP_JOIN_STATE_INIT);
-        }
-
-        return;
-
- err_parse:
-        ErrorCode = rkbuf->rkbuf_err;
-        goto err;
-}
-
-
-/**
- * @brief Check subscription against requested Metadata.
- */
-static rd_kafka_op_res_t
-rd_kafka_cgrp_handle_Metadata_op (rd_kafka_t *rk, rd_kafka_q_t *rkq,
-                                  rd_kafka_op_t *rko) {
-        rd_kafka_cgrp_t *rkcg = rk->rk_cgrp;
-
-        if (rko->rko_err == RD_KAFKA_RESP_ERR__DESTROY)
-                return RD_KAFKA_OP_RES_HANDLED; /* Terminating */
-
-        rd_kafka_cgrp_metadata_update_check(rkcg, 0/*dont rejoin*/);
-
-        return RD_KAFKA_OP_RES_HANDLED;
-}
-
-
-/**
- * @brief (Async) Refresh metadata (for cgrp's needs)
- *
- * @returns 1 if metadata refresh was requested, or 0 if metadata is
- *          up to date, or -1 if no broker is available for metadata requests.
- *
- * @locks none
- * @locality rdkafka main thread
- */
-static int rd_kafka_cgrp_metadata_refresh (rd_kafka_cgrp_t *rkcg,
-                                            int *metadata_agep,
-                                            const char *reason) {
-        rd_kafka_t *rk = rkcg->rkcg_rk;
-        rd_kafka_op_t *rko;
-        rd_list_t topics;
-        rd_kafka_resp_err_t err;
-
-        rd_list_init(&topics, 8, rd_free);
-
-        /* Insert all non-wildcard topics in cache. */
-        rd_kafka_metadata_cache_hint_rktparlist(rkcg->rkcg_rk,
-                                                rkcg->rkcg_subscription,
-                                                NULL, 0/*dont replace*/);
-
-        if (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_WILDCARD_SUBSCRIPTION) {
-                /* For wildcard subscriptions make sure the
-                 * cached full metadata isn't too old. */
-                int metadata_age = -1;
-
-                if (rk->rk_ts_full_metadata)
-                        metadata_age = (int)(rd_clock() -
-                                             rk->rk_ts_full_metadata)/1000;
-
-                *metadata_agep = metadata_age;
-
-                if (metadata_age != -1 &&
-                    metadata_age <=
-                    /* The +1000 is since metadata.refresh.interval.ms
-                     * can be set to 0. */
-                    rk->rk_conf.metadata_refresh_interval_ms + 1000) {
-                        rd_kafka_dbg(rk, CGRP|RD_KAFKA_DBG_METADATA,
-                                     "CGRPMETADATA",
-                                     "%s: metadata for wildcard subscription "
-                                     "is up to date (%dms old)",
-                                     reason, *metadata_agep);
-                        rd_list_destroy(&topics);
-                        return 0; /* Up-to-date */
-                }
-
-        } else {
-                /* Check that all subscribed topics are in the cache. */
-                int r;
-
-                rd_kafka_topic_partition_list_get_topic_names(
-                        rkcg->rkcg_subscription, &topics, 0/*no regexps*/);
-
-                rd_kafka_rdlock(rk);
-                r = rd_kafka_metadata_cache_topics_count_exists(rk, &topics,
-                                                                metadata_agep);
-                rd_kafka_rdunlock(rk);
-
-                if (r == rd_list_cnt(&topics)) {
-                        rd_kafka_dbg(rk, CGRP|RD_KAFKA_DBG_METADATA,
-                                     "CGRPMETADATA",
-                                     "%s: metadata for subscription "
-                                     "is up to date (%dms old)", reason,
-                                     *metadata_agep);
-                        rd_list_destroy(&topics);
-                        return 0; /* Up-to-date and all topics exist. */
-                }
-
-                rd_kafka_dbg(rk, CGRP|RD_KAFKA_DBG_METADATA,
-                             "CGRPMETADATA",
-                             "%s: metadata for subscription "
-                             "only available for %d/%d topics (%dms old)",
-                             reason, r, rd_list_cnt(&topics), *metadata_agep);
-
-        }
-
-        /* Async request, result will be triggered from
-         * rd_kafka_parse_metadata(). */
-        rko = rd_kafka_op_new_cb(rkcg->rkcg_rk, RD_KAFKA_OP_METADATA,
-                                 rd_kafka_cgrp_handle_Metadata_op);
-        rd_kafka_op_set_replyq(rko, rkcg->rkcg_ops, 0);
-
-        err = rd_kafka_metadata_request(rkcg->rkcg_rk, NULL, &topics,
-                                        reason, rko);
-        if (err) {
-                rd_kafka_dbg(rk, CGRP|RD_KAFKA_DBG_METADATA,
-                             "CGRPMETADATA",
-                             "%s: need to refresh metadata (%dms old) "
-                             "but no usable brokers available: %s",
-                             reason, *metadata_agep, rd_kafka_err2str(err));
-                rd_kafka_op_destroy(rko);
-        }
-
-        rd_list_destroy(&topics);
-
-        return err ? -1 : 1;
-}
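
The wildcard branch above is what regex subscriptions on the public API map to. A short sketch, assuming the standard public API (the topic pattern is illustrative; a leading '^' marks the name as a regular expression):

    #include <librdkafka/rdkafka.h>

    static void subscribe_by_pattern (rd_kafka_t *rk) {
            rd_kafka_topic_partition_list_t *topics =
                    rd_kafka_topic_partition_list_new(1);

            /* Regex subscription: matched against full cluster metadata,
             * hence the full-metadata freshness check above. */
            rd_kafka_topic_partition_list_add(topics, "^myapp\\..*",
                                              RD_KAFKA_PARTITION_UA);
            rd_kafka_subscribe(rk, topics);

            rd_kafka_topic_partition_list_destroy(topics);
    }
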
-
-
-
-static void rd_kafka_cgrp_join (rd_kafka_cgrp_t *rkcg) {
-        int metadata_age;
-
-        if (rkcg->rkcg_state != RD_KAFKA_CGRP_STATE_UP ||
-            rkcg->rkcg_join_state != RD_KAFKA_CGRP_JOIN_STATE_INIT)
-                return;
-
-        rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "JOIN",
-                     "Group \"%.*s\": join with %d (%d) subscribed topic(s)",
-                     RD_KAFKAP_STR_PR(rkcg->rkcg_group_id),
-                     rd_list_cnt(rkcg->rkcg_subscribed_topics),
-                     rkcg->rkcg_subscription->cnt);
-
-
-        /* See if we need to query metadata to continue:
-         * - if subscription contains wildcards:
-         *   * query all topics in cluster
-         *
-         * - if subscription does not contain wildcards but
-         *   some topics are missing from the local metadata cache:
-         *   * query subscribed topics (all cached ones)
-         *
-         * - otherwise:
-         *   * rely on topic metadata cache
-         */
-        /* We need up-to-date full metadata to continue,
-         * refresh metadata if necessary. */
-        if (rd_kafka_cgrp_metadata_refresh(rkcg, &metadata_age,
-                                           "consumer join") == 1) {
-                rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "JOIN",
-                             "Group \"%.*s\": "
-                             "postponing join until up-to-date "
-                             "metadata is available",
-                             RD_KAFKAP_STR_PR(rkcg->rkcg_group_id));
-                return; /* ^ async call */
-        }
-
-        if (rd_list_empty(rkcg->rkcg_subscribed_topics))
-                rd_kafka_cgrp_metadata_update_check(rkcg, 0/*dont join*/);
-
-        if (rd_list_empty(rkcg->rkcg_subscribed_topics)) {
-                rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "JOIN",
-                             "Group \"%.*s\": "
-                             "no matching topics based on %dms old metadata: "
-                             "next metadata refresh in %dms",
-                             RD_KAFKAP_STR_PR(rkcg->rkcg_group_id),
-                             metadata_age,
-                             rkcg->rkcg_rk->rk_conf.
-                             metadata_refresh_interval_ms - metadata_age);
-                return;
-        }
-
-        rd_kafka_cgrp_set_join_state(rkcg, RD_KAFKA_CGRP_JOIN_STATE_WAIT_JOIN);
-        rd_kafka_JoinGroupRequest(rkcg->rkcg_rkb, rkcg->rkcg_group_id,
-                                  rkcg->rkcg_member_id,
-                                  rkcg->rkcg_rk->rk_conf.group_protocol_type,
-                                  rkcg->rkcg_subscribed_topics,
-                                  RD_KAFKA_REPLYQ(rkcg->rkcg_ops, 0),
-                                  rd_kafka_cgrp_handle_JoinGroup, rkcg);
-}
-
-/**
- * Rejoin group on update to effective subscribed topics list
- */
-static void rd_kafka_cgrp_rejoin (rd_kafka_cgrp_t *rkcg) {
-        /*
-         * Clean-up group leader duties, if any.
-         */
-        rd_kafka_cgrp_group_leader_reset(rkcg, "Group rejoin");
-
-        rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "REJOIN",
-                     "Group \"%.*s\" rejoining in join-state %s "
-                     "with%s an assignment",
-                     RD_KAFKAP_STR_PR(rkcg->rkcg_group_id),
-                     rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state],
-                     rkcg->rkcg_assignment ? "" : "out");
-
-        /* Remove assignment (async), if any. If there is already an
-         * unassign in progress we don't need to bother. */
-        if (rkcg->rkcg_assignment) {
-		if (!(rkcg->rkcg_flags & RD_KAFKA_CGRP_F_WAIT_UNASSIGN)) {
-			rkcg->rkcg_flags |= RD_KAFKA_CGRP_F_WAIT_UNASSIGN;
-
-			rd_kafka_rebalance_op(
-				rkcg,
-				RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS,
-				rkcg->rkcg_assignment, "unsubscribe");
-		}
-	} else {
-                rd_kafka_cgrp_set_join_state(rkcg,
-                                             RD_KAFKA_CGRP_JOIN_STATE_INIT);
-		rd_kafka_cgrp_join(rkcg);
-	}
-}
-
-/**
- * Update the effective list of subscribed topics and trigger a rejoin
- * if it changed.
- *
- * Set \p tinfos to NULL for clearing the list.
- *
- * @param tinfos rd_list_t(rd_kafka_topic_info_t *): new effective topic list
- *
- * @returns 1 on change, else 0.
- *
- * @remark Takes ownership of \p tinfos
- */
-static int
-rd_kafka_cgrp_update_subscribed_topics (rd_kafka_cgrp_t *rkcg,
-                                        rd_list_t *tinfos) {
-        rd_kafka_topic_info_t *tinfo;
-        int i;
-
-        if (!tinfos) {
-                if (!rd_list_empty(rkcg->rkcg_subscribed_topics))
-                        rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "SUBSCRIPTION",
-                                     "Group \"%.*s\": "
-                                     "clearing subscribed topics list (%d)",
-                                     RD_KAFKAP_STR_PR(rkcg->rkcg_group_id),
-                                     rd_list_cnt(rkcg->rkcg_subscribed_topics));
-                tinfos = rd_list_new(0, (void *)rd_kafka_topic_info_destroy);
-
-        } else {
-                if (rd_list_cnt(tinfos) == 0)
-                        rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "SUBSCRIPTION",
-                                     "Group \"%.*s\": "
-                                     "no topics in metadata matched "
-                                     "subscription",
-                                     RD_KAFKAP_STR_PR(rkcg->rkcg_group_id));
-        }
-
-        /* Sort for comparison */
-        rd_list_sort(tinfos, rd_kafka_topic_info_cmp);
-
-        /* Compare to existing to see if anything changed. */
-        if (!rd_list_cmp(rkcg->rkcg_subscribed_topics, tinfos,
-                         rd_kafka_topic_info_cmp)) {
-                /* No change */
-                rd_list_destroy(tinfos);
-                return 0;
-        }
-
-        rd_kafka_dbg(rkcg->rkcg_rk, CGRP|RD_KAFKA_DBG_METADATA, "SUBSCRIPTION",
-                     "Group \"%.*s\": effective subscription list changed "
-                     "from %d to %d topic(s):",
-                     RD_KAFKAP_STR_PR(rkcg->rkcg_group_id),
-                     rd_list_cnt(rkcg->rkcg_subscribed_topics),
-                     rd_list_cnt(tinfos));
-
-        RD_LIST_FOREACH(tinfo, tinfos, i)
-                rd_kafka_dbg(rkcg->rkcg_rk, CGRP|RD_KAFKA_DBG_METADATA,
-                             "SUBSCRIPTION",
-                             " Topic %s with %d partition(s)",
-                             tinfo->topic, tinfo->partition_cnt);
-
-        rd_list_destroy(rkcg->rkcg_subscribed_topics);
-
-        rkcg->rkcg_subscribed_topics = tinfos;
-
-        return 1;
-}
-
-
-
-/**
- * @brief Handle Heartbeat response.
- */
-void rd_kafka_cgrp_handle_Heartbeat (rd_kafka_t *rk,
-                                     rd_kafka_broker_t *rkb,
-                                     rd_kafka_resp_err_t err,
-                                     rd_kafka_buf_t *rkbuf,
-                                     rd_kafka_buf_t *request,
-                                     void *opaque) {
-        rd_kafka_cgrp_t *rkcg = rk->rk_cgrp;
-        const int log_decode_errors = LOG_ERR;
-        int16_t ErrorCode = 0;
-        int actions;
-
-        if (err) {
-                ErrorCode = err;
-                goto err;
-        }
-
-        rd_kafka_buf_read_i16(rkbuf, &ErrorCode);
-
-err:
-        actions = rd_kafka_err_action(rkb, ErrorCode, rkbuf, request,
-                                      RD_KAFKA_ERR_ACTION_END);
-
-        rd_dassert(rkcg->rkcg_flags & RD_KAFKA_CGRP_F_HEARTBEAT_IN_TRANSIT);
-        rkcg->rkcg_flags &= ~RD_KAFKA_CGRP_F_HEARTBEAT_IN_TRANSIT;
-
-        if (actions & RD_KAFKA_ERR_ACTION_REFRESH) {
-                /* Re-query for coordinator */
-                rd_kafka_cgrp_op(rkcg, NULL, RD_KAFKA_NO_REPLYQ,
-                                 RD_KAFKA_OP_COORD_QUERY, ErrorCode);
-                /* Schedule a retry */
-                if (ErrorCode != RD_KAFKA_RESP_ERR_NOT_COORDINATOR_FOR_GROUP) {
-                        rkcg->rkcg_flags |= RD_KAFKA_CGRP_F_HEARTBEAT_IN_TRANSIT;
-                        rd_kafka_buf_keep(request);
-                        rd_kafka_broker_buf_retry(request->rkbuf_rkb, request);
-                }
-                return;
-        }
-
-        if (ErrorCode != 0 && ErrorCode != RD_KAFKA_RESP_ERR__DESTROY)
-                rd_kafka_cgrp_handle_heartbeat_error(rkcg, ErrorCode);
-
-        return;
-
- err_parse:
-        ErrorCode = rkbuf->rkbuf_err;
-        goto err;
-}
-
-
-
-/**
- * @brief Send Heartbeat
- */
-static void rd_kafka_cgrp_heartbeat (rd_kafka_cgrp_t *rkcg,
-                                     rd_kafka_broker_t *rkb) {
-        /* Skip heartbeat if we have one in transit */
-        if (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_HEARTBEAT_IN_TRANSIT)
-                return;
-
-        rkcg->rkcg_flags |= RD_KAFKA_CGRP_F_HEARTBEAT_IN_TRANSIT;
-        rd_kafka_HeartbeatRequest(rkb, rkcg->rkcg_group_id,
-                                  rkcg->rkcg_generation_id,
-                                  rkcg->rkcg_member_id,
-                                  RD_KAFKA_REPLYQ(rkcg->rkcg_ops, 0),
-                                  rd_kafka_cgrp_handle_Heartbeat, NULL);
-}
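
How often this is invoked, and how long the broker tolerates silence, is configuration-driven rather than decided here. A sketch of the two relevant properties, assuming the standard configuration names (values illustrative):

    #include <librdkafka/rdkafka.h>

    static void tune_group_liveness (rd_kafka_conf_t *conf) {
            char errstr[512];

            /* Broker-side limit before the member is evicted from the group. */
            rd_kafka_conf_set(conf, "session.timeout.ms", "30000",
                              errstr, sizeof(errstr));
            /* Target interval between the Heartbeat requests sent above. */
            rd_kafka_conf_set(conf, "heartbeat.interval.ms", "3000",
                              errstr, sizeof(errstr));
    }
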
-
-/**
- * Cgrp is now terminated: decommission it and signal back to application.
- */
-static void rd_kafka_cgrp_terminated (rd_kafka_cgrp_t *rkcg) {
-
-	rd_kafka_assert(NULL, rkcg->rkcg_wait_unassign_cnt == 0);
-	rd_kafka_assert(NULL, rkcg->rkcg_wait_commit_cnt == 0);
-	rd_kafka_assert(NULL, !(rkcg->rkcg_flags&RD_KAFKA_CGRP_F_WAIT_UNASSIGN));
-        rd_kafka_assert(NULL, rkcg->rkcg_state == RD_KAFKA_CGRP_STATE_TERM);
-
-        rd_kafka_timer_stop(&rkcg->rkcg_rk->rk_timers,
-                            &rkcg->rkcg_offset_commit_tmr, 1/*lock*/);
-
-	rd_kafka_q_purge(rkcg->rkcg_wait_coord_q);
-
-	/* Disable and empty ops queue since there will be no
-	 * (broker) thread serving it anymore after the unassign_broker
-	 * below.
-	 * This prevents a hang on destroy where responses are enqueued on rkcg_ops
-	 * without anything serving the queue. */
-	rd_kafka_q_disable(rkcg->rkcg_ops);
-	rd_kafka_q_purge(rkcg->rkcg_ops);
-
-	if (rkcg->rkcg_rkb)
-		rd_kafka_cgrp_unassign_broker(rkcg);
-
-        if (rkcg->rkcg_reply_rko) {
-                /* Signal back to application. */
-                rd_kafka_replyq_enq(&rkcg->rkcg_reply_rko->rko_replyq,
-				    rkcg->rkcg_reply_rko, 0);
-                rkcg->rkcg_reply_rko = NULL;
-        }
-}
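
The reply op enqueued back to the application above is what a blocking close waits for. A minimal shutdown sketch, assuming the standard public API:

    #include <librdkafka/rdkafka.h>

    static void shutdown_consumer (rd_kafka_t *rk) {
            /* Leaves the group, revokes the assignment and blocks until
             * the cgrp has reached the terminated state handled above. */
            rd_kafka_consumer_close(rk);
            rd_kafka_destroy(rk);
    }
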
-
-
-/**
- * If a cgrp is terminating and all outstanding ops are now finished
- * then progress to final termination and return 1.
- * Else returns 0.
- */
-static RD_INLINE int rd_kafka_cgrp_try_terminate (rd_kafka_cgrp_t *rkcg) {
-
-        if (rkcg->rkcg_state == RD_KAFKA_CGRP_STATE_TERM)
-                return 1;
-
-	if (likely(!(rkcg->rkcg_flags & RD_KAFKA_CGRP_F_TERMINATE)))
-		return 0;
-
-	/* Check if wait-coord queue has timed out. */
-	if (rd_kafka_q_len(rkcg->rkcg_wait_coord_q) > 0 &&
-	    rkcg->rkcg_ts_terminate +
-	    (rkcg->rkcg_rk->rk_conf.group_session_timeout_ms * 1000) <
-	    rd_clock()) {
-		rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "CGRPTERM",
-			     "Group \"%s\": timing out %d op(s) in "
-			     "wait-for-coordinator queue",
-			     rkcg->rkcg_group_id->str,
-			     rd_kafka_q_len(rkcg->rkcg_wait_coord_q));
-		rd_kafka_q_disable(rkcg->rkcg_wait_coord_q);
-		if (rd_kafka_q_concat(rkcg->rkcg_ops,
-				      rkcg->rkcg_wait_coord_q) == -1) {
-			/* ops queue shut down, purge coord queue */
-			rd_kafka_q_purge(rkcg->rkcg_wait_coord_q);
-		}
-	}
-
-	if (!RD_KAFKA_CGRP_WAIT_REBALANCE_CB(rkcg) &&
-	    rd_list_empty(&rkcg->rkcg_toppars) &&
-	    rkcg->rkcg_wait_unassign_cnt == 0 &&
-	    rkcg->rkcg_wait_commit_cnt == 0 &&
-            !(rkcg->rkcg_flags & RD_KAFKA_CGRP_F_WAIT_UNASSIGN)) {
-                /* Since we might be deep down in a 'rko' handler
-                 * called from cgrp_op_serve() we can't call terminated()
-                 * directly since it will decommission the rkcg_ops queue
-                 * that might be locked by intermediate functions.
-                 * Instead set the TERM state and let the cgrp terminate
-                 * at its own discretion. */
-                rd_kafka_cgrp_set_state(rkcg, RD_KAFKA_CGRP_STATE_TERM);
-                return 1;
-        } else {
-		rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "CGRPTERM",
-			     "Group \"%s\": "
-			     "waiting for %s%d toppar(s), %d unassignment(s), "
-			     "%d commit(s)%s (state %s, join-state %s) "
-			     "before terminating",
-			     rkcg->rkcg_group_id->str,
-                             RD_KAFKA_CGRP_WAIT_REBALANCE_CB(rkcg) ?
-                             "rebalance_cb, ": "",
-			     rd_list_cnt(&rkcg->rkcg_toppars),
-			     rkcg->rkcg_wait_unassign_cnt,
-			     rkcg->rkcg_wait_commit_cnt,
-			     (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_WAIT_UNASSIGN)?
-			     ", wait-unassign flag," : "",
-			     rd_kafka_cgrp_state_names[rkcg->rkcg_state],
-			     rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state]);
-                return 0;
-        }
-}
-
-
-/**
- * Add partition to this cgrp management
- */
-static void rd_kafka_cgrp_partition_add (rd_kafka_cgrp_t *rkcg,
-                                         rd_kafka_toppar_t *rktp) {
-        rd_kafka_dbg(rkcg->rkcg_rk, CGRP,"PARTADD",
-                     "Group \"%s\": add %s [%"PRId32"]",
-                     rkcg->rkcg_group_id->str,
-                     rktp->rktp_rkt->rkt_topic->str,
-                     rktp->rktp_partition);
-
-        rd_kafka_assert(rkcg->rkcg_rk, !rktp->rktp_s_for_cgrp);
-        rktp->rktp_s_for_cgrp = rd_kafka_toppar_keep(rktp);
-        rd_list_add(&rkcg->rkcg_toppars, rktp->rktp_s_for_cgrp);
-}
-
-/**
- * Remove partition from this cgrp management
- */
-static void rd_kafka_cgrp_partition_del (rd_kafka_cgrp_t *rkcg,
-                                         rd_kafka_toppar_t *rktp) {
-        rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "PARTDEL",
-                     "Group \"%s\": delete %s [%"PRId32"]",
-                     rkcg->rkcg_group_id->str,
-                     rktp->rktp_rkt->rkt_topic->str,
-                     rktp->rktp_partition);
-        rd_kafka_assert(rkcg->rkcg_rk, rktp->rktp_s_for_cgrp);
-
-        rd_list_remove(&rkcg->rkcg_toppars, rktp->rktp_s_for_cgrp);
-        rd_kafka_toppar_destroy(rktp->rktp_s_for_cgrp);
-        rktp->rktp_s_for_cgrp = NULL;
-
-        rd_kafka_cgrp_try_terminate(rkcg);
-}
-
-
-
-/**
- * Reply for OffsetFetch from call below.
- */
-static void rd_kafka_cgrp_offsets_fetch_response (
-	rd_kafka_t *rk,
-	rd_kafka_broker_t *rkb,
-	rd_kafka_resp_err_t err,
-	rd_kafka_buf_t *reply,
-	rd_kafka_buf_t *request,
-	void *opaque) {
-	rd_kafka_topic_partition_list_t *offsets = opaque;
-	rd_kafka_cgrp_t *rkcg;
-
-	if (err == RD_KAFKA_RESP_ERR__DESTROY) {
-                /* Termination, quick cleanup. */
-		rd_kafka_topic_partition_list_destroy(offsets);
-                return;
-        }
-
-        rkcg = rd_kafka_cgrp_get(rk);
-
-        if (rd_kafka_buf_version_outdated(request, rkcg->rkcg_version)) {
-                rd_kafka_topic_partition_list_destroy(offsets);
-                return;
-        }
-
-	rd_kafka_topic_partition_list_log(rk, "OFFSETFETCH", offsets);
-	/* If all partitions already had usable offsets then there
-	 * was no request sent and thus no reply, the offsets list is
-	 * good to go. */
-	if (reply)
-		err = rd_kafka_handle_OffsetFetch(rk, rkb, err,
-						  reply, request, offsets,
-						  1/* Update toppars */);
-	if (err) {
-		rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "OFFSET",
-			     "Offset fetch error: %s",
-			     rd_kafka_err2str(err));
-
-		if (err != RD_KAFKA_RESP_ERR__WAIT_COORD)
-			rd_kafka_q_op_err(rkcg->rkcg_q,
-					  RD_KAFKA_OP_CONSUMER_ERR, err, 0,
-					  NULL, 0,
-					  "Failed to fetch offsets: %s",
-					  rd_kafka_err2str(err));
-	} else {
-		if (RD_KAFKA_CGRP_CAN_FETCH_START(rkcg))
-			rd_kafka_cgrp_partitions_fetch_start(
-				rkcg, offsets, 1 /* usable offsets */);
-		else
-			rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "OFFSET",
-				     "Group \"%.*s\": "
-				     "ignoring Offset fetch response for "
-				     "%d partition(s): in state %s",
-				     RD_KAFKAP_STR_PR(rkcg->rkcg_group_id),
-				     offsets ? offsets->cnt : -1,
-				     rd_kafka_cgrp_join_state_names[
-					     rkcg->rkcg_join_state]);
-	}
-
-	rd_kafka_topic_partition_list_destroy(offsets);
-}
-
-/**
- * Fetch offsets for a list of partitions
- */
-static void
-rd_kafka_cgrp_offsets_fetch (rd_kafka_cgrp_t *rkcg, rd_kafka_broker_t *rkb,
-                             rd_kafka_topic_partition_list_t *offsets) {
-	rd_kafka_topic_partition_list_t *use_offsets;
-
-	/* Make a copy of the offsets */
-	use_offsets = rd_kafka_topic_partition_list_copy(offsets);
-
-        if (rkcg->rkcg_state != RD_KAFKA_CGRP_STATE_UP || !rkb)
-		rd_kafka_cgrp_offsets_fetch_response(
-			rkcg->rkcg_rk, rkb, RD_KAFKA_RESP_ERR__WAIT_COORD,
-			NULL, NULL, use_offsets);
-        else {
-		rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "OFFSET",
-			     "Fetch %d offsets with v%d",
-			     use_offsets->cnt, rkcg->rkcg_version);
-                rd_kafka_OffsetFetchRequest(
-                        rkb, 1, offsets,
-                        RD_KAFKA_REPLYQ(rkcg->rkcg_ops, rkcg->rkcg_version),
-			rd_kafka_cgrp_offsets_fetch_response,
-			use_offsets);
-	}
-
-}
-
-
-/**
- * Start fetching all partitions in 'assignment' (async)
- */
-static void
-rd_kafka_cgrp_partitions_fetch_start0 (rd_kafka_cgrp_t *rkcg,
-				       rd_kafka_topic_partition_list_t
-				       *assignment, int usable_offsets,
-				       int line) {
-        int i;
-
-	/* If waiting for offsets to commit we need that to finish first
-	 * before starting fetchers (which might fetch those stored offsets).*/
-	if (rkcg->rkcg_wait_commit_cnt > 0) {
-		rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "FETCHSTART",
-			     "Group \"%s\": not starting fetchers "
-			     "for %d assigned partition(s) in join-state %s "
-			     "(usable_offsets=%s, v%"PRId32", line %d): "
-			     "waiting for %d commit(s)",
-			     rkcg->rkcg_group_id->str, assignment->cnt,
-			     rd_kafka_cgrp_join_state_names[rkcg->
-							    rkcg_join_state],
-			     usable_offsets ? "yes":"no",
-			     rkcg->rkcg_version, line,
-			     rkcg->rkcg_wait_commit_cnt);
-		return;
-	}
-
-	rd_kafka_cgrp_version_new_barrier(rkcg);
-
-        rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "FETCHSTART",
-                     "Group \"%s\": starting fetchers for %d assigned "
-                     "partition(s) in join-state %s "
-		     "(usable_offsets=%s, v%"PRId32", line %d)",
-                     rkcg->rkcg_group_id->str, assignment->cnt,
-		     rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state],
-		     usable_offsets ? "yes":"no",
-		     rkcg->rkcg_version, line);
-
-	rd_kafka_topic_partition_list_log(rkcg->rkcg_rk,
-					  "FETCHSTART", assignment);
-
-        if (assignment->cnt == 0)
-                return;
-
-	/* Check if offsets are really unusable, this is to catch the
-	 * case where the entire assignment has absolute offsets set which
-	 * should make us skip offset lookups. */
-	if (!usable_offsets)
-		usable_offsets =
-			rd_kafka_topic_partition_list_count_abs_offsets(
-				assignment) == assignment->cnt;
-
-        if (!usable_offsets &&
-            rkcg->rkcg_rk->rk_conf.offset_store_method ==
-            RD_KAFKA_OFFSET_METHOD_BROKER) {
-
-                /* Fetch offsets for all assigned partitions */
-                rd_kafka_cgrp_offsets_fetch(rkcg, rkcg->rkcg_rkb, assignment);
-
-        } else {
-		rd_kafka_cgrp_set_join_state(rkcg,
-					     RD_KAFKA_CGRP_JOIN_STATE_STARTED);
-
-                for (i = 0 ; i < assignment->cnt ; i++) {
-                        rd_kafka_topic_partition_t *rktpar =
-                                &assignment->elems[i];
-                        shptr_rd_kafka_toppar_t *s_rktp = rktpar->_private;
-                        rd_kafka_toppar_t *rktp = rd_kafka_toppar_s2i(s_rktp);
-
-			if (!rktp->rktp_assigned) {
-				rktp->rktp_assigned = 1;
-				rkcg->rkcg_assigned_cnt++;
-
-				/* Start fetcher for partition and
-				 * forward partition's fetchq to
-				 * consumer groups queue. */
-				rd_kafka_toppar_op_fetch_start(
-					rktp, rktpar->offset,
-					rkcg->rkcg_q, RD_KAFKA_NO_REPLYQ);
-			} else {
-				int64_t offset;
-				/* Fetcher already started,
-				 * just do seek to update offset */
-				rd_kafka_toppar_lock(rktp);
-				if (rktpar->offset < rktp->rktp_app_offset)
-					offset = rktp->rktp_app_offset;
-				else
-					offset = rktpar->offset;
-				rd_kafka_toppar_unlock(rktp);
-				rd_kafka_toppar_op_seek(rktp, offset,
-							RD_KAFKA_NO_REPLYQ);
-			}
-                }
-        }
-
-	rd_kafka_assert(NULL, rkcg->rkcg_assigned_cnt <=
-			(rkcg->rkcg_assignment ? rkcg->rkcg_assignment->cnt : 0));
-}
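
Whether the OffsetFetch round-trip above is needed depends on the offsets the application put in its assignment. A small sketch, assuming the standard public API (topic name and offsets illustrative):

    #include <librdkafka/rdkafka.h>

    static void assign_with_offsets (rd_kafka_t *rk) {
            rd_kafka_topic_partition_list_t *parts =
                    rd_kafka_topic_partition_list_new(2);
            rd_kafka_topic_partition_t *p;

            /* Absolute offset: counts as a "usable offset" above, so the
             * fetcher can start without an OffsetFetch round-trip. */
            p = rd_kafka_topic_partition_list_add(parts, "mytopic", 0);
            p->offset = 1234;

            /* Invalid/unset offset: resolved from the committed offset
             * (or auto.offset.reset), which is what triggers
             * rd_kafka_cgrp_offsets_fetch() above. */
            p = rd_kafka_topic_partition_list_add(parts, "mytopic", 1);
            p->offset = RD_KAFKA_OFFSET_INVALID;

            rd_kafka_assign(rk, parts);
            rd_kafka_topic_partition_list_destroy(parts);
    }
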
-
-
-
-
-
-/**
- * @brief Defer offset commit (rko) until coordinator is available.
- *
- * @returns 1 if the rko was deferred or 0 if the defer queue is disabled
- *          or rko already deferred.
- */
-static int rd_kafka_cgrp_defer_offset_commit (rd_kafka_cgrp_t *rkcg,
-                                              rd_kafka_op_t *rko,
-                                              const char *reason) {
-
-        /* wait_coord_q is disabled session.timeout.ms after
-         * group close() has been initiated. */
-        if (rko->rko_u.offset_commit.ts_timeout != 0 ||
-            !rd_kafka_q_ready(rkcg->rkcg_wait_coord_q))
-                return 0;
-
-        rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "COMMIT",
-                     "Group \"%s\": "
-                     "unable to OffsetCommit in state %s: %s: "
-                     "coordinator (%s) is unavailable: "
-                     "retrying later",
-                     rkcg->rkcg_group_id->str,
-                     rd_kafka_cgrp_state_names[rkcg->rkcg_state],
-                     reason,
-                     rkcg->rkcg_rkb ?
-                     rd_kafka_broker_name(rkcg->rkcg_rkb) :
-                     "none");
-
-        rko->rko_flags |= RD_KAFKA_OP_F_REPROCESS;
-        rko->rko_u.offset_commit.ts_timeout = rd_clock() +
-                (rkcg->rkcg_rk->rk_conf.group_session_timeout_ms
-                 * 1000);
-        rd_kafka_q_enq(rkcg->rkcg_wait_coord_q, rko);
-
-        return 1;
-}
-
-
-/**
- * @brief Handler of OffsetCommit response (after parsing).
- * @remark \p offsets may be NULL if \p err is set
- * @returns the number of partitions with errors encountered
- */
-static int
-rd_kafka_cgrp_handle_OffsetCommit (rd_kafka_cgrp_t *rkcg,
-                                   rd_kafka_resp_err_t err,
-                                   rd_kafka_topic_partition_list_t
-                                   *offsets) {
-	int i;
-        int errcnt = 0;
-
-	if (!err) {
-		/* Update toppars' committed offset */
-		for (i = 0 ; i < offsets->cnt ; i++) {
-			rd_kafka_topic_partition_t *rktpar = &offsets->elems[i];
-			shptr_rd_kafka_toppar_t *s_rktp;
-			rd_kafka_toppar_t *rktp;
-
-			if (unlikely(rktpar->err)) {
-				rd_kafka_dbg(rkcg->rkcg_rk, TOPIC,
-					     "OFFSET",
-					     "OffsetCommit failed for "
-					     "%s [%"PRId32"] at offset "
-					     "%"PRId64": %s",
-					     rktpar->topic, rktpar->partition,
-					     rktpar->offset,
-					     rd_kafka_err2str(rktpar->err));
-                                errcnt++;
-				continue;
-			} else if (unlikely(rktpar->offset < 0))
-				continue;
-
-			s_rktp = rd_kafka_topic_partition_list_get_toppar(
-				rkcg->rkcg_rk, rktpar);
-			if (!s_rktp)
-				continue;
-
-			rktp = rd_kafka_toppar_s2i(s_rktp);
-			rd_kafka_toppar_lock(rktp);
-			rktp->rktp_committed_offset = rktpar->offset;
-			rd_kafka_toppar_unlock(rktp);
-
-			rd_kafka_toppar_destroy(s_rktp);
-		}
-	}
-
-        if (rd_kafka_cgrp_try_terminate(rkcg))
-                return errcnt; /* terminated */
-
-        if (rkcg->rkcg_join_state == RD_KAFKA_CGRP_JOIN_STATE_WAIT_UNASSIGN)
-		rd_kafka_cgrp_check_unassign_done(rkcg,
-                                                  "OffsetCommit done");
-
-        return errcnt;
-}
-
-
-
-
-/**
- * Handle OffsetCommitResponse
- * Takes the original 'rko' as opaque argument.
- * @remark \p rkb, rkbuf, and request may be NULL in a number of
- *         error cases (e.g., _NO_OFFSET, _WAIT_COORD)
- */
-static void rd_kafka_cgrp_op_handle_OffsetCommit (rd_kafka_t *rk,
-						  rd_kafka_broker_t *rkb,
-						  rd_kafka_resp_err_t err,
-						  rd_kafka_buf_t *rkbuf,
-						  rd_kafka_buf_t *request,
-						  void *opaque) {
-	rd_kafka_cgrp_t *rkcg = rk->rk_cgrp;
-        rd_kafka_op_t *rko_orig = opaque;
-	rd_kafka_topic_partition_list_t *offsets =
-		rko_orig->rko_u.offset_commit.partitions; /* maybe NULL */
-        int errcnt;
-        int offset_commit_cb_served = 0;
-
-	RD_KAFKA_OP_TYPE_ASSERT(rko_orig, RD_KAFKA_OP_OFFSET_COMMIT);
-
-        if (rd_kafka_buf_version_outdated(request, rkcg->rkcg_version))
-                err = RD_KAFKA_RESP_ERR__DESTROY;
-
-	err = rd_kafka_handle_OffsetCommit(rk, rkb, err, rkbuf,
-					   request, offsets);
-
-        if (rkb)
-                rd_rkb_dbg(rkb, CGRP, "COMMIT",
-                           "OffsetCommit for %d partition(s): %s: returned: %s",
-                           offsets ? offsets->cnt : -1,
-                           rko_orig->rko_u.offset_commit.reason,
-                           rd_kafka_err2str(err));
-        else
-                rd_kafka_dbg(rk, CGRP, "COMMIT",
-                             "OffsetCommit for %d partition(s): %s: returned: %s",
-                             offsets ? offsets->cnt : -1,
-                             rko_orig->rko_u.offset_commit.reason,
-                             rd_kafka_err2str(err));
-
-        if (err == RD_KAFKA_RESP_ERR__IN_PROGRESS)
-                return; /* Retrying */
-        else if (err == RD_KAFKA_RESP_ERR_NOT_COORDINATOR_FOR_GROUP ||
-                 err == RD_KAFKA_RESP_ERR_GROUP_COORDINATOR_NOT_AVAILABLE) {
-
-                /* future-proofing, see timeout_scan(). */
-                rd_kafka_assert(NULL, err != RD_KAFKA_RESP_ERR__WAIT_COORD);
-
-                if (rd_kafka_cgrp_defer_offset_commit(rkcg, rko_orig,
-                                                      rd_kafka_err2str(err)))
-                        return;
-
-                /* FALLTHRU and error out */
-        }
-
-	rd_kafka_assert(NULL, rkcg->rkcg_wait_commit_cnt > 0);
-	rkcg->rkcg_wait_commit_cnt--;
-
-	if (err == RD_KAFKA_RESP_ERR__DESTROY ||
-            (err == RD_KAFKA_RESP_ERR__NO_OFFSET &&
-             rko_orig->rko_u.offset_commit.silent_empty)) {
-		rd_kafka_op_destroy(rko_orig);
-                rd_kafka_cgrp_check_unassign_done(
-                        rkcg,
-                        err == RD_KAFKA_RESP_ERR__DESTROY ?
-                        "OffsetCommit done (__DESTROY)" :
-                        "OffsetCommit done (__NO_OFFSET)");
-		return;
-	}
-
-        /* Call on_commit interceptors */
-        if (err != RD_KAFKA_RESP_ERR__NO_OFFSET &&
-            err != RD_KAFKA_RESP_ERR__DESTROY &&
-            offsets && offsets->cnt > 0)
-                rd_kafka_interceptors_on_commit(rk, offsets, err);
-
-
-	/* If no special callback is set but an offset_commit_cb has
-	 * been set in conf then post an event for the latter. */
-	if (!rko_orig->rko_u.offset_commit.cb && rk->rk_conf.offset_commit_cb) {
-                rd_kafka_op_t *rko_reply = rd_kafka_op_new_reply(rko_orig, err);
-
-                rd_kafka_op_set_prio(rko_reply, RD_KAFKA_PRIO_HIGH);
-
-		if (offsets)
-			rko_reply->rko_u.offset_commit.partitions =
-				rd_kafka_topic_partition_list_copy(offsets);
-
-		rko_reply->rko_u.offset_commit.cb =
-			rk->rk_conf.offset_commit_cb;
-		rko_reply->rko_u.offset_commit.opaque = rk->rk_conf.opaque;
-
-                rd_kafka_q_enq(rk->rk_rep, rko_reply);
-                offset_commit_cb_served++;
-	}
-
-
-	/* Enqueue reply to requester's queue, if any. */
-	if (rko_orig->rko_replyq.q) {
-                rd_kafka_op_t *rko_reply = rd_kafka_op_new_reply(rko_orig, err);
-
-                rd_kafka_op_set_prio(rko_reply, RD_KAFKA_PRIO_HIGH);
-
-		/* Copy offset & partitions & callbacks to reply op */
-		rko_reply->rko_u.offset_commit = rko_orig->rko_u.offset_commit;
-		if (offsets)
-			rko_reply->rko_u.offset_commit.partitions =
-				rd_kafka_topic_partition_list_copy(offsets);
-                if (rko_reply->rko_u.offset_commit.reason)
-                        rko_reply->rko_u.offset_commit.reason =
-                        rd_strdup(rko_reply->rko_u.offset_commit.reason);
-
-                rd_kafka_replyq_enq(&rko_orig->rko_replyq, rko_reply, 0);
-                offset_commit_cb_served++;
-        }
-
-        errcnt = rd_kafka_cgrp_handle_OffsetCommit(rkcg, err, offsets);
-
-        if (!offset_commit_cb_served &&
-            err != RD_KAFKA_RESP_ERR_NO_ERROR &&
-            err != RD_KAFKA_RESP_ERR__NO_OFFSET) {
-                /* If there is no callback or handler for this (auto)
-                 * commit then raise an error to the application (#1043) */
-                char tmp[512];
-
-                rd_kafka_topic_partition_list_str(
-                        offsets, tmp, sizeof(tmp),
-                        /*no partition-errs if a global error*/
-                        RD_KAFKA_FMT_F_OFFSET |
-                        (err ? 0 : RD_KAFKA_FMT_F_ONLY_ERR));
-
-                rd_kafka_log(rkcg->rkcg_rk, LOG_WARNING, "COMMITFAIL",
-                             "Offset commit (%s) failed "
-                             "for %d/%d partition(s): "
-                             "%s%s%s",
-                             rko_orig->rko_u.offset_commit.reason,
-                             err ? offsets->cnt : errcnt, offsets->cnt,
-                             err ? rd_kafka_err2str(err) : "",
-                             err ? ": " : "",
-                             tmp);
-        }
-
-        rd_kafka_op_destroy(rko_orig);
-}
-
-
-static size_t rd_kafka_topic_partition_has_absolute_offset (
-        const rd_kafka_topic_partition_t *rktpar, void *opaque) {
-        return rktpar->offset >= 0 ? 1 : 0;
-}
-
-
-/**
- * Commit a list of offsets.
- * Reuse the originating 'rko' for the async reply.
- * 'rko->rko_payload' should either be NULL (to commit current assignment) or
- * a proper topic_partition_list_t with offsets to commit.
- * The offset list will be altered.
- *
- * \p rko...silent_empty: if there are no offsets to commit bail out
- *                        silently without posting an op on the reply queue.
- * \p set_offsets: set offsets in rko->rko_u.offset_commit.partitions
- *
- * \p op_version: cgrp's op version to use (or 0)
- *
- * Locality: cgrp thread
- */
-static void rd_kafka_cgrp_offsets_commit (rd_kafka_cgrp_t *rkcg,
-                                          rd_kafka_op_t *rko,
-                                          int set_offsets,
-                                          const char *reason,
-                                          int op_version) {
-	rd_kafka_topic_partition_list_t *offsets;
-	rd_kafka_resp_err_t err;
-        int valid_offsets = 0;
-
-	/* If offsets is NULL we shall use the current assignment. */
-	if (!rko->rko_u.offset_commit.partitions && rkcg->rkcg_assignment)
-		rko->rko_u.offset_commit.partitions =
-			rd_kafka_topic_partition_list_copy(
-				rkcg->rkcg_assignment);
-
-	offsets = rko->rko_u.offset_commit.partitions;
-
-        if (offsets) {
-                /* Set offsets to commit */
-                if (set_offsets)
-                        rd_kafka_topic_partition_list_set_offsets(
-			rkcg->rkcg_rk, rko->rko_u.offset_commit.partitions, 1,
-			RD_KAFKA_OFFSET_INVALID/* def */,
-			1 /* is commit */);
-
-                /*  Check the number of valid offsets to commit. */
-                valid_offsets = (int)rd_kafka_topic_partition_list_sum(
-                        offsets,
-                        rd_kafka_topic_partition_has_absolute_offset, NULL);
-        }
-
-        if (!(rko->rko_flags & RD_KAFKA_OP_F_REPROCESS)) {
-                /* wait_commit_cnt has already been increased for
-                 * reprocessed ops. */
-                rkcg->rkcg_wait_commit_cnt++;
-        }
-
-	if (!valid_offsets) {
-                /* No valid offsets */
-                err = RD_KAFKA_RESP_ERR__NO_OFFSET;
-                goto err;
-	}
-
-        if (rkcg->rkcg_state != RD_KAFKA_CGRP_STATE_UP || !rkcg->rkcg_rkb ||
-	    rkcg->rkcg_rkb->rkb_source == RD_KAFKA_INTERNAL) {
-
-		if (rd_kafka_cgrp_defer_offset_commit(rkcg, rko, reason))
-			return;
-
-		err = RD_KAFKA_RESP_ERR__WAIT_COORD;
-
-	} else {
-                int r;
-
-                /* Send OffsetCommit */
-                r = rd_kafka_OffsetCommitRequest(
-                            rkcg->rkcg_rkb, rkcg, 1, offsets,
-                            RD_KAFKA_REPLYQ(rkcg->rkcg_ops, op_version),
-                            rd_kafka_cgrp_op_handle_OffsetCommit, rko,
-                        reason);
-
-                /* Must have valid offsets to commit if we get here */
-                rd_kafka_assert(NULL, r != 0);
-
-                return;
-        }
-
-
-
- err:
-	/* Propagate error to whoever wanted offset committed. */
-	rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "COMMIT",
-		     "OffsetCommit internal error: %s", rd_kafka_err2str(err));
-	rd_kafka_cgrp_op_handle_OffsetCommit(rkcg->rkcg_rk, NULL, err,
-					     NULL, NULL, rko);
-}
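
The internal commit path above is normally reached from the public consumer API. A minimal hedged sketch (not part of the committed diff) of an application-level manual commit: rd_kafka_commit() with a NULL offset list commits the current assignment, the same "use current assignment" branch handled at the top of rd_kafka_cgrp_offsets_commit(). The 'rk' handle is assumed to be an existing high-level consumer.

#include <stdio.h>
#include <librdkafka/rdkafka.h>

/* Hedged sketch: synchronously commit whatever the consumer currently has
 * assigned. 'rk' is assumed to be an existing high-level consumer handle. */
static void commit_current_assignment (rd_kafka_t *rk) {
        rd_kafka_resp_err_t err;

        /* NULL offsets == commit the current assignment, 0 == synchronous */
        err = rd_kafka_commit(rk, NULL, 0);
        if (err)
                fprintf(stderr, "%% OffsetCommit failed: %s\n",
                        rd_kafka_err2str(err));
}
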
-
-
-/**
- * Commit offsets for all assigned partitions.
- */
-static void
-rd_kafka_cgrp_assigned_offsets_commit (rd_kafka_cgrp_t *rkcg,
-                                       const rd_kafka_topic_partition_list_t
-                                       *offsets, const char *reason) {
-        rd_kafka_op_t *rko;
-
-	rko = rd_kafka_op_new(RD_KAFKA_OP_OFFSET_COMMIT);
-        rko->rko_u.offset_commit.reason = rd_strdup(reason);
-	if (rkcg->rkcg_rk->rk_conf.enabled_events & RD_KAFKA_EVENT_OFFSET_COMMIT) {
-		rd_kafka_op_set_replyq(rko, rkcg->rkcg_rk->rk_rep, 0);
-		rko->rko_u.offset_commit.cb =
-			rkcg->rkcg_rk->rk_conf.offset_commit_cb; /*maybe NULL*/
-		rko->rko_u.offset_commit.opaque = rkcg->rkcg_rk->rk_conf.opaque;
-	}
-        /* NULL partitions means current assignment */
-        if (offsets)
-                rko->rko_u.offset_commit.partitions =
-                        rd_kafka_topic_partition_list_copy(offsets);
-	rko->rko_u.offset_commit.silent_empty = 1;
-        rd_kafka_cgrp_offsets_commit(rkcg, rko, 1/* set offsets */, reason,
-                                     rkcg->rkcg_version);
-}
-
-
-/**
- * auto.commit.interval.ms commit timer callback.
- *
- * Trigger a group offset commit.
- *
- * Locality: rdkafka main thread
- */
-static void rd_kafka_cgrp_offset_commit_tmr_cb (rd_kafka_timers_t *rkts,
-                                                void *arg) {
-        rd_kafka_cgrp_t *rkcg = arg;
-
-	rd_kafka_cgrp_assigned_offsets_commit(rkcg, NULL,
-                                              "cgrp auto commit timer");
-}
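
The timer above is what drives periodic auto-commit. A hedged sketch (not part of the committed diff) of the consumer configuration that arms it, using standard librdkafka property names; real code should check the error strings and return codes.

#include <librdkafka/rdkafka.h>

/* Hedged sketch: arm the auto-commit timer serviced by the callback above.
 * Error results are ignored here for brevity. */
static rd_kafka_conf_t *make_autocommit_conf (void) {
        char errstr[512];
        rd_kafka_conf_t *conf = rd_kafka_conf_new();

        rd_kafka_conf_set(conf, "group.id", "example-group",
                          errstr, sizeof(errstr));
        rd_kafka_conf_set(conf, "enable.auto.commit", "true",
                          errstr, sizeof(errstr));
        rd_kafka_conf_set(conf, "auto.commit.interval.ms", "5000",
                          errstr, sizeof(errstr));
        return conf;
}
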
-
-
-
-
-/**
- * Call when all unassign operations are done to transition to the next state
- */
-static void rd_kafka_cgrp_unassign_done (rd_kafka_cgrp_t *rkcg,
-                                         const char *reason) {
-	rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "UNASSIGN",
-		     "Group \"%s\": unassign done in state %s (join state %s): "
-		     "%s: %s",
-		     rkcg->rkcg_group_id->str,
-		     rd_kafka_cgrp_state_names[rkcg->rkcg_state],
-		     rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state],
-		     rkcg->rkcg_assignment ?
-		     "with new assignment" : "without new assignment",
-                     reason);
-
-	if (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_LEAVE_ON_UNASSIGN) {
-		rd_kafka_cgrp_leave(rkcg, 1/*ignore response*/);
-		rkcg->rkcg_flags &= ~RD_KAFKA_CGRP_F_LEAVE_ON_UNASSIGN;
-	}
-
-        if (rkcg->rkcg_join_state != RD_KAFKA_CGRP_JOIN_STATE_WAIT_UNASSIGN) {
-                rd_kafka_cgrp_try_terminate(rkcg);
-                return;
-        }
-
-        if (rkcg->rkcg_assignment) {
-		rd_kafka_cgrp_set_join_state(rkcg,
-					     RD_KAFKA_CGRP_JOIN_STATE_ASSIGNED);
-                if (RD_KAFKA_CGRP_CAN_FETCH_START(rkcg))
-                        rd_kafka_cgrp_partitions_fetch_start(
-                                rkcg, rkcg->rkcg_assignment, 0);
-	} else {
-		rd_kafka_cgrp_set_join_state(rkcg,
-					     RD_KAFKA_CGRP_JOIN_STATE_INIT);
-	}
-
-	rd_kafka_cgrp_try_terminate(rkcg);
-}
-
-
-/**
- * Checks if the current unassignment is done and if so
- * calls .._done().
- * Else does nothing.
- */
-static void rd_kafka_cgrp_check_unassign_done (rd_kafka_cgrp_t *rkcg,
-                                               const char *reason) {
-	if (rkcg->rkcg_wait_unassign_cnt > 0 ||
-	    rkcg->rkcg_assigned_cnt > 0 ||
-	    rkcg->rkcg_wait_commit_cnt > 0 ||
-	    rkcg->rkcg_flags & RD_KAFKA_CGRP_F_WAIT_UNASSIGN) {
-                rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "UNASSIGN",
-                             "Unassign not done yet "
-                             "(%d wait_unassign, %d assigned, %d wait commit"
-                             "%s): %s",
-                             rkcg->rkcg_wait_unassign_cnt,
-                             rkcg->rkcg_assigned_cnt,
-                             rkcg->rkcg_wait_commit_cnt,
-                             (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_WAIT_UNASSIGN)?
-                             ", F_WAIT_UNASSIGN" : "", reason);
-		return;
-        }
-
-	rd_kafka_cgrp_unassign_done(rkcg, reason);
-}
-
-
-
-/**
- * Remove existing assignment.
- */
-static rd_kafka_resp_err_t
-rd_kafka_cgrp_unassign (rd_kafka_cgrp_t *rkcg) {
-        int i;
-        rd_kafka_topic_partition_list_t *old_assignment;
-
-        rd_kafka_cgrp_set_join_state(rkcg,
-                                     RD_KAFKA_CGRP_JOIN_STATE_WAIT_UNASSIGN);
-
-	rkcg->rkcg_flags &= ~RD_KAFKA_CGRP_F_WAIT_UNASSIGN;
-        old_assignment = rkcg->rkcg_assignment;
-        if (!old_assignment) {
-		rd_kafka_cgrp_check_unassign_done(
-                        rkcg, "unassign (no previous assignment)");
-                return RD_KAFKA_RESP_ERR_NO_ERROR;
-	}
-        rkcg->rkcg_assignment = NULL;
-
-	rd_kafka_cgrp_version_new_barrier(rkcg);
-
-	rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "UNASSIGN",
-                     "Group \"%s\": unassigning %d partition(s) (v%"PRId32")",
-                     rkcg->rkcg_group_id->str, old_assignment->cnt,
-		     rkcg->rkcg_version);
-
-        if (rkcg->rkcg_rk->rk_conf.offset_store_method ==
-            RD_KAFKA_OFFSET_METHOD_BROKER &&
-	    rkcg->rkcg_rk->rk_conf.enable_auto_commit) {
-                /* Commit all offsets for all assigned partitions to broker */
-                rd_kafka_cgrp_assigned_offsets_commit(rkcg, old_assignment,
-                                                      "unassign");
-        }
-
-        for (i = 0 ; i < old_assignment->cnt ; i++) {
-                rd_kafka_topic_partition_t *rktpar;
-                shptr_rd_kafka_toppar_t *s_rktp;
-                rd_kafka_toppar_t *rktp;
-
-                rktpar = &old_assignment->elems[i];
-                s_rktp = rktpar->_private;
-                rktp = rd_kafka_toppar_s2i(s_rktp);
-
-                if (rktp->rktp_assigned) {
-                        rd_kafka_toppar_op_fetch_stop(
-				rktp, RD_KAFKA_REPLYQ(rkcg->rkcg_ops, 0));
-                        rkcg->rkcg_wait_unassign_cnt++;
-                }
-
-                rd_kafka_toppar_lock(rktp);
-                rd_kafka_toppar_desired_del(rktp);
-                rd_kafka_toppar_unlock(rktp);
-        }
-
-	/* Resume partition consumption. */
-	rd_kafka_toppars_pause_resume(rkcg->rkcg_rk, 0/*resume*/,
-				      RD_KAFKA_TOPPAR_F_LIB_PAUSE,
-                                      old_assignment);
-
-        rd_kafka_topic_partition_list_destroy(old_assignment);
-
-        rd_kafka_cgrp_check_unassign_done(rkcg, "unassign");
-
-        return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-
-/**
- * Set new atomic partition assignment
- * May update \p assignment but will not hold on to it.
- */
-static void
-rd_kafka_cgrp_assign (rd_kafka_cgrp_t *rkcg,
-                      rd_kafka_topic_partition_list_t *assignment) {
-        int i;
-
-        rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "ASSIGN",
-                     "Group \"%s\": new assignment of %d partition(s) "
-                     "in join state %s",
-                     rkcg->rkcg_group_id->str,
-                     assignment ? assignment->cnt : 0,
-                     rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state]);
-
-        /* Get toppar object for each partition.
-         * This is to make sure the rktp stays alive during unassign(). */
-        for (i = 0 ; assignment && i < assignment->cnt ; i++) {
-                rd_kafka_topic_partition_t *rktpar;
-                shptr_rd_kafka_toppar_t *s_rktp;
-
-                rktpar = &assignment->elems[i];
-
-                /* Use existing toppar if set */
-                if (rktpar->_private)
-                        continue;
-
-                s_rktp = rd_kafka_toppar_get2(rkcg->rkcg_rk,
-                                              rktpar->topic,
-                                              rktpar->partition,
-                                              0/*no-ua*/, 1/*create-on-miss*/);
-                if (s_rktp)
-                        rktpar->_private = s_rktp;
-        }
-
-        rd_kafka_cgrp_version_new_barrier(rkcg);
-
-        rd_kafka_wrlock(rkcg->rkcg_rk);
-        rkcg->rkcg_c.assignment_size = assignment ? assignment->cnt : 0;
-        rd_kafka_wrunlock(rkcg->rkcg_rk);
-
-
-        /* Remove existing assignment (async operation) */
-	if (rkcg->rkcg_assignment)
-		rd_kafka_cgrp_unassign(rkcg);
-
-        rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "ASSIGN",
-                     "Group \"%s\": assigning %d partition(s) in join state %s",
-                     rkcg->rkcg_group_id->str, assignment ? assignment->cnt : 0,
-                     rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state]);
-
-
-	if (assignment) {
-		rkcg->rkcg_assignment =
-			rd_kafka_topic_partition_list_copy(assignment);
-
-                /* Mark partition(s) as desired */
-                for (i = 0 ; i < rkcg->rkcg_assignment->cnt ; i++) {
-                        rd_kafka_topic_partition_t *rktpar =
-                                &rkcg->rkcg_assignment->elems[i];
-                        shptr_rd_kafka_toppar_t *s_rktp = rktpar->_private;
-                        rd_kafka_toppar_t *rktp =
-                                rd_kafka_toppar_s2i(s_rktp);
-                        rd_kafka_toppar_lock(rktp);
-                        rd_kafka_toppar_desired_add0(rktp);
-                        rd_kafka_toppar_unlock(rktp);
-                }
-        }
-
-        if (rkcg->rkcg_join_state == RD_KAFKA_CGRP_JOIN_STATE_WAIT_UNASSIGN)
-                return;
-
-        rd_dassert(rkcg->rkcg_wait_unassign_cnt == 0);
-
-        rd_kafka_cgrp_set_join_state(rkcg, RD_KAFKA_CGRP_JOIN_STATE_ASSIGNED);
-
-        if (RD_KAFKA_CGRP_CAN_FETCH_START(rkcg) && rkcg->rkcg_assignment) {
-                /* No existing assignment that needs to be decommissioned,
-                 * start partition fetchers right away */
-                rd_kafka_cgrp_partitions_fetch_start(
-                        rkcg, rkcg->rkcg_assignment, 0);
-        }
-}
-
-
-
-
-/**
- * Handle a rebalance-triggered partition assignment.
- *
- * If a rebalance_cb has been registered we enqueue an op for the app
- * and let the app perform the actual assign() call.
- * Otherwise we assign() directly from here.
- *
- * This provides the most flexibility, allowing the app to perform any
- * operation it sees fit (e.g., offset writes or reads) before actually
- * updating the assign():ment.
- */
-static void
-rd_kafka_cgrp_handle_assignment (rd_kafka_cgrp_t *rkcg,
-				 rd_kafka_topic_partition_list_t *assignment) {
-
-	rd_kafka_rebalance_op(rkcg, RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS,
-			      assignment, "new assignment");
-}
-
-
-/**
- * Handle HeartbeatResponse errors.
- *
- * If an IllegalGeneration error code is returned in the
- * HeartbeatResponse, it indicates that the co-ordinator has
- * initiated a rebalance. The consumer then stops fetching data,
- * commits offsets and sends a JoinGroupRequest to its co-ordinator
- * broker */
-void rd_kafka_cgrp_handle_heartbeat_error (rd_kafka_cgrp_t *rkcg,
-					   rd_kafka_resp_err_t err) {
-
-
-	rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "HEARTBEAT",
-		     "Group \"%s\" heartbeat error response in "
-		     "state %s (join state %s, %d partition(s) assigned): %s",
-		     rkcg->rkcg_group_id->str,
-		     rd_kafka_cgrp_state_names[rkcg->rkcg_state],
-		     rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state],
-		     rkcg->rkcg_assignment ? rkcg->rkcg_assignment->cnt : 0,
-		     rd_kafka_err2str(err));
-
-	if (rkcg->rkcg_join_state <= RD_KAFKA_CGRP_JOIN_STATE_WAIT_SYNC) {
-		rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "HEARTBEAT",
-			     "Heartbeat response: discarding outdated "
-			     "request (now in join-state %s)",
-			     rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state]);
-		return;
-	}
-
-	switch (err)
-	{
-	case RD_KAFKA_RESP_ERR__DESTROY:
-		/* quick cleanup */
-		break;
-	case RD_KAFKA_RESP_ERR_NOT_COORDINATOR_FOR_GROUP:
-	case RD_KAFKA_RESP_ERR_GROUP_COORDINATOR_NOT_AVAILABLE:
-	case RD_KAFKA_RESP_ERR__TRANSPORT:
-		/* Remain in joined state and keep querying for coordinator */
-		rd_interval_expedite(&rkcg->rkcg_coord_query_intvl, 0);
-		break;
-
-	case RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID:
-		rd_kafka_cgrp_set_member_id(rkcg, "");
-	case RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS:
-	case RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION:
-	default:
-                /* Just revert to INIT state if join state is active. */
-                if (rkcg->rkcg_join_state <
-                    RD_KAFKA_CGRP_JOIN_STATE_WAIT_ASSIGN_REBALANCE_CB ||
-                    rkcg->rkcg_join_state ==
-                    RD_KAFKA_

<TRUNCATED>

[06/51] [partial] nifi-minifi-cpp git commit: MINIFICPP-512 - upgrade to librdkafka 0.11.4

Posted by ph...@apache.org.
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/examples/rdkafka_performance.c
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/examples/rdkafka_performance.c b/thirdparty/librdkafka-0.11.4/examples/rdkafka_performance.c
new file mode 100644
index 0000000..bceb23b
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/examples/rdkafka_performance.c
@@ -0,0 +1,1651 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012, Magnus Edenhill
+ * All rights reserved.
+ * 
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met: 
+ * 
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer. 
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution. 
+ * 
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * Apache Kafka consumer & producer performance tester
+ * using the Kafka driver from librdkafka
+ * (https://github.com/edenhill/librdkafka)
+ */
+
+#ifdef _MSC_VER
+#define  _CRT_SECURE_NO_WARNINGS /* Silence nonsense on MSVC */
+#endif
+
+#include "../src/rd.h"
+
+#define _GNU_SOURCE /* for strndup() */
+#include <ctype.h>
+#include <signal.h>
+#include <string.h>
+#include <errno.h>
+
+/* Typical include path would be <librdkafka/rdkafka.h>, but this program
+ * is built from within the librdkafka source tree and thus differs. */
+#include "rdkafka.h"  /* for Kafka driver */
+/* Do not include these defines from your program, they will not be
+ * provided by librdkafka. */
+#include "rd.h"
+#include "rdtime.h"
+
+#ifdef _MSC_VER
+#include "../win32/wingetopt.h"
+#include "../win32/wintime.h"
+#endif
+
+
+static int run = 1;
+static int forever = 1;
+static rd_ts_t dispintvl = 1000;
+static int do_seq = 0;
+static int exit_after = 0;
+static int exit_eof = 0;
+static FILE *stats_fp;
+static int dr_disp_div;
+static int verbosity = 1;
+static int latency_mode = 0;
+static int report_offset = 0;
+static FILE *latency_fp = NULL;
+static int msgcnt = -1;
+static int incremental_mode = 0;
+static int partition_cnt = 0;
+static int eof_cnt = 0;
+static int with_dr = 1;
+static int read_hdrs = 0;
+
+
+static void stop (int sig) {
+        if (!run)
+                exit(0);
+	run = 0;
+}
+
+static long int msgs_wait_cnt = 0;
+static long int msgs_wait_produce_cnt = 0;
+static rd_ts_t t_end;
+static rd_kafka_t *global_rk;
+
+struct avg {
+        int64_t  val;
+        int      cnt;
+        uint64_t ts_start;
+};
+
+static struct {
+	rd_ts_t  t_start;
+	rd_ts_t  t_end;
+	rd_ts_t  t_end_send;
+	uint64_t msgs;
+	uint64_t msgs_last;
+        uint64_t msgs_dr_ok;
+        uint64_t msgs_dr_err;
+        uint64_t bytes_dr_ok;
+	uint64_t bytes;
+	uint64_t bytes_last;
+	uint64_t tx;
+	uint64_t tx_err;
+        uint64_t avg_rtt;
+        uint64_t offset;
+	rd_ts_t  t_fetch_latency;
+	rd_ts_t  t_last;
+        rd_ts_t  t_enobufs_last;
+	rd_ts_t  t_total;
+        rd_ts_t  latency_last;
+        rd_ts_t  latency_lo;
+        rd_ts_t  latency_hi;
+        rd_ts_t  latency_sum;
+        int      latency_cnt;
+        int64_t  last_offset;
+} cnt;
+
+
+uint64_t wall_clock (void) {
+        struct timeval tv;
+        gettimeofday(&tv, NULL);
+        return ((uint64_t)tv.tv_sec * 1000000LLU) +
+		((uint64_t)tv.tv_usec);
+}
+
+static void err_cb (rd_kafka_t *rk, int err, const char *reason, void *opaque) {
+	printf("%% ERROR CALLBACK: %s: %s: %s\n",
+	       rd_kafka_name(rk), rd_kafka_err2str(err), reason);
+}
+
+static void throttle_cb (rd_kafka_t *rk, const char *broker_name,
+			 int32_t broker_id, int throttle_time_ms,
+			 void *opaque) {
+	printf("%% THROTTLED %dms by %s (%"PRId32")\n", throttle_time_ms,
+	       broker_name, broker_id);
+}
+
+static void offset_commit_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err,
+                              rd_kafka_topic_partition_list_t *offsets,
+                              void *opaque) {
+        int i;
+
+        if (err || verbosity >= 2)
+                printf("%% Offset commit of %d partition(s): %s\n",
+                       offsets->cnt, rd_kafka_err2str(err));
+
+        for (i = 0 ; i < offsets->cnt ; i++) {
+                rd_kafka_topic_partition_t *rktpar = &offsets->elems[i];
+                if (rktpar->err || verbosity >= 2)
+                        printf("%%  %s [%"PRId32"] @ %"PRId64": %s\n",
+                               rktpar->topic, rktpar->partition,
+                               rktpar->offset, rd_kafka_err2str(err));
+        }
+}
+
+/**
+ * @brief Add latency measurement
+ */
+static void latency_add (int64_t ts, const char *who) {
+        if (ts > cnt.latency_hi)
+                cnt.latency_hi = ts;
+        if (!cnt.latency_lo || ts < cnt.latency_lo)
+                cnt.latency_lo = ts;
+        cnt.latency_last = ts;
+        cnt.latency_cnt++;
+        cnt.latency_sum += ts;
+        if (latency_fp)
+                fprintf(latency_fp, "%"PRIu64"\n", ts);
+}
+
+
+static void msg_delivered (rd_kafka_t *rk,
+                           const rd_kafka_message_t *rkmessage, void *opaque) {
+	static rd_ts_t last;
+	rd_ts_t now = rd_clock();
+	static int msgs;
+
+	msgs++;
+
+	msgs_wait_cnt--;
+
+	if (rkmessage->err)
+                cnt.msgs_dr_err++;
+        else {
+                cnt.msgs_dr_ok++;
+                cnt.bytes_dr_ok += rkmessage->len;
+        }
+
+        if (latency_mode) {
+                /* Extract latency */
+                int64_t source_ts;
+                if (sscanf(rkmessage->payload, "LATENCY:%"SCNd64,
+                           &source_ts) == 1)
+                        latency_add(wall_clock() - source_ts, "producer");
+        }
+
+
+	if ((rkmessage->err &&
+	     (cnt.msgs_dr_err < 50 ||
+              !(cnt.msgs_dr_err % (dispintvl / 1000)))) ||
+	    !last || msgs_wait_cnt < 5 ||
+	    !(msgs_wait_cnt % dr_disp_div) || 
+	    (now - last) >= dispintvl * 1000 ||
+            verbosity >= 3) {
+		if (rkmessage->err && verbosity >= 2)
+			printf("%% Message delivery failed: %s [%"PRId32"]: "
+			       "%s (%li remain)\n",
+			       rd_kafka_topic_name(rkmessage->rkt),
+			       rkmessage->partition,
+			       rd_kafka_err2str(rkmessage->err),
+			       msgs_wait_cnt);
+		else if (verbosity > 2)
+			printf("%% Message delivered (offset %"PRId64"): "
+                               "%li remain\n",
+                               rkmessage->offset, msgs_wait_cnt);
+		if (verbosity >= 3 && do_seq)
+			printf(" --> \"%.*s\"\n",
+                               (int)rkmessage->len,
+                               (const char *)rkmessage->payload);
+		last = now;
+	}
+
+        if (report_offset)
+                cnt.last_offset = rkmessage->offset;
+
+	if (msgs_wait_produce_cnt == 0 && msgs_wait_cnt == 0 && !forever) {
+		if (verbosity >= 2)
+			printf("All messages delivered!\n");
+		t_end = rd_clock();
+		run = 0;
+	}
+
+	if (exit_after && exit_after <= msgs) {
+		printf("%% Hard exit after %i messages, as requested\n",
+		       exit_after);
+		exit(0);
+	}
+}
+
+
+static void msg_consume (rd_kafka_message_t *rkmessage, void *opaque) {
+
+	if (rkmessage->err) {
+		if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) {
+                        cnt.offset = rkmessage->offset;
+
+                        if (verbosity >= 1)
+                                printf("%% Consumer reached end of "
+                                       "%s [%"PRId32"] "
+                                       "message queue at offset %"PRId64"\n",
+                                       rd_kafka_topic_name(rkmessage->rkt),
+                                       rkmessage->partition, rkmessage->offset);
+
+			if (exit_eof && ++eof_cnt == partition_cnt)
+				run = 0;
+
+			return;
+		}
+
+		printf("%% Consume error for topic \"%s\" [%"PRId32"] "
+		       "offset %"PRId64": %s\n",
+		       rkmessage->rkt ? rd_kafka_topic_name(rkmessage->rkt):"",
+		       rkmessage->partition,
+		       rkmessage->offset,
+		       rd_kafka_message_errstr(rkmessage));
+
+                if (rkmessage->err == RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION ||
+                    rkmessage->err == RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC)
+                        run = 0;
+
+                cnt.msgs_dr_err++;
+		return;
+	}
+
+	/* Start measuring from first message received */
+	if (!cnt.t_start)
+		cnt.t_start = cnt.t_last = rd_clock();
+
+        cnt.offset = rkmessage->offset;
+	cnt.msgs++;
+	cnt.bytes += rkmessage->len;
+
+	if (verbosity >= 3 ||
+            (verbosity >= 2 && !(cnt.msgs % 1000000)))
+		printf("@%"PRId64": %.*s: %.*s\n",
+		       rkmessage->offset,
+                       (int)rkmessage->key_len, (char *)rkmessage->key,
+		       (int)rkmessage->len, (char *)rkmessage->payload);
+
+
+        if (latency_mode) {
+                int64_t remote_ts, ts;
+
+                if (rkmessage->len > 8 &&
+                    !memcmp(rkmessage->payload, "LATENCY:", 8) &&
+                    sscanf(rkmessage->payload, "LATENCY:%"SCNd64,
+                           &remote_ts) == 1) {
+                        ts = wall_clock() - remote_ts;
+                        if (ts > 0 && ts < (1000000 * 60 * 5)) {
+                                latency_add(ts, "consumer");
+                        } else {
+                                if (verbosity >= 1)
+                                        printf("Received latency timestamp is too far off: %"PRId64"us (message offset %"PRId64"): ignored\n",
+                                               ts, rkmessage->offset);
+                        }
+                } else if (verbosity > 1)
+                        printf("not a LATENCY payload: %.*s\n",
+                               (int)rkmessage->len,
+                               (char *)rkmessage->payload);
+
+        }
+
+        if (read_hdrs) {
+                rd_kafka_headers_t *hdrs;
+                /* Force parsing of headers but don't do anything with them. */
+                rd_kafka_message_headers(rkmessage, &hdrs);
+        }
+
+        if (msgcnt != -1 && (int)cnt.msgs >= msgcnt)
+                run = 0;
+}
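
Both msg_delivered() and msg_consume() above parse latency-mode payloads of the form "LATENCY:<wall-clock-us>". The code that actually formats those payloads lives in the truncated part of main(), so the following is only a hedged sketch of the convention, reusing the wall_clock() helper defined earlier in this file.

#include <inttypes.h>
#include <stdio.h>

/* Hedged sketch: format a latency-mode payload the way the parsers above
 * expect it ("LATENCY:" followed by a microsecond wall-clock timestamp). */
static int format_latency_payload (char *buf, size_t size) {
        return snprintf(buf, size, "LATENCY:%"PRIu64, wall_clock());
}
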
+
+
+static void rebalance_cb (rd_kafka_t *rk,
+			  rd_kafka_resp_err_t err,
+			  rd_kafka_topic_partition_list_t *partitions,
+			  void *opaque) {
+
+	switch (err)
+	{
+	case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS:
+		fprintf(stderr,
+			"%% Group rebalanced: %d partition(s) assigned\n",
+			partitions->cnt);
+		eof_cnt = 0;
+		partition_cnt = partitions->cnt;
+		rd_kafka_assign(rk, partitions);
+		break;
+
+	case RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS:
+		fprintf(stderr,
+			"%% Group rebalanced: %d partition(s) revoked\n",
+			partitions->cnt);
+		eof_cnt = 0;
+		partition_cnt = 0;
+		rd_kafka_assign(rk, NULL);
+		break;
+
+	default:
+		break;
+	}
+}
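
A hedged sketch (not part of the committed diff) of how rebalance_cb above is wired into a high-level consumer. The conf and topics arguments mirror the variables set up in main() further below; rd_kafka_conf_set_rebalance_cb(), rd_kafka_poll_set_consumer() and rd_kafka_subscribe() are standard librdkafka calls.

#include <librdkafka/rdkafka.h>

/* Hedged sketch: register rebalance_cb and subscribe so that broker-driven
 * assignments are delivered to the callback above. */
static rd_kafka_t *start_consumer (rd_kafka_conf_t *conf,
                                   rd_kafka_topic_partition_list_t *topics) {
        char errstr[512];
        rd_kafka_t *rk;

        rd_kafka_conf_set_rebalance_cb(conf, rebalance_cb);

        if (!(rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf,
                                errstr, sizeof(errstr))))
                return NULL;

        rd_kafka_poll_set_consumer(rk);  /* route consumer ops to consumer queue */
        rd_kafka_subscribe(rk, topics);  /* join the group for 'topics' */
        return rk;
}
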
+
+
+/**
+ * Find and extract single value from a two-level search.
+ * First find 'field1', then find 'field2' and extract its value.
+ * Returns 0 on miss else the value.
+ */
+static uint64_t json_parse_fields (const char *json, const char **end,
+                                   const char *field1, const char *field2) {
+        const char *t = json;
+        const char *t2;
+        int len1 = (int)strlen(field1);
+        int len2 = (int)strlen(field2);
+
+        while ((t2 = strstr(t, field1))) {
+                uint64_t v;
+
+                t = t2;
+                t += len1;
+
+                /* Find field */
+                if (!(t2 = strstr(t, field2)))
+                        continue;
+                t2 += len2;
+
+                while (isspace((int)*t2))
+                        t2++;
+
+                v = strtoull(t2, (char **)&t, 10);
+                if (t2 == t)
+                        continue;
+
+                *end = t;
+                return v;
+        }
+
+        *end = t + strlen(t);
+        return 0;
+}
+
+/**
+ * Parse various values from rdkafka stats
+ */
+static void json_parse_stats (const char *json) {
+        const char *t;
+#define MAX_AVGS 100 /* max number of brokers to scan for rtt */
+        uint64_t avg_rtt[MAX_AVGS+1];
+        int avg_rtt_i     = 0;
+
+        /* Store totals at end of array */
+        avg_rtt[MAX_AVGS]     = 0;
+
+        /* Extract all broker RTTs */
+        t = json;
+        while (avg_rtt_i < MAX_AVGS && *t) {
+                avg_rtt[avg_rtt_i] = json_parse_fields(t, &t,
+                                                       "\"rtt\":",
+                                                       "\"avg\":");
+
+                /* Skip low RTT values, means no messages are passing */
+                if (avg_rtt[avg_rtt_i] < 100 /*0.1ms*/)
+                        continue;
+
+
+                avg_rtt[MAX_AVGS] += avg_rtt[avg_rtt_i];
+                avg_rtt_i++;
+        }
+
+        if (avg_rtt_i > 0)
+                avg_rtt[MAX_AVGS] /= avg_rtt_i;
+
+        cnt.avg_rtt = avg_rtt[MAX_AVGS];
+}
+
+
+static int stats_cb (rd_kafka_t *rk, char *json, size_t json_len,
+		     void *opaque) {
+
+        /* Extract values for our own stats */
+        json_parse_stats(json);
+
+        if (stats_fp)
+                fprintf(stats_fp, "%s\n", json);
+	return 0;
+}
+
+#define _OTYPE_TAB      0x1  /* tabular format */
+#define _OTYPE_SUMMARY  0x2  /* summary format */
+#define _OTYPE_FORCE    0x4  /* force output regardless of interval timing */
+static void print_stats (rd_kafka_t *rk,
+                         int mode, int otype, const char *compression) {
+	rd_ts_t now = rd_clock();
+	rd_ts_t t_total;
+        static int rows_written = 0;
+        int print_header;
+        double latency_avg = 0.0f;
+        char extra[512];
+        int extra_of = 0;
+        *extra = '\0';
+
+	if (!(otype & _OTYPE_FORCE) &&
+            (((otype & _OTYPE_SUMMARY) && verbosity == 0) ||
+             cnt.t_last + dispintvl > now))
+		return;
+
+        print_header = !rows_written ||(verbosity > 0 && !(rows_written % 20));
+
+	if (cnt.t_end_send)
+		t_total = cnt.t_end_send - cnt.t_start;
+	else if (cnt.t_end)
+		t_total = cnt.t_end - cnt.t_start;
+	else if (cnt.t_start)
+		t_total = now - cnt.t_start;
+	else
+		t_total = 1;
+
+        if (latency_mode && cnt.latency_cnt)
+                latency_avg = (double)cnt.latency_sum /
+                        (double)cnt.latency_cnt;
+
+        if (mode == 'P') {
+
+                if (otype & _OTYPE_TAB) {
+#define ROW_START()        do {} while (0)
+#define COL_HDR(NAME)      printf("| %10.10s ", (NAME))
+#define COL_PR64(NAME,VAL) printf("| %10"PRIu64" ", (VAL))
+#define COL_PRF(NAME,VAL)  printf("| %10.2f ", (VAL))
+#define ROW_END()          do {                 \
+                                printf("\n");   \
+                                rows_written++; \
+                        } while (0)
+
+                        if (print_header) {
+                                /* First time, print header */
+                                ROW_START();
+                                COL_HDR("elapsed");
+                                COL_HDR("msgs");
+                                COL_HDR("bytes");
+                                COL_HDR("rtt");
+                                COL_HDR("dr");
+                                COL_HDR("dr_m/s");
+                                COL_HDR("dr_MB/s");
+                                COL_HDR("dr_err");
+                                COL_HDR("tx_err");
+                                COL_HDR("outq");
+                                if (report_offset)
+                                        COL_HDR("offset");
+                                if (latency_mode) {
+                                        COL_HDR("lat_curr");
+                                        COL_HDR("lat_avg");
+                                        COL_HDR("lat_lo");
+                                        COL_HDR("lat_hi");
+                                }
+
+                                ROW_END();
+                        }
+
+                        ROW_START();
+                        COL_PR64("elapsed", t_total / 1000);
+                        COL_PR64("msgs", cnt.msgs);
+                        COL_PR64("bytes", cnt.bytes);
+                        COL_PR64("rtt", cnt.avg_rtt / 1000);
+                        COL_PR64("dr", cnt.msgs_dr_ok);
+                        COL_PR64("dr_m/s",
+                                 ((cnt.msgs_dr_ok * 1000000) / t_total));
+                        COL_PRF("dr_MB/s",
+                                (float)((cnt.bytes_dr_ok) / (float)t_total));
+                        COL_PR64("dr_err", cnt.msgs_dr_err);
+                        COL_PR64("tx_err", cnt.tx_err);
+                        COL_PR64("outq",
+                                 rk ? (uint64_t)rd_kafka_outq_len(rk) : 0);
+                        if (report_offset)
+                                COL_PR64("offset", (uint64_t)cnt.last_offset);
+                        if (latency_mode) {
+                                COL_PRF("lat_curr", cnt.latency_last / 1000.0f);
+                                COL_PRF("lat_avg", latency_avg / 1000.0f);
+                                COL_PRF("lat_lo", cnt.latency_lo / 1000.0f);
+                                COL_PRF("lat_hi", cnt.latency_hi / 1000.0f);
+                        }
+                        ROW_END();
+                }
+
+                if (otype & _OTYPE_SUMMARY) {
+                        printf("%% %"PRIu64" messages produced "
+                               "(%"PRIu64" bytes), "
+                               "%"PRIu64" delivered "
+                               "(offset %"PRId64", %"PRIu64" failed) "
+                               "in %"PRIu64"ms: %"PRIu64" msgs/s and "
+                               "%.02f MB/s, "
+                               "%"PRIu64" produce failures, %i in queue, "
+                               "%s compression\n",
+                               cnt.msgs, cnt.bytes,
+                               cnt.msgs_dr_ok, cnt.last_offset, cnt.msgs_dr_err,
+                               t_total / 1000,
+                               ((cnt.msgs_dr_ok * 1000000) / t_total),
+                               (float)((cnt.bytes_dr_ok) / (float)t_total),
+                               cnt.tx_err,
+                               rk ? rd_kafka_outq_len(rk) : 0,
+                               compression);
+                }
+
+        } else {
+
+                if (otype & _OTYPE_TAB) {
+                        if (print_header) {
+                                /* First time, print header */
+                                ROW_START();
+                                COL_HDR("elapsed");
+                                COL_HDR("msgs");
+                                COL_HDR("bytes");
+                                COL_HDR("rtt");
+                                COL_HDR("m/s");
+                                COL_HDR("MB/s");
+                                COL_HDR("rx_err");
+                                COL_HDR("offset");
+                                if (latency_mode) {
+                                        COL_HDR("lat_curr");
+                                        COL_HDR("lat_avg");
+                                        COL_HDR("lat_lo");
+                                        COL_HDR("lat_hi");
+                                }
+                                ROW_END();
+                        }
+
+                        ROW_START();
+                        COL_PR64("elapsed", t_total / 1000);
+                        COL_PR64("msgs", cnt.msgs);
+                        COL_PR64("bytes", cnt.bytes);
+                        COL_PR64("rtt", cnt.avg_rtt / 1000);
+                        COL_PR64("m/s",
+                                 ((cnt.msgs * 1000000) / t_total));
+                        COL_PRF("MB/s",
+                                (float)((cnt.bytes) / (float)t_total));
+                        COL_PR64("rx_err", cnt.msgs_dr_err);
+                        COL_PR64("offset", cnt.offset);
+                        if (latency_mode) {
+                                COL_PRF("lat_curr", cnt.latency_last / 1000.0f);
+                                COL_PRF("lat_avg", latency_avg / 1000.0f);
+                                COL_PRF("lat_lo", cnt.latency_lo / 1000.0f);
+                                COL_PRF("lat_hi", cnt.latency_hi / 1000.0f);
+                        }
+                        ROW_END();
+
+                }
+
+                if (otype & _OTYPE_SUMMARY) {
+                        if (latency_avg >= 1.0f)
+                                extra_of += rd_snprintf(extra+extra_of,
+                                                     sizeof(extra)-extra_of,
+                                                     ", latency "
+                                                     "curr/avg/lo/hi "
+                                                     "%.2f/%.2f/%.2f/%.2fms",
+                                                     cnt.latency_last / 1000.0f,
+                                                     latency_avg  / 1000.0f,
+                                                     cnt.latency_lo / 1000.0f,
+                                                     cnt.latency_hi / 1000.0f);
+                        printf("%% %"PRIu64" messages (%"PRIu64" bytes) "
+                               "consumed in %"PRIu64"ms: %"PRIu64" msgs/s "
+                               "(%.02f MB/s)"
+                               "%s\n",
+                               cnt.msgs, cnt.bytes,
+                               t_total / 1000,
+                               ((cnt.msgs * 1000000) / t_total),
+                               (float)((cnt.bytes) / (float)t_total),
+                               extra);
+                }
+
+                if (incremental_mode && now > cnt.t_last) {
+                        uint64_t i_msgs = cnt.msgs - cnt.msgs_last;
+                        uint64_t i_bytes = cnt.bytes - cnt.bytes_last;
+                        uint64_t i_time = cnt.t_last ? now - cnt.t_last : 0;
+
+                        printf("%% INTERVAL: %"PRIu64" messages "
+                               "(%"PRIu64" bytes) "
+                               "consumed in %"PRIu64"ms: %"PRIu64" msgs/s "
+                               "(%.02f MB/s)"
+                               "%s\n",
+                               i_msgs, i_bytes,
+                               i_time / 1000,
+                               ((i_msgs * 1000000) / i_time),
+                               (float)((i_bytes) / (float)i_time),
+                               extra);
+
+                }
+        }
+
+	cnt.t_last = now;
+	cnt.msgs_last = cnt.msgs;
+	cnt.bytes_last = cnt.bytes;
+}
+
+
+static void sig_usr1 (int sig) {
+	rd_kafka_dump(stdout, global_rk);
+}
+
+
+/**
+ * @brief Read config from file
+ * @returns -1 on error, else 0.
+ */
+static int read_conf_file (rd_kafka_conf_t *conf,
+                           rd_kafka_topic_conf_t *tconf, const char *path) {
+        FILE *fp;
+        char buf[512];
+        int line = 0;
+        char errstr[512];
+
+        if (!(fp = fopen(path, "r"))) {
+                fprintf(stderr, "%% Failed to open %s: %s\n",
+                        path, strerror(errno));
+                return -1;
+        }
+
+        while (fgets(buf, sizeof(buf), fp)) {
+                char *s = buf;
+                char *t;
+                rd_kafka_conf_res_t r = RD_KAFKA_CONF_UNKNOWN;
+
+                line++;
+
+                while (isspace((int)*s))
+                        s++;
+
+                if (!*s || *s == '#')
+                        continue;
+
+                if ((t = strchr(buf, '\n')))
+                        *t = '\0';
+
+                t = strchr(buf, '=');
+                if (!t || t == s || !*(t+1)) {
+                        fprintf(stderr, "%% %s:%d: expected key=value\n",
+                                path, line);
+                        fclose(fp);
+                        return -1;
+                }
+
+                *(t++) = '\0';
+
+                /* Try property on topic config first */
+                if (tconf)
+                        r = rd_kafka_topic_conf_set(tconf, s, t,
+                                                    errstr, sizeof(errstr));
+
+                /* Try global config */
+                if (r == RD_KAFKA_CONF_UNKNOWN)
+                        r = rd_kafka_conf_set(conf, s, t,
+                                              errstr, sizeof(errstr));
+
+                if (r == RD_KAFKA_CONF_OK)
+                        continue;
+
+                fprintf(stderr, "%% %s:%d: %s=%s: %s\n",
+                        path, line, s, t, errstr);
+                fclose(fp);
+                return -1;
+        }
+
+        fclose(fp);
+
+        return 0;
+}
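
read_conf_file() above accepts plain key=value lines, with '#' starting a comment; each key is tried on the topic configuration first and then falls back to the global configuration. A hedged sketch with a hypothetical file name and property values, applied to the conf/topic_conf handles created in main():

/* Hedged sketch: a hypothetical example.conf accepted by read_conf_file():
 *
 *   # tuning overrides (hypothetical values)
 *   compression.codec=lz4
 *   queue.buffering.max.ms=50
 *   request.required.acks=1
 */
if (read_conf_file(conf, topic_conf, "example.conf") == -1)
        exit(1);
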
+
+
+static rd_kafka_resp_err_t do_produce (rd_kafka_t *rk,
+                                       rd_kafka_topic_t *rkt, int32_t partition,
+                                       int msgflags,
+                                       void *payload, size_t size,
+                                       const void *key, size_t key_size,
+                                       const rd_kafka_headers_t *hdrs) {
+
+        /* Send/Produce message. */
+        if (hdrs) {
+                rd_kafka_headers_t *hdrs_copy;
+                rd_kafka_resp_err_t err;
+
+                hdrs_copy = rd_kafka_headers_copy(hdrs);
+
+                err = rd_kafka_producev(
+                        rk,
+                        RD_KAFKA_V_RKT(rkt),
+                        RD_KAFKA_V_PARTITION(partition),
+                        RD_KAFKA_V_MSGFLAGS(msgflags),
+                        RD_KAFKA_V_VALUE(payload, size),
+                        RD_KAFKA_V_KEY(key, key_size),
+                        RD_KAFKA_V_HEADERS(hdrs_copy),
+                        RD_KAFKA_V_END);
+
+                if (err)
+                        rd_kafka_headers_destroy(hdrs_copy);
+
+                return err;
+
+        } else {
+                if (rd_kafka_produce(rkt, partition, msgflags, payload, size,
+                                     key, key_size, NULL) == -1)
+                        return rd_kafka_last_error();
+        }
+
+        return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+
+int main (int argc, char **argv) {
+	char *brokers = NULL;
+	char mode = 'C';
+	char *topic = NULL;
+	const char *key = NULL;
+        int *partitions = NULL;
+	int opt;
+	int sendflags = 0;
+	char *msgpattern = "librdkafka_performance testing!";
+	int msgsize = (int)strlen(msgpattern);
+	const char *debug = NULL;
+	rd_ts_t now;
+	char errstr[512];
+	uint64_t seq = 0;
+	int seed = (int)time(NULL);
+        rd_kafka_t *rk;
+	rd_kafka_topic_t *rkt;
+	rd_kafka_conf_t *conf;
+	rd_kafka_topic_conf_t *topic_conf;
+	rd_kafka_queue_t *rkqu = NULL;
+	const char *compression = "no";
+	int64_t start_offset = 0;
+	int batch_size = 0;
+	int idle = 0;
+        const char *stats_cmd = NULL;
+        char *stats_intvlstr = NULL;
+        char tmp[128];
+        char *tmp2;
+        int otype = _OTYPE_SUMMARY;
+        double dtmp;
+        int rate_sleep = 0;
+	rd_kafka_topic_partition_list_t *topics;
+        int exitcode = 0;
+        rd_kafka_headers_t *hdrs = NULL;
+        rd_kafka_resp_err_t err;
+
+	/* Kafka configuration */
+	conf = rd_kafka_conf_new();
+	rd_kafka_conf_set_error_cb(conf, err_cb);
+	rd_kafka_conf_set_throttle_cb(conf, throttle_cb);
+        rd_kafka_conf_set_offset_commit_cb(conf, offset_commit_cb);
+
+#ifdef SIGIO
+        /* Quick termination */
+	rd_snprintf(tmp, sizeof(tmp), "%i", SIGIO);
+	rd_kafka_conf_set(conf, "internal.termination.signal", tmp, NULL, 0);
+#endif
+
+	/* Producer config */
+	rd_kafka_conf_set(conf, "queue.buffering.max.messages", "500000",
+			  NULL, 0);
+	rd_kafka_conf_set(conf, "message.send.max.retries", "3", NULL, 0);
+	rd_kafka_conf_set(conf, "retry.backoff.ms", "500", NULL, 0);
+
+	/* Consumer config */
+	/* Tell rdkafka to (try to) maintain 1M messages
+	 * in its internal receive buffers. This is to avoid
+	 * application -> rdkafka -> broker  per-message ping-pong
+	 * latency.
+	 * The larger the local queue, the higher the performance.
+	 * Try other values with: ... -X queued.min.messages=1000
+	 */
+	rd_kafka_conf_set(conf, "queued.min.messages", "1000000", NULL, 0);
+	rd_kafka_conf_set(conf, "session.timeout.ms", "6000", NULL, 0);
+
+	/* Kafka topic configuration */
+	topic_conf = rd_kafka_topic_conf_new();
+	rd_kafka_topic_conf_set(topic_conf, "auto.offset.reset", "earliest",
+				NULL, 0);
+
+	topics = rd_kafka_topic_partition_list_new(1);
+
+	while ((opt =
+		getopt(argc, argv,
+		       "PCG:t:p:b:s:k:c:fi:MDd:m:S:x:"
+                       "R:a:z:o:X:B:eT:Y:qvIur:lA:OwNHH:")) != -1) {
+		switch (opt) {
+		case 'G':
+			if (rd_kafka_conf_set(conf, "group.id", optarg,
+					      errstr, sizeof(errstr)) !=
+			    RD_KAFKA_CONF_OK) {
+				fprintf(stderr, "%% %s\n", errstr);
+				exit(1);
+			}
+			/* FALLTHRU */
+		case 'P':
+		case 'C':
+			mode = opt;
+			break;
+		case 't':
+			rd_kafka_topic_partition_list_add(topics, optarg,
+							  RD_KAFKA_PARTITION_UA);
+			break;
+		case 'p':
+                        partition_cnt++;
+			partitions = realloc(partitions, sizeof(*partitions) * partition_cnt);
+			partitions[partition_cnt-1] = atoi(optarg);
+			break;
+
+		case 'b':
+			brokers = optarg;
+			break;
+		case 's':
+			msgsize = atoi(optarg);
+			break;
+		case 'k':
+			key = optarg;
+			break;
+		case 'c':
+			msgcnt = atoi(optarg);
+			break;
+		case 'D':
+			sendflags |= RD_KAFKA_MSG_F_FREE;
+			break;
+		case 'i':
+			dispintvl = atoi(optarg);
+			break;
+		case 'm':
+			msgpattern = optarg;
+			break;
+		case 'S':
+			seq = strtoull(optarg, NULL, 10);
+			do_seq = 1;
+			break;
+		case 'x':
+			exit_after = atoi(optarg);
+			break;
+		case 'R':
+			seed = atoi(optarg);
+			break;
+		case 'a':
+			if (rd_kafka_topic_conf_set(topic_conf,
+						    "request.required.acks",
+						    optarg,
+						    errstr, sizeof(errstr)) !=
+			    RD_KAFKA_CONF_OK) {
+				fprintf(stderr, "%% %s\n", errstr);
+				exit(1);
+			}
+			break;
+		case 'B':
+			batch_size = atoi(optarg);
+			break;
+		case 'z':
+			if (rd_kafka_conf_set(conf, "compression.codec",
+					      optarg,
+					      errstr, sizeof(errstr)) !=
+			    RD_KAFKA_CONF_OK) {
+				fprintf(stderr, "%% %s\n", errstr);
+				exit(1);
+			}
+			compression = optarg;
+			break;
+		case 'o':
+			if (!strcmp(optarg, "end"))
+				start_offset = RD_KAFKA_OFFSET_END;
+			else if (!strcmp(optarg, "beginning"))
+				start_offset = RD_KAFKA_OFFSET_BEGINNING;
+			else if (!strcmp(optarg, "stored"))
+				start_offset = RD_KAFKA_OFFSET_STORED;
+			else {
+				start_offset = strtoll(optarg, NULL, 10);
+
+				if (start_offset < 0)
+					start_offset = RD_KAFKA_OFFSET_TAIL(-start_offset);
+			}
+
+			break;
+		case 'e':
+			exit_eof = 1;
+			break;
+		case 'd':
+			debug = optarg;
+			break;
+                case 'H':
+                {
+                        char *name, *val;
+                        size_t name_sz = -1;
+
+                        if (!optarg) {
+                                read_hdrs = 1;
+                                break;
+                        }
+
+                        name = optarg;
+                        val = strchr(name, '=');
+                        if (val) {
+                                name_sz = (size_t)(val-name);
+                                val++; /* past the '=' */
+                        }
+
+                        if (!hdrs)
+                                hdrs = rd_kafka_headers_new(8);
+
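+                        /* A size of -1 tells rd_kafka_header_add() to use
+                         * strlen() for the name/value length. */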
+                        err = rd_kafka_header_add(hdrs, name, name_sz, val, -1);
+                        if (err) {
+                                fprintf(stderr,
+                                        "%% Failed to add header %s: %s\n",
+                                        name, rd_kafka_err2str(err));
+                                exit(1);
+                        }
+
+                        read_hdrs = 1;
+                }
+                break;
+		case 'X':
+		{
+			char *name, *val;
+			rd_kafka_conf_res_t res;
+
+			if (!strcmp(optarg, "list") ||
+			    !strcmp(optarg, "help")) {
+				rd_kafka_conf_properties_show(stdout);
+				exit(0);
+			}
+
+			name = optarg;
+			if (!(val = strchr(name, '='))) {
+				fprintf(stderr, "%% Expected "
+					"-X property=value, not %s\n", name);
+				exit(1);
+			}
+
+			*val = '\0';
+			val++;
+
+                        if (!strcmp(name, "file")) {
+                                if (read_conf_file(conf, topic_conf, val) == -1)
+                                        exit(1);
+                                break;
+                        }
+
+			res = RD_KAFKA_CONF_UNKNOWN;
+			/* Try "topic." prefixed properties on topic
+			 * conf first, and then fall through to global if
+			 * it didn't match a topic configuration property. */
+			if (!strncmp(name, "topic.", strlen("topic.")))
+				res = rd_kafka_topic_conf_set(topic_conf,
+							      name+
+							      strlen("topic."),
+							      val,
+							      errstr,
+							      sizeof(errstr));
+
+			if (res == RD_KAFKA_CONF_UNKNOWN)
+				res = rd_kafka_conf_set(conf, name, val,
+							errstr, sizeof(errstr));
+
+			if (res != RD_KAFKA_CONF_OK) {
+				fprintf(stderr, "%% %s\n", errstr);
+				exit(1);
+			}
+		}
+		break;
+
+		case 'T':
+                        stats_intvlstr = optarg;
+			break;
+                case 'Y':
+                        stats_cmd = optarg;
+                        break;
+
+		case 'q':
+                        verbosity--;
+			break;
+
+		case 'v':
+                        verbosity++;
+			break;
+
+		case 'I':
+			idle = 1;
+			break;
+
+                case 'u':
+                        otype = _OTYPE_TAB;
+                        verbosity--; /* remove some fluff */
+                        break;
+
+                case 'r':
+                        dtmp = strtod(optarg, &tmp2);
+                        if (tmp2 == optarg ||
+                            (dtmp >= -0.001 && dtmp <= 0.001)) {
+                                fprintf(stderr, "%% Invalid rate: %s\n",
+                                        optarg);
+                                exit(1);
+                        }
+
+                        rate_sleep = (int)(1000000.0 / dtmp);
+                        break;
+
+                case 'l':
+                        latency_mode = 1;
+			break;
+
+		case 'A':
+			if (!(latency_fp = fopen(optarg, "w"))) {
+				fprintf(stderr,
+					"%% Can't open %s: %s\n",
+					optarg, strerror(errno));
+				exit(1);
+			}
+                        break;
+
+                case 'O':
+                        if (rd_kafka_topic_conf_set(topic_conf,
+                                                    "produce.offset.report",
+                                                    "true",
+                                                    errstr, sizeof(errstr)) !=
+                            RD_KAFKA_CONF_OK) {
+                                fprintf(stderr, "%% %s\n", errstr);
+                                exit(1);
+                        }
+                        report_offset = 1;
+                        break;
+
+		case 'M':
+			incremental_mode = 1;
+			break;
+
+		case 'N':
+			with_dr = 0;
+			break;
+
+		default:
+                        fprintf(stderr, "Unknown option: %c\n", opt);
+			goto usage;
+		}
+	}
+
+	if (topics->cnt == 0 || optind != argc) {
+                if (optind < argc)
+                        fprintf(stderr, "Unknown argument: %s\n", argv[optind]);
+	usage:
+		fprintf(stderr,
+			"Usage: %s [-C|-P] -t <topic> "
+			"[-p <partition>] [-b <broker,broker..>] [options..]\n"
+			"\n"
+			"librdkafka version %s (0x%08x)\n"
+			"\n"
+			" Options:\n"
+			"  -C | -P |    Consumer or Producer mode\n"
+			"  -G <groupid> High-level Kafka Consumer mode\n"
+			"  -t <topic>   Topic to consume / produce\n"
+			"  -p <num>     Partition (defaults to random). "
+			"Multiple partitions are allowed in -C consumer mode.\n"
+			"  -M           Print consumer interval stats\n"
+			"  -b <brokers> Broker address list (host[:port],..)\n"
+			"  -s <size>    Message size (producer)\n"
+			"  -k <key>     Message key (producer)\n"
+                        "  -H <name[=value]> Add header to message (producer)\n"
+                        "  -H           Read message headers (consumer)\n"
+			"  -c <cnt>     Messages to transmit/receive\n"
+			"  -x <cnt>     Hard exit after transmitting <cnt> messages (producer)\n"
+			"  -D           Copy/Duplicate data buffer (producer)\n"
+			"  -i <ms>      Display interval\n"
+			"  -m <msg>     Message payload pattern\n"
+			"  -S <start>   Send a sequence number starting at "
+			"<start> as payload\n"
+			"  -R <seed>    Random seed value (defaults to time)\n"
+			"  -a <acks>    Required acks (producer): "
+			"-1, 0, 1, >1\n"
+			"  -B <size>    Consume batch size (# of msgs)\n"
+			"  -z <codec>   Enable compression:\n"
+			"               none|gzip|snappy\n"
+			"  -o <offset>  Start offset (consumer)\n"
+			"               beginning, end, stored, NNNNN or -NNNNN\n"
+			"  -d [facs..]  Enable debugging contexts:\n"
+			"               %s\n"
+			"  -X <prop=value> Set arbitrary librdkafka "
+			"configuration property\n"
+			"               Properties prefixed with \"topic.\" "
+			"will be set on topic object.\n"
+			"               Use '-X list' to see the full list\n"
+			"               of supported properties.\n"
+                        "  -X file=<path> Read config from file.\n"
+			"  -T <intvl>   Enable statistics from librdkafka at "
+			"specified interval (ms)\n"
+                        "  -Y <command> Pipe statistics to <command>\n"
+			"  -I           Idle: don't produce any messages\n"
+			"  -q           Decrease verbosity\n"
+                        "  -v           Increase verbosity (default 1)\n"
+                        "  -u           Output stats in table format\n"
+                        "  -r <rate>    Producer msg/s limit\n"
+                        "  -l           Latency measurement.\n"
+                        "               Needs two matching instances, one\n"
+                        "               consumer and one producer, both\n"
+                        "               running with the -l switch.\n"
+                        "               Producer: per-message latency stats\n"
+			"  -A <file>    Write per-message latency stats to "
+			"<file>. Requires -l\n"
+                        "  -O           Report produced offset (producer)\n"
+			"  -N           No delivery reports (producer)\n"
+			"\n"
+			" In Consumer mode:\n"
+			"  consumes messages and prints throughput\n"
+			"  If -B <..> is supplied the batch consumer\n"
+			"  mode is used, else the callback mode is used.\n"
+			"\n"
+			" In Producer mode:\n"
+			"  writes messages of size -s <..> and prints throughput\n"
+			"\n",
+			argv[0],
+			rd_kafka_version_str(), rd_kafka_version(),
+			RD_KAFKA_DEBUG_CONTEXTS);
+		exit(1);
+	}
+
+
+	dispintvl *= 1000; /* us */
+
+        if (verbosity > 1)
+                printf("%% Using random seed %i, verbosity level %i\n",
+                       seed, verbosity);
+	srand(seed);
+	signal(SIGINT, stop);
+#ifdef SIGUSR1
+	signal(SIGUSR1, sig_usr1);
+#endif
+
+
+	if (debug &&
+	    rd_kafka_conf_set(conf, "debug", debug, errstr, sizeof(errstr)) !=
+	    RD_KAFKA_CONF_OK) {
+		printf("%% Debug configuration failed: %s: %s\n",
+		       errstr, debug);
+		exit(1);
+	}
+
+        /* Always enable stats (for RTT extraction), and if the user supplied
+         * the -T <intvl> option let them see those stats as well. */
+        rd_kafka_conf_set_stats_cb(conf, stats_cb);
+
+        if (!stats_intvlstr) {
+                /* if no user-desired stats, adjust stats interval
+                 * to the display interval. */
+                rd_snprintf(tmp, sizeof(tmp), "%"PRId64, dispintvl / 1000);
+        }
+
+        if (rd_kafka_conf_set(conf, "statistics.interval.ms",
+                              stats_intvlstr ? stats_intvlstr : tmp,
+                              errstr, sizeof(errstr)) !=
+            RD_KAFKA_CONF_OK) {
+                fprintf(stderr, "%% %s\n", errstr);
+                exit(1);
+        }
+
+        if (latency_mode)
+                do_seq = 0;
+
+        if (stats_intvlstr) {
+                /* User enabled stats (-T) */
+
+#ifndef _MSC_VER
+                if (stats_cmd) {
+                        if (!(stats_fp = popen(stats_cmd, "we"))) {
+                                fprintf(stderr,
+                                        "%% Failed to start stats command: "
+                                        "%s: %s", stats_cmd, strerror(errno));
+                                exit(1);
+                        }
+                } else
+#endif
+                        stats_fp = stdout;
+        }
+
+	if (msgcnt != -1)
+		forever = 0;
+
+	topic = topics->elems[0].topic;
+
+	if (mode == 'P') {
+		/*
+		 * Producer
+		 */
+		char *sbuf;
+		char *pbuf;
+		int outq;
+		int keylen = key ? (int)strlen(key) : 0;
+		off_t rof = 0;
+		size_t plen = strlen(msgpattern);
+		int partition = partitions ? partitions[0] :
+			RD_KAFKA_PARTITION_UA;
+
+                if (latency_mode) {
+                        int minlen = (int)(strlen("LATENCY:") +
+                                           strlen("18446744073709551615 ")+1);
+                        msgsize = RD_MAX(minlen, msgsize);
+                        sendflags |= RD_KAFKA_MSG_F_COPY;
+		} else if (do_seq) {
+                        int minlen = (int)strlen("18446744073709551615 ")+1;
+                        if (msgsize < minlen)
+                                msgsize = minlen;
+
+			/* Force duplication of payload */
+                        sendflags |= RD_KAFKA_MSG_F_FREE;
+		}
+
+		sbuf = malloc(msgsize);
+
+		/* Copy payload content to new buffer */
+		while (rof < msgsize) {
+			size_t xlen = RD_MIN((size_t)msgsize-rof, plen);
+			memcpy(sbuf+rof, msgpattern, xlen);
+			rof += (off_t)xlen;
+		}
+
+		if (msgcnt == -1)
+			printf("%% Sending messages of size %i bytes\n",
+			       msgsize);
+		else
+			printf("%% Sending %i messages of size %i bytes\n",
+			       msgcnt, msgsize);
+
+		if (with_dr)
+			rd_kafka_conf_set_dr_msg_cb(conf, msg_delivered);
+
+		/* Create Kafka handle */
+		if (!(rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf,
+ 					errstr, sizeof(errstr)))) {
+			fprintf(stderr,
+				"%% Failed to create Kafka producer: %s\n",
+				errstr);
+			exit(1);
+		}
+
+                global_rk = rk;
+
+		/* Add broker(s) */
+		if (brokers && rd_kafka_brokers_add(rk, brokers) < 1) {
+			fprintf(stderr, "%% No valid brokers specified\n");
+			exit(1);
+		}
+
+		/* Explicitly create topic to avoid per-msg lookups. */
+		rkt = rd_kafka_topic_new(rk, topic, topic_conf);
+
+
+                if (rate_sleep && verbosity >= 2)
+                        fprintf(stderr,
+                                "%% Inter message rate limiter sleep %ius\n",
+                                rate_sleep);
+
+                dr_disp_div = msgcnt / 50;
+                if (dr_disp_div == 0)
+                        dr_disp_div = 10;
+
+		cnt.t_start = cnt.t_last = rd_clock();
+
+		msgs_wait_produce_cnt = msgcnt;
+
+		while (run && (msgcnt == -1 || (int)cnt.msgs < msgcnt)) {
+			/* Send/Produce message. */
+
+			if (idle) {
+				rd_kafka_poll(rk, 1000);
+				continue;
+			}
+
+                        if (latency_mode) {
+                                rd_snprintf(sbuf, msgsize-1,
+                                         "LATENCY:%"PRIu64,  wall_clock());
+                        } else if (do_seq) {
+                                rd_snprintf(sbuf,
+                                         msgsize-1, "%"PRIu64": ", seq);
+                                seq++;
+			}
+
+			if (sendflags & RD_KAFKA_MSG_F_FREE) {
+				/* Duplicate memory */
+				pbuf = malloc(msgsize);
+				memcpy(pbuf, sbuf, msgsize);
+			} else
+				pbuf = sbuf;
+
+                        if (msgsize == 0)
+                                pbuf = NULL;
+
+			cnt.tx++;
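+			/* Produce retry loop: RD_KAFKA_RESP_ERR__QUEUE_FULL means the
+			 * local producer queue is full (backpressure), so poll for
+			 * delivery reports to drain it and retry; any other error
+			 * aborts the run. */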
+			while (run &&
+                               (err = do_produce(rk, rkt, partition, sendflags,
+                                                 pbuf, msgsize,
+                                                 key, keylen, hdrs))) {
+				if (err == RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION)
+					printf("%% No such partition: "
+						   "%"PRId32"\n", partition);
+				else if (verbosity >= 3 ||
+					(err != RD_KAFKA_RESP_ERR__QUEUE_FULL && verbosity >= 1))
+					printf("%% produce error: %s%s\n",
+						   rd_kafka_err2str(err),
+						   err == RD_KAFKA_RESP_ERR__QUEUE_FULL ?
+						   " (backpressure)" : "");
+
+				cnt.tx_err++;
+				if (err != RD_KAFKA_RESP_ERR__QUEUE_FULL) {
+					run = 0;
+					break;
+				}
+				now = rd_clock();
+				if (verbosity >= 2 &&
+                                    cnt.t_enobufs_last + dispintvl <= now) {
+					printf("%% Backpressure %i "
+					       "(tx %"PRIu64", "
+					       "txerr %"PRIu64")\n",
+					       rd_kafka_outq_len(rk),
+					       cnt.tx, cnt.tx_err);
+					cnt.t_enobufs_last = now;
+				}
+
+				/* Poll to handle delivery reports */
+				rd_kafka_poll(rk, 10);
+
+                                print_stats(rk, mode, otype, compression);
+			}
+
+			msgs_wait_cnt++;
+			if (msgs_wait_produce_cnt != -1)
+				msgs_wait_produce_cnt--;
+			cnt.msgs++;
+			cnt.bytes += msgsize;
+
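+                        /* Rate limiting (-r): sleeps longer than 100us use the
+                         * OS sleep primitives, while shorter intervals busy-wait
+                         * on rd_clock() since usleep()/Sleep() granularity is
+                         * too coarse for them. */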
+                        if (rate_sleep) {
+				if (rate_sleep > 100) {
+#ifdef _MSC_VER
+					Sleep(rate_sleep / 1000);
+#else
+					usleep(rate_sleep);
+#endif
+				} else {
+					rd_ts_t next = rd_clock() + rate_sleep;
+					while (next > rd_clock())
+						;
+				}
+                        }
+
+			/* Must poll to handle delivery reports */
+			rd_kafka_poll(rk, 0);
+
+			print_stats(rk, mode, otype, compression);
+		}
+
+		forever = 0;
+                if (verbosity >= 2)
+                        printf("%% All messages produced, "
+                               "now waiting for %li deliveries\n",
+                               msgs_wait_cnt);
+
+		/* Wait for messages to be delivered */
+                while (run && rd_kafka_poll(rk, 1000) != -1)
+			print_stats(rk, mode, otype, compression);
+
+
+		outq = rd_kafka_outq_len(rk);
+                if (verbosity >= 2)
+                        printf("%% %i messages in outq\n", outq);
+		cnt.msgs -= outq;
+		cnt.bytes -= msgsize * outq;
+
+		cnt.t_end = t_end;
+
+		if (cnt.tx_err > 0)
+			printf("%% %"PRIu64" backpressures for %"PRIu64
+			       " produce calls: %.3f%% backpressure rate\n",
+			       cnt.tx_err, cnt.tx,
+			       ((double)cnt.tx_err / (double)cnt.tx) * 100.0);
+
+		/* Destroy topic */
+		rd_kafka_topic_destroy(rkt);
+
+		/* Destroy the handle */
+		rd_kafka_destroy(rk);
+                global_rk = rk = NULL;
+
+		free(sbuf);
+
+                exitcode = cnt.msgs == cnt.msgs_dr_ok ? 0 : 1;
+
+	} else if (mode == 'C') {
+		/*
+		 * Consumer
+		 */
+
+		rd_kafka_message_t **rkmessages = NULL;
+		size_t i = 0;
+
+		/* Create Kafka handle */
+		if (!(rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf,
+					errstr, sizeof(errstr)))) {
+			fprintf(stderr,
+				"%% Failed to create Kafka consumer: %s\n",
+				errstr);
+			exit(1);
+		}
+
+                global_rk = rk;
+
+		/* Add broker(s) */
+		if (brokers && rd_kafka_brokers_add(rk, brokers) < 1) {
+			fprintf(stderr, "%% No valid brokers specified\n");
+			exit(1);
+		}
+
+		/* Create topic to consume from */
+		rkt = rd_kafka_topic_new(rk, topic, topic_conf);
+
+		/* Batch consumer */
+		if (batch_size)
+			rkmessages = malloc(sizeof(*rkmessages) * batch_size);
+
+		/* Start consuming */
+		rkqu = rd_kafka_queue_new(rk);
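+		/* Route each partition's messages to this single queue; in
+		 * queue mode they are then served with one
+		 * rd_kafka_consume_callback_queue() call. */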
+		for (i=0 ; i<(size_t)partition_cnt ; ++i) {
+			const int r = rd_kafka_consume_start_queue(rkt,
+				partitions[i], start_offset, rkqu);
+
+			if (r == -1) {
+                                fprintf(stderr, "%% Error creating queue: %s\n",
+                                        rd_kafka_err2str(rd_kafka_last_error()));
+				exit(1);
+			}
+		}
+
+		while (run && (msgcnt == -1 || msgcnt > (int)cnt.msgs)) {
+			/* Consume messages.
+			 * A message may either be a real message, or
+			 * an error signaling (if rkmessage->err is set).
+			 */
+			uint64_t fetch_latency;
+			ssize_t r;
+
+			fetch_latency = rd_clock();
+
+			if (batch_size) {
+				int i;
+				int partition = partitions ? partitions[0] :
+				    RD_KAFKA_PARTITION_UA;
+
+				/* Batch fetch mode */
+				r = rd_kafka_consume_batch(rkt, partition,
+							   1000,
+							   rkmessages,
+							   batch_size);
+				if (r != -1) {
+					for (i = 0 ; i < r ; i++) {
+						msg_consume(rkmessages[i],
+							NULL);
+						rd_kafka_message_destroy(
+							rkmessages[i]);
+					}
+				}
+			} else {
+				/* Queue mode */
+				r = rd_kafka_consume_callback_queue(rkqu, 1000,
+							msg_consume,
+							NULL);
+			}
+
+			cnt.t_fetch_latency += rd_clock() - fetch_latency;
+                        if (r == -1)
+                                fprintf(stderr, "%% Error: %s\n",
+                                        rd_kafka_err2str(rd_kafka_last_error()));
+
+			print_stats(rk, mode, otype, compression);
+
+			/* Poll to handle stats callbacks */
+			rd_kafka_poll(rk, 0);
+		}
+		cnt.t_end = rd_clock();
+
+		/* Stop consuming */
+		for (i=0 ; i<(size_t)partition_cnt ; ++i) {
+			int r = rd_kafka_consume_stop(rkt, partitions[i]);
+			if (r == -1) {
+                                fprintf(stderr,
+                                        "%% Error in consume_stop: %s\n",
+                                        rd_kafka_err2str(rd_kafka_last_error()));
+			}
+		}
+		rd_kafka_queue_destroy(rkqu);
+
+		/* Destroy topic */
+		rd_kafka_topic_destroy(rkt);
+
+		if (batch_size)
+			free(rkmessages);
+
+		/* Destroy the handle */
+		rd_kafka_destroy(rk);
+
+                global_rk = rk = NULL;
+
+	} else if (mode == 'G') {
+		/*
+		 * High-level balanced Consumer
+		 */
+		rd_kafka_resp_err_t err;
+
+		rd_kafka_conf_set_rebalance_cb(conf, rebalance_cb);
+		rd_kafka_conf_set_default_topic_conf(conf, topic_conf);
+
+		/* Create Kafka handle */
+		if (!(rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf,
+					errstr, sizeof(errstr)))) {
+			fprintf(stderr,
+				"%% Failed to create Kafka consumer: %s\n",
+				errstr);
+			exit(1);
+		}
+
+		/* Forward all events to consumer queue */
+		rd_kafka_poll_set_consumer(rk);
+
+                global_rk = rk;
+
+		/* Add broker(s) */
+		if (brokers && rd_kafka_brokers_add(rk, brokers) < 1) {
+			fprintf(stderr, "%% No valid brokers specified\n");
+			exit(1);
+		}
+
+		err = rd_kafka_subscribe(rk, topics);
+		if (err) {
+			fprintf(stderr, "%% Subscribe failed: %s\n",
+				rd_kafka_err2str(err));
+			exit(1);
+		}
+		fprintf(stderr, "%% Waiting for group rebalance..\n");
+
+		while (run && (msgcnt == -1 || msgcnt > (int)cnt.msgs)) {
+			/* Consume messages.
+			 * A message may either be a real message, or
+			 * an event (if rkmessage->err is set).
+			 */
+			rd_kafka_message_t *rkmessage;
+			uint64_t fetch_latency;
+
+			fetch_latency = rd_clock();
+
+			rkmessage = rd_kafka_consumer_poll(rk, 1000);
+			if (rkmessage) {
+				msg_consume(rkmessage, NULL);
+				rd_kafka_message_destroy(rkmessage);
+			}
+
+			cnt.t_fetch_latency += rd_clock() - fetch_latency;
+
+			print_stats(rk, mode, otype, compression);
+		}
+		cnt.t_end = rd_clock();
+
+		err = rd_kafka_consumer_close(rk);
+		if (err)
+			fprintf(stderr, "%% Failed to close consumer: %s\n",
+				rd_kafka_err2str(err));
+
+		rd_kafka_destroy(rk);
+	}
+
+        if (hdrs)
+                rd_kafka_headers_destroy(hdrs);
+
+	print_stats(NULL, mode, otype|_OTYPE_FORCE, compression);
+
+	if (cnt.t_fetch_latency && cnt.msgs)
+		printf("%% Average application fetch latency: %"PRIu64"us\n",
+		       cnt.t_fetch_latency / cnt.msgs);
+
+	if (latency_fp)
+		fclose(latency_fp);
+
+        if (stats_fp) {
+#ifndef _MSC_VER
+                pclose(stats_fp);
+#endif
+                stats_fp = NULL;
+        }
+
+        if (partitions)
+                free(partitions);
+
+	rd_kafka_topic_partition_list_destroy(topics);
+
+	/* Let background threads clean up and terminate cleanly. */
+	rd_kafka_wait_destroyed(2000);
+
+	return exitcode;
+}

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/examples/rdkafka_simple_producer.c
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/examples/rdkafka_simple_producer.c b/thirdparty/librdkafka-0.11.4/examples/rdkafka_simple_producer.c
new file mode 100644
index 0000000..a353d01
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/examples/rdkafka_simple_producer.c
@@ -0,0 +1,260 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2017, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * Simple Apache Kafka producer
+ * using the Kafka driver from librdkafka
+ * (https://github.com/edenhill/librdkafka)
+ */
+
+#include <stdio.h>
+#include <signal.h>
+#include <string.h>
+
+
+/* Typical include path would be <librdkafka/rdkafka.h>, but this program
+ * is built from within the librdkafka source tree and thus differs. */
+#include "rdkafka.h"
+
+
+static int run = 1;
+
+/**
+ * @brief Signal termination of program
+ */
+static void stop (int sig) {
+        run = 0;
+        fclose(stdin); /* abort fgets() */
+}
+
+
+/**
+ * @brief Message delivery report callback.
+ *
+ * This callback is called exactly once per message, indicating if
+ * the message was successfully delivered
+ * (rkmessage->err == RD_KAFKA_RESP_ERR_NO_ERROR) or permanently
+ * failed delivery (rkmessage->err != RD_KAFKA_RESP_ERR_NO_ERROR).
+ *
+ * The callback is triggered from rd_kafka_poll() and executes on
+ * the application's thread.
+ */
+static void dr_msg_cb (rd_kafka_t *rk,
+                       const rd_kafka_message_t *rkmessage, void *opaque) {
+        if (rkmessage->err)
+                fprintf(stderr, "%% Message delivery failed: %s\n",
+                        rd_kafka_err2str(rkmessage->err));
+        else
+                fprintf(stderr,
+                        "%% Message delivered (%zd bytes, "
+                        "partition %"PRId32")\n",
+                        rkmessage->len, rkmessage->partition);
+
+        /* The rkmessage is destroyed automatically by librdkafka */
+}
+
+
+
+int main (int argc, char **argv) {
+        rd_kafka_t *rk;         /* Producer instance handle */
+        rd_kafka_topic_t *rkt;  /* Topic object */
+        rd_kafka_conf_t *conf;  /* Temporary configuration object */
+        char errstr[512];       /* librdkafka API error reporting buffer */
+        char buf[512];          /* Message value temporary buffer */
+        const char *brokers;    /* Argument: broker list */
+        const char *topic;      /* Argument: topic to produce to */
+
+        /*
+         * Argument validation
+         */
+        if (argc != 3) {
+                fprintf(stderr, "%% Usage: %s <broker> <topic>\n", argv[0]);
+                return 1;
+        }
+
+        brokers = argv[1];
+        topic   = argv[2];
+
+
+        /*
+         * Create Kafka client configuration place-holder
+         */
+        conf = rd_kafka_conf_new();
+
+        /* Set bootstrap broker(s) as a comma-separated list of
+         * host or host:port (default port 9092).
+         * librdkafka will use the bootstrap brokers to acquire the full
+         * set of brokers from the cluster. */
+        if (rd_kafka_conf_set(conf, "bootstrap.servers", brokers,
+                              errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) {
+                fprintf(stderr, "%s\n", errstr);
+                return 1;
+        }
+
+        /* Set the delivery report callback.
+         * This callback will be called once per message to inform
+         * the application if delivery succeeded or failed.
+         * See dr_msg_cb() above. */
+        rd_kafka_conf_set_dr_msg_cb(conf, dr_msg_cb);
+
+
+        /*
+         * Create producer instance.
+         *
+         * NOTE: rd_kafka_new() takes ownership of the conf object
+         *       and the application must not reference it again after
+         *       this call.
+         */
+        rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
+        if (!rk) {
+                fprintf(stderr,
+                        "%% Failed to create new producer: %s\n", errstr);
+                return 1;
+        }
+
+
+        /* Create topic object that will be reused for each message
+         * produced.
+         *
+         * Both the producer instance (rd_kafka_t) and topic objects (topic_t)
+         * are long-lived objects that should be reused as much as possible.
+         */
+        rkt = rd_kafka_topic_new(rk, topic, NULL);
+        if (!rkt) {
+                fprintf(stderr, "%% Failed to create topic object: %s\n",
+                        rd_kafka_err2str(rd_kafka_last_error()));
+                rd_kafka_destroy(rk);
+                return 1;
+        }
+
+        /* Signal handler for clean shutdown */
+        signal(SIGINT, stop);
+
+        fprintf(stderr,
+                "%% Type some text and hit enter to produce message\n"
+                "%% Or just hit enter to only serve delivery reports\n"
+                "%% Press Ctrl-C or Ctrl-D to exit\n");
+
+        while (run && fgets(buf, sizeof(buf), stdin)) {
+                size_t len = strlen(buf);
+
+                if (buf[len-1] == '\n') /* Remove newline */
+                        buf[--len] = '\0';
+
+                if (len == 0) {
+                        /* Empty line: only serve delivery reports */
+                        rd_kafka_poll(rk, 0/*non-blocking */);
+                        continue;
+                }
+
+                /*
+                 * Send/Produce message.
+                 * This is an asynchronous call, on success it will only
+                 * enqueue the message on the internal producer queue.
+                 * The actual delivery attempts to the broker are handled
+                 * by background threads.
+                 * The previously registered delivery report callback
+                 * (dr_msg_cb) is used to signal back to the application
+                 * when the message has been delivered (or failed).
+                 */
+        retry:
+                if (rd_kafka_produce(
+                            /* Topic object */
+                            rkt,
+                            /* Use builtin partitioner to select partition*/
+                            RD_KAFKA_PARTITION_UA,
+                            /* Make a copy of the payload. */
+                            RD_KAFKA_MSG_F_COPY,
+                            /* Message payload (value) and length */
+                            buf, len,
+                            /* Optional key and its length */
+                            NULL, 0,
+                            /* Message opaque, provided in
+                             * delivery report callback as
+                             * msg_opaque. */
+                            NULL) == -1) {
+                        /**
+                         * Failed to *enqueue* message for producing.
+                         */
+                        fprintf(stderr,
+                                "%% Failed to produce to topic %s: %s\n",
+                                rd_kafka_topic_name(rkt),
+                                rd_kafka_err2str(rd_kafka_last_error()));
+
+                        /* Poll to handle delivery reports */
+                        if (rd_kafka_last_error() ==
+                            RD_KAFKA_RESP_ERR__QUEUE_FULL) {
+                                /* If the internal queue is full, wait for
+                                 * messages to be delivered and then retry.
+                                 * The internal queue represents both
+                                 * messages to be sent and messages that have
+                                 * been sent or failed, awaiting their
+                                 * delivery report callback to be called.
+                                 *
+                                 * The internal queue is limited by the
+                                 * configuration property
+                                 * queue.buffering.max.messages */
+                                rd_kafka_poll(rk, 1000/*block for max 1000ms*/);
+                                goto retry;
+                        }
+                } else {
+                        fprintf(stderr, "%% Enqueued message (%zd bytes) "
+                                "for topic %s\n",
+                                len, rd_kafka_topic_name(rkt));
+                }
+
+
+                /* A producer application should continually serve
+                 * the delivery report queue by calling rd_kafka_poll()
+                 * at frequent intervals.
+                 * Either put the poll call in your main loop, or in a
+                 * dedicated thread, or call it after every
+                 * rd_kafka_produce() call.
+                 * Just make sure that rd_kafka_poll() is still called
+                 * during periods where you are not producing any messages
+                 * to make sure previously produced messages have their
+                 * delivery report callback served (and any other callbacks
+                 * you register). */
+                rd_kafka_poll(rk, 0/*non-blocking*/);
+        }
+
+
+        /* Wait for final messages to be delivered or fail.
+         * rd_kafka_flush() is an abstraction over rd_kafka_poll() which
+         * waits for all messages to be delivered. */
+        fprintf(stderr, "%% Flushing final messages..\n");
+        rd_kafka_flush(rk, 10*1000 /* wait for max 10 seconds */);
+
+        /* Destroy topic object */
+        rd_kafka_topic_destroy(rkt);
+
+        /* Destroy the producer instance */
+        rd_kafka_destroy(rk);
+
+        return 0;
+}

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/examples/rdkafka_zookeeper_example.c
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/examples/rdkafka_zookeeper_example.c b/thirdparty/librdkafka-0.11.4/examples/rdkafka_zookeeper_example.c
new file mode 100644
index 0000000..2f9a61e
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/examples/rdkafka_zookeeper_example.c
@@ -0,0 +1,728 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012, Magnus Edenhill
+ * All rights reserved.
+ * 
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met: 
+ * 
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer. 
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution. 
+ * 
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * Apache Kafka consumer & producer example programs
+ * using the Kafka driver from librdkafka
+ * (https://github.com/edenhill/librdkafka)
+ */
+
+#include <ctype.h>
+#include <signal.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <syslog.h>
+#include <sys/time.h>
+#include <errno.h>
+
+/* Typical include path would be <librdkafka/rdkafka.h>, but this program
+ * is built from within the librdkafka source tree and thus differs. */
+#include "rdkafka.h"  /* for Kafka driver */
+
+#include <zookeeper.h>
+#include <zookeeper.jute.h>
+#include <jansson.h>
+
+#define BROKER_PATH "/brokers/ids"
+
+static int run = 1;
+static rd_kafka_t *rk;
+static int exit_eof = 0;
+static int quiet = 0;
+static 	enum {
+	OUTPUT_HEXDUMP,
+	OUTPUT_RAW,
+} output = OUTPUT_HEXDUMP;
+
+static void stop (int sig) {
+	run = 0;
+	fclose(stdin); /* abort fgets() */
+}
+
+
+static void hexdump (FILE *fp, const char *name, const void *ptr, size_t len) {
+	const char *p = (const char *)ptr;
+	int of = 0;
+
+
+	if (name)
+		fprintf(fp, "%s hexdump (%zd bytes):\n", name, len);
+
+	for (of = 0 ; of < len ; of += 16) {
+		char hexen[16*3+1];
+		char charen[16+1];
+		int hof = 0;
+
+		int cof = 0;
+		int i;
+
+		for (i = of ; i < of + 16 && i < len ; i++) {
+			hof += sprintf(hexen+hof, "%02x ", p[i] & 0xff);
+			cof += sprintf(charen+cof, "%c",
+				       isprint((int)p[i]) ? p[i] : '.');
+		}
+		fprintf(fp, "%08x: %-48s %-16s\n",
+			of, hexen, charen);
+	}
+}
+
+/**
+ * Kafka logger callback (optional)
+ */
+static void logger (const rd_kafka_t *rk, int level,
+		    const char *fac, const char *buf) {
+	struct timeval tv;
+	gettimeofday(&tv, NULL);
+	fprintf(stderr, "%u.%03u RDKAFKA-%i-%s: %s: %s\n",
+		(int)tv.tv_sec, (int)(tv.tv_usec / 1000),
+		level, fac, rd_kafka_name(rk), buf);
+}
+
+/**
+ * Message delivery report callback.
+ * Called once for each message.
+ * See rdkafka.h for more information.
+ */
+static void msg_delivered (rd_kafka_t *rk,
+			   void *payload, size_t len,
+			   int error_code,
+			   void *opaque, void *msg_opaque) {
+
+	if (error_code)
+		fprintf(stderr, "%% Message delivery failed: %s\n",
+			rd_kafka_err2str(error_code));
+	else if (!quiet)
+		fprintf(stderr, "%% Message delivered (%zd bytes)\n", len);
+}
+
+
+static void msg_consume (rd_kafka_message_t *rkmessage,
+			 void *opaque) {
+	if (rkmessage->err) {
+		if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) {
+			fprintf(stderr,
+				"%% Consumer reached end of %s [%"PRId32"] "
+			       "message queue at offset %"PRId64"\n",
+			       rd_kafka_topic_name(rkmessage->rkt),
+			       rkmessage->partition, rkmessage->offset);
+
+			if (exit_eof)
+				run = 0;
+
+			return;
+		}
+
+		fprintf(stderr, "%% Consume error for topic \"%s\" [%"PRId32"] "
+		       "offset %"PRId64": %s\n",
+		       rd_kafka_topic_name(rkmessage->rkt),
+		       rkmessage->partition,
+		       rkmessage->offset,
+		       rd_kafka_message_errstr(rkmessage));
+		return;
+	}
+
+	if (!quiet)
+		fprintf(stdout, "%% Message (offset %"PRId64", %zd bytes):\n",
+			rkmessage->offset, rkmessage->len);
+
+	if (rkmessage->key_len) {
+		if (output == OUTPUT_HEXDUMP)
+			hexdump(stdout, "Message Key",
+				rkmessage->key, rkmessage->key_len);
+		else
+			printf("Key: %.*s\n",
+			       (int)rkmessage->key_len, (char *)rkmessage->key);
+	}
+
+	if (output == OUTPUT_HEXDUMP)
+		hexdump(stdout, "Message Payload",
+			rkmessage->payload, rkmessage->len);
+	else
+		printf("%.*s\n",
+		       (int)rkmessage->len, (char *)rkmessage->payload);
+}
+
+
+static void metadata_print (const char *topic,
+                            const struct rd_kafka_metadata *metadata) {
+        int i, j, k;
+
+        printf("Metadata for %s (from broker %"PRId32": %s):\n",
+               topic ? : "all topics",
+               metadata->orig_broker_id,
+               metadata->orig_broker_name);
+
+
+        /* Iterate brokers */
+        printf(" %i brokers:\n", metadata->broker_cnt);
+        for (i = 0 ; i < metadata->broker_cnt ; i++)
+                printf("  broker %"PRId32" at %s:%i\n",
+                       metadata->brokers[i].id,
+                       metadata->brokers[i].host,
+                       metadata->brokers[i].port);
+
+        /* Iterate topics */
+        printf(" %i topics:\n", metadata->topic_cnt);
+        for (i = 0 ; i < metadata->topic_cnt ; i++) {
+                const struct rd_kafka_metadata_topic *t = &metadata->topics[i];
+                printf("  topic \"%s\" with %i partitions:",
+                       t->topic,
+                       t->partition_cnt);
+                if (t->err) {
+                        printf(" %s", rd_kafka_err2str(t->err));
+                        if (t->err == RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE)
+                                printf(" (try again)");
+                }
+                printf("\n");
+
+                /* Iterate topic's partitions */
+                for (j = 0 ; j < t->partition_cnt ; j++) {
+                        const struct rd_kafka_metadata_partition *p;
+                        p = &t->partitions[j];
+                        printf("    partition %"PRId32", "
+                               "leader %"PRId32", replicas: ",
+                               p->id, p->leader);
+
+                        /* Iterate partition's replicas */
+                        for (k = 0 ; k < p->replica_cnt ; k++)
+                                printf("%s%"PRId32,
+                                       k > 0 ? ",":"", p->replicas[k]);
+
+                        /* Iterate partition's ISRs */
+                        printf(", isrs: ");
+                        for (k = 0 ; k < p->isr_cnt ; k++)
+                                printf("%s%"PRId32,
+                                       k > 0 ? ",":"", p->isrs[k]);
+                        if (p->err)
+                                printf(", %s\n", rd_kafka_err2str(p->err));
+                        else
+                                printf("\n");
+                }
+        }
+}
+
+
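+/* Build a comma-separated host:port broker list from the JSON registration
+ * data ("host"/"port" fields) stored under each /brokers/ids/<id> znode. */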
+static void set_brokerlist_from_zookeeper(zhandle_t *zzh, char *brokers)
+{
+	if (zzh)
+	{
+		struct String_vector brokerlist;
+		if (zoo_get_children(zzh, BROKER_PATH, 1, &brokerlist) != ZOK)
+		{
+			fprintf(stderr, "No brokers found on path %s\n", BROKER_PATH);
+			return;
+		}
+
+		int i;
+		char *brokerptr = brokers;
+		for (i = 0; i < brokerlist.count; i++)
+		{
+			char path[255], cfg[1024];
+			sprintf(path, "/brokers/ids/%s", brokerlist.data[i]);
+			int len = sizeof(cfg);
+			zoo_get(zzh, path, 0, cfg, &len, NULL);
+
+			if (len > 0)
+			{
+				cfg[len] = '\0';
+				json_error_t jerror;
+				json_t *jobj = json_loads(cfg, 0, &jerror);
+				if (jobj)
+				{
+					json_t *jhost = json_object_get(jobj, "host");
+					json_t *jport = json_object_get(jobj, "port");
+
+					if (jhost && jport)
+					{
+						const char *host = json_string_value(jhost);
+						const int   port = json_integer_value(jport);
+						sprintf(brokerptr, "%s:%d", host, port);
+
+						brokerptr += strlen(brokerptr);
+						if (i < brokerlist.count - 1)
+						{
+							*brokerptr++ = ',';
+						}
+					}
+					json_decref(jobj);
+				}
+			}
+		}
+		deallocate_String_vector(&brokerlist);
+		printf("Found brokers %s\n", brokers);
+	}
+}
+
+
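+/* ZooKeeper watcher: when the children of /brokers/ids change, rebuild the
+ * broker list and feed any new brokers to the running handle via
+ * rd_kafka_brokers_add(). */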
+static void watcher(zhandle_t *zh, int type, int state, const char *path, void *watcherCtx)
+{
+	char brokers[1024];
+	if (type == ZOO_CHILD_EVENT && strncmp(path, BROKER_PATH, sizeof(BROKER_PATH) - 1) == 0)
+	{
+		brokers[0] = '\0';
+		set_brokerlist_from_zookeeper(zh, brokers);
+		if (brokers[0] != '\0' && rk != NULL)
+		{
+			rd_kafka_brokers_add(rk, brokers);
+			rd_kafka_poll(rk, 10);
+		}
+	}
+}
+
+
+static zhandle_t* initialize_zookeeper(const char * zookeeper, const int debug)
+{
+	zhandle_t *zh;
+	if (debug)
+	{
+		zoo_set_debug_level(ZOO_LOG_LEVEL_DEBUG);
+	}
+	zh = zookeeper_init(zookeeper, watcher, 10000, 0, 0, 0);
+	if (zh == NULL)
+	{
+		fprintf(stderr, "Zookeeper connection not established.");
+		exit(1);
+	}
+	return zh;
+}
+
+
+static void sig_usr1 (int sig) {
+	rd_kafka_dump(stdout, rk);
+}
+
+int main (int argc, char **argv) {
+	rd_kafka_topic_t *rkt;
+	char *zookeeper = "localhost:2181";
+	zhandle_t *zh = NULL;
+	char brokers[1024];
+	char mode = 'C';
+	char *topic = NULL;
+	int partition = RD_KAFKA_PARTITION_UA;
+	int opt;
+	rd_kafka_conf_t *conf;
+	rd_kafka_topic_conf_t *topic_conf;
+	char errstr[512];
+	const char *debug = NULL;
+	int64_t start_offset = 0;
+	int do_conf_dump = 0;
+
+	memset(brokers, 0, sizeof(brokers));
+	quiet = !isatty(STDIN_FILENO);
+
+	/* Kafka configuration */
+	conf = rd_kafka_conf_new();
+
+	/* Topic configuration */
+	topic_conf = rd_kafka_topic_conf_new();
+
+	while ((opt = getopt(argc, argv, "PCLt:p:k:z:qd:o:eX:A")) != -1) {
+		switch (opt) {
+		case 'P':
+		case 'C':
+                case 'L':
+			mode = opt;
+			break;
+		case 't':
+			topic = optarg;
+			break;
+		case 'p':
+			partition = atoi(optarg);
+			break;
+		case 'k':
+			zookeeper = optarg;
+			break;
+		case 'z':
+			if (rd_kafka_conf_set(conf, "compression.codec",
+					      optarg,
+					      errstr, sizeof(errstr)) !=
+			    RD_KAFKA_CONF_OK) {
+				fprintf(stderr, "%% %s\n", errstr);
+				exit(1);
+			}
+			break;
+		case 'o':
+			if (!strcmp(optarg, "end"))
+				start_offset = RD_KAFKA_OFFSET_END;
+			else if (!strcmp(optarg, "beginning"))
+				start_offset = RD_KAFKA_OFFSET_BEGINNING;
+			else if (!strcmp(optarg, "stored"))
+				start_offset = RD_KAFKA_OFFSET_STORED;
+			else
+				start_offset = strtoll(optarg, NULL, 10);
+			break;
+		case 'e':
+			exit_eof = 1;
+			break;
+		case 'd':
+			debug = optarg;
+			break;
+		case 'q':
+			quiet = 1;
+			break;
+		case 'A':
+			output = OUTPUT_RAW;
+			break;
+		case 'X':
+		{
+			char *name, *val;
+			rd_kafka_conf_res_t res;
+
+			if (!strcmp(optarg, "list") ||
+			    !strcmp(optarg, "help")) {
+				rd_kafka_conf_properties_show(stdout);
+				exit(0);
+			}
+
+			if (!strcmp(optarg, "dump")) {
+				do_conf_dump = 1;
+				continue;
+			}
+
+			name = optarg;
+			if (!(val = strchr(name, '='))) {
+				fprintf(stderr, "%% Expected "
+					"-X property=value, not %s\n", name);
+				exit(1);
+			}
+
+			*val = '\0';
+			val++;
+
+			res = RD_KAFKA_CONF_UNKNOWN;
+			/* Try "topic." prefixed properties on topic
+			 * conf first, and then fall through to global if
+			 * it didn't match a topic configuration property. */
+			if (!strncmp(name, "topic.", strlen("topic.")))
+				res = rd_kafka_topic_conf_set(topic_conf,
+							      name+
+							      strlen("topic."),
+							      val,
+							      errstr,
+							      sizeof(errstr));
+
+			if (res == RD_KAFKA_CONF_UNKNOWN)
+				res = rd_kafka_conf_set(conf, name, val,
+							errstr, sizeof(errstr));
+
+			if (res != RD_KAFKA_CONF_OK) {
+				fprintf(stderr, "%% %s\n", errstr);
+				exit(1);
+			}
+		}
+		break;
+
+		default:
+			goto usage;
+		}
+	}
+
+
+	if (do_conf_dump) {
+		const char **arr;
+		size_t cnt;
+		int pass;
+
+		for (pass = 0 ; pass < 2 ; pass++) {
+			int i;
+
+			if (pass == 0) {
+				arr = rd_kafka_conf_dump(conf, &cnt);
+				printf("# Global config\n");
+			} else {
+				printf("# Topic config\n");
+				arr = rd_kafka_topic_conf_dump(topic_conf,
+							       &cnt);
+			}
+
+			for (i = 0 ; i < cnt ; i += 2)
+				printf("%s = %s\n",
+				       arr[i], arr[i+1]);
+
+			printf("\n");
+
+			rd_kafka_conf_dump_free(arr, cnt);
+		}
+
+		exit(0);
+	}
+
+
+	if (optind != argc || (mode != 'L' && !topic)) {
+	usage:
+		fprintf(stderr,
+			"Usage: %s -C|-P|-L -t <topic> "
+			"[-p <partition>] [-k <zookeeper1:port1,zookeeper2:port2,..>]\n"
+			"\n"
+			"librdkafka version %s (0x%08x)\n"
+			"\n"
+			" Options:\n"
+			"  -C | -P         Consumer or Producer mode\n"
+                        "  -L              Metadata list mode\n"
+			"  -t <topic>      Topic to fetch / produce\n"
+			"  -p <num>        Partition (random partitioner)\n"
+			"  -k <zookeepers> Zookeeper address (localhost:2181)\n"
+			"  -z <codec>      Enable compression:\n"
+			"                  none|gzip|snappy\n"
+			"  -o <offset>     Start offset (consumer)\n"
+			"  -e              Exit consumer when last message\n"
+			"                  in partition has been received.\n"
+			"  -d [facs..]     Enable debugging contexts:\n"
+			"                  %s\n"
+			"  -q              Be quiet\n"
+			"  -A              Raw payload output (consumer)\n"
+			"  -X <prop=value> Set arbitrary librdkafka "
+			"configuration property\n"
+			"               Properties prefixed with \"topic.\" "
+			"will be set on topic object.\n"
+			"               Use '-X list' to see the full list\n"
+			"               of supported properties.\n"
+			"\n"
+			" In Consumer mode:\n"
+			"  writes fetched messages to stdout\n"
+			" In Producer mode:\n"
+			"  reads messages from stdin and sends to broker\n"
+                        " In List mode:\n"
+                        "  queries broker for metadata information, "
+                        "topic is optional.\n"
+			"\n"
+			"\n"
+			"\n",
+			argv[0],
+			rd_kafka_version_str(), rd_kafka_version(),
+			RD_KAFKA_DEBUG_CONTEXTS);
+		exit(1);
+	}
+
+
+	signal(SIGINT, stop);
+	signal(SIGUSR1, sig_usr1);
+
+	if (debug &&
+	    rd_kafka_conf_set(conf, "debug", debug, errstr, sizeof(errstr)) !=
+	    RD_KAFKA_CONF_OK) {
+		fprintf(stderr, "%% Debug configuration failed: %s: %s\n",
+			errstr, debug);
+		exit(1);
+	}
+
+        /* Set logger */
+        rd_kafka_conf_set_log_cb(conf, logger);
+
+	/** Initialize zookeeper */
+	zh = initialize_zookeeper(zookeeper, debug != NULL);
+
+	/* Add brokers */
+	set_brokerlist_from_zookeeper(zh, brokers);
+        if (rd_kafka_conf_set(conf, "metadata.broker.list",
+                              brokers, errstr, sizeof(errstr)) !=
+            RD_KAFKA_CONF_OK) {
+                fprintf(stderr, "%% Failed to set brokers: %s\n", errstr);
+                exit(1);
+        }
+
+	if (debug) {
+		printf("Broker list from zookeeper cluster %s: %s\n", zookeeper, brokers);
+	}
+
+	if (mode == 'P') {
+		/*
+		 * Producer
+		 */
+		char buf[2048];
+		int sendcnt = 0;
+
+		/* Set up a message delivery report callback.
+		 * It will be called once for each message, either on successful
+		 * delivery to broker, or upon failure to deliver to broker. */
+		rd_kafka_conf_set_dr_cb(conf, msg_delivered);
+
+		/* Create Kafka handle */
+		if (!(rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf,
+					errstr, sizeof(errstr)))) {
+			fprintf(stderr,
+				"%% Failed to create new producer: %s\n",
+				errstr);
+			exit(1);
+		}
+
+		/* Create topic */
+		rkt = rd_kafka_topic_new(rk, topic, topic_conf);
+
+		if (!quiet)
+			fprintf(stderr,
+				"%% Type stuff and hit enter to send\n");
+
+		while (run && fgets(buf, sizeof(buf), stdin)) {
+			size_t len = strlen(buf);
+			if (buf[len-1] == '\n')
+				buf[--len] = '\0';
+
+			/* Send/Produce message. */
+			if (rd_kafka_produce(rkt, partition,
+					     RD_KAFKA_MSG_F_COPY,
+					     /* Payload and length */
+					     buf, len,
+					     /* Optional key and its length */
+					     NULL, 0,
+					     /* Message opaque, provided in
+					      * delivery report callback as
+					      * msg_opaque. */
+					     NULL) == -1) {
+				fprintf(stderr,
+					"%% Failed to produce to topic %s "
+					"partition %i: %s\n",
+					rd_kafka_topic_name(rkt), partition,
+					rd_kafka_err2str(
+						rd_kafka_errno2err(errno)));
+				/* Poll to handle delivery reports */
+				rd_kafka_poll(rk, 0);
+				continue;
+			}
+
+			if (!quiet)
+				fprintf(stderr, "%% Sent %zd bytes to topic "
+					"%s partition %i\n",
+				len, rd_kafka_topic_name(rkt), partition);
+			sendcnt++;
+			/* Poll to handle delivery reports */
+			rd_kafka_poll(rk, 0);
+		}
+
+		/* Poll to handle delivery reports */
+		rd_kafka_poll(rk, 0);
+
+		/* Wait for messages to be delivered */
+		while (run && rd_kafka_outq_len(rk) > 0)
+			rd_kafka_poll(rk, 100);
+
+		/* Destroy the handle */
+		rd_kafka_destroy(rk);
+
+	} else if (mode == 'C') {
+		/*
+		 * Consumer
+		 */
+
+		/* Create Kafka handle */
+		if (!(rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf,
+					errstr, sizeof(errstr)))) {
+			fprintf(stderr,
+				"%% Failed to create new consumer: %s\n",
+				errstr);
+			exit(1);
+		}
+
+		/* Create topic */
+		rkt = rd_kafka_topic_new(rk, topic, topic_conf);
+
+		/* Start consuming */
+		if (rd_kafka_consume_start(rkt, partition, start_offset) == -1){
+			fprintf(stderr, "%% Failed to start consuming: %s\n",
+				rd_kafka_err2str(rd_kafka_errno2err(errno)));
+			exit(1);
+		}
+
+		while (run) {
+			rd_kafka_message_t *rkmessage;
+
+			/* Consume single message.
+			 * See rdkafka_performance.c for high speed
+			 * consuming of messages. */
+			rkmessage = rd_kafka_consume(rkt, partition, 1000);
+			if (!rkmessage) /* timeout */
+				continue;
+
+			msg_consume(rkmessage, NULL);
+
+			/* Return message to rdkafka */
+			rd_kafka_message_destroy(rkmessage);
+		}
+
+		/* Stop consuming */
+		rd_kafka_consume_stop(rkt, partition);
+
+		rd_kafka_topic_destroy(rkt);
+
+		rd_kafka_destroy(rk);
+
+	} else if (mode == 'L') {
+		rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;
+
+		/* Create Kafka handle */
+		if (!(rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf,
+					errstr, sizeof(errstr)))) {
+			fprintf(stderr,
+				"%% Failed to create new producer: %s\n",
+				errstr);
+			exit(1);
+		}
+
+		/* Create topic */
+		if (topic)
+			rkt = rd_kafka_topic_new(rk, topic, topic_conf);
+		else
+			rkt = NULL;
+
+		while (run) {
+				const struct rd_kafka_metadata *metadata;
+
+				/* Fetch metadata */
+				err = rd_kafka_metadata(rk, rkt ? 0 : 1, rkt,
+										&metadata, 5000);
+				if (err != RD_KAFKA_RESP_ERR_NO_ERROR) {
+						fprintf(stderr,
+								"%% Failed to acquire metadata: %s\n",
+								rd_kafka_err2str(err));
+						run = 0;
+						break;
+				}
+
+				metadata_print(topic, metadata);
+
+				rd_kafka_metadata_destroy(metadata);
+				run = 0;
+		}
+
+		/* Destroy the handle */
+		rd_kafka_destroy(rk);
+
+		/* Exit right away, don't wait for background cleanup, we haven't
+		 * done anything important anyway. */
+		exit(err ? 2 : 0);
+	}
+
+	/* Let background threads clean up and terminate cleanly. */
+	rd_kafka_wait_destroyed(2000);
+
+	/** Free the zookeeper data. */
+	zookeeper_close(zh);
+
+	return 0;
+}

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/lds-gen.py
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/lds-gen.py b/thirdparty/librdkafka-0.11.4/lds-gen.py
new file mode 100755
index 0000000..1136580
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/lds-gen.py
@@ -0,0 +1,38 @@
+#!/usr/bin/env python
+#
+#
+# Generate linker script to only expose symbols of the public API
+#
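+# Reads C declarations on stdin, collects every rd_kafka_* function that is
+# not preceded by RD_UNUSED/__attribute__((unused)), and emits a GNU ld
+# version script that exports only those symbols (everything else is local).
+#
+# Illustrative output, assuming only rd_kafka_new and rd_kafka_destroy were
+# found on stdin:
+#
+#   # Automatically generated by lds-gen.py - DO NOT EDIT
+#   {
+#    global:
+#       rd_kafka_destroy;
+#       rd_kafka_new;
+#   local:
+#       *;
+#   };
+#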
+
+import sys
+import re
+
+
+if __name__ == '__main__':
+
+    funcs = list()
+    last_line = ''
+
+    for line in sys.stdin:
+        m = re.match(r'^(\S+.*\s+\**)?(rd_kafka_\S+)\s*\(', line)
+        if m:
+            sym = m.group(2)
+            # Ignore static (unused) functions
+            m2 = re.match(r'(RD_UNUSED|__attribute__\(\(unused\)\))', last_line)
+            if not m2:
+                funcs.append(sym)
+            last_line = ''
+        else:
+            last_line = line
+
+    print('# Automatically generated by lds-gen.py - DO NOT EDIT')
+    print('{\n global:')
+    if len(funcs) == 0:
+        print('    *;')
+    else:
+        for f in sorted(funcs):
+            print('    %s;' % f)
+
+        print('local:\n    *;')
+
+    print('};')

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/mainpage.doxy
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/mainpage.doxy b/thirdparty/librdkafka-0.11.4/mainpage.doxy
new file mode 100644
index 0000000..97f2456
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/mainpage.doxy
@@ -0,0 +1,35 @@
+/**
+ * @mainpage librdkafka documentation
+ *
+ * librdkafka is the Apache Kafka C/C++ client library.
+ *
+ * @section intro Introduction
+ *
+ * For an introduction and manual to librdkafka see INTRODUCTION.md
+ *
+ * @section conf Configuration
+ *
+ * librdkafka is highly configurable to meet any deployment demands.
+ * It is usually safe to leave most configuration properties to their default
+ * values.
+ *
+ * See CONFIGURATION.md for the full list of supported configuration properties.
+ *
+ * @remark Application developers are recommended to provide a non-hardcoded
+ *         interface to librdkafka's string based name-value configuration
+ *         property interface, allowing users to configure any librdkafka
+ *         property directly without alterations to the application.
+ *         This allows for seamless upgrades where linking to a new version
+ *         of librdkafka automatically provides new configuration
+ *         based features.
+ *
+ *
+ * @section c_api C API
+ *
+ * The C API is documented in rdkafka.h
+ *
+ * @section cpp_api C++ API
+ *
+ * The C++ API is documented in rdkafkacpp.h
+ */
+
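
A minimal sketch of the configuration pass-through style recommended in the
remark above, using the rd_kafka_conf_set() call seen throughout these
examples; the apply_user_config() helper and its "prop=value" input array are
hypothetical:

    /* Apply user-supplied "prop=value" strings directly to the conf object so
     * that any current or future librdkafka property can be set without
     * application changes. The pairs array stands in for a config file or
     * command line. */
    #include <stdio.h>
    #include <string.h>
    #include <librdkafka/rdkafka.h>

    int apply_user_config (rd_kafka_conf_t *conf,
                           const char **pairs, size_t cnt) {
            char errstr[512];
            size_t i;
            for (i = 0 ; i < cnt ; i++) {
                    char buf[512];
                    char *val;
                    strncpy(buf, pairs[i], sizeof(buf)-1);
                    buf[sizeof(buf)-1] = '\0';
                    if (!(val = strchr(buf, '=')))
                            return -1;  /* expected prop=value */
                    *val++ = '\0';
                    if (rd_kafka_conf_set(conf, buf, val,
                                          errstr, sizeof(errstr)) !=
                        RD_KAFKA_CONF_OK) {
                            fprintf(stderr, "%% %s\n", errstr);
                            return -1;
                    }
            }
            return 0;
    }

Called with e.g. {"compression.codec=snappy", "queued.min.messages=1000000"}
before rd_kafka_new(), this keeps the application agnostic of which properties
a given librdkafka version supports.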


[43/51] [partial] nifi-minifi-cpp git commit: MINIFICPP-512 - upgrade to librdkafka 0.11.4

Posted by ph...@apache.org.
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src-cpp/rdkafkacpp.h
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src-cpp/rdkafkacpp.h b/thirdparty/librdkafka-0.11.1/src-cpp/rdkafkacpp.h
deleted file mode 100644
index 43e2af0..0000000
--- a/thirdparty/librdkafka-0.11.1/src-cpp/rdkafkacpp.h
+++ /dev/null
@@ -1,2190 +0,0 @@
-/*
- * librdkafka - Apache Kafka C/C++ library
- *
- * Copyright (c) 2014 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#pragma once
-
-/**
- * @file rdkafkacpp.h
- * @brief Apache Kafka C/C++ consumer and producer client library.
- *
- * rdkafkacpp.h contains the public C++ API for librdkafka.
- * The API is documented in this file as comments prefixing the class,
- * function, type, enum, define, etc.
- * For more information, see the C interface in rdkafka.h and read the
- * manual in INTRODUCTION.md.
- * The C++ interface is standard C++03 compliant and adheres to the
- * Google C++ Style Guide.
-
- * @sa For the C interface see rdkafka.h
- *
- * @tableofcontents
- */
-
-/**@cond NO_DOC*/
-#include <string>
-#include <list>
-#include <vector>
-#include <stdint.h>
-
-
-#ifdef _MSC_VER
-#undef RD_EXPORT
-#ifdef LIBRDKAFKA_STATICLIB
-#define RD_EXPORT
-#else
-#ifdef LIBRDKAFKACPP_EXPORTS
-#define RD_EXPORT __declspec(dllexport)
-#else
-#define RD_EXPORT __declspec(dllimport)
-#endif
-#endif
-#else
-#define RD_EXPORT
-#endif
-
-/**@endcond*/
-
-namespace RdKafka {
-
-
-/**
- * @name Miscellaneous APIs
- * @{
- */
-
-/**
- * @brief librdkafka version
- *
- * Interpreted as hex \c MM.mm.rr.xx:
- *  - MM = Major
- *  - mm = minor
- *  - rr = revision
- *  - xx = pre-release id (0xff is the final release)
- *
- * E.g.: \c 0x000801ff = 0.8.1
- *
- * @remark This value should only be used during compile time,
- *         for runtime checks of version use RdKafka::version()
- */
-#define RD_KAFKA_VERSION  0x000b01ff
-
-/**
- * @brief Returns the librdkafka version as integer.
- *
- * @sa See RD_KAFKA_VERSION for how to parse the integer format.
- */
-RD_EXPORT
-int          version ();
-
-/**
- * @brief Returns the librdkafka version as string.
- */
-RD_EXPORT
-std::string  version_str();
-
-/**
- * @brief Returns a CSV list of the supported debug contexts
- *        for use with Conf::Set("debug", ..).
- */
-RD_EXPORT
-std::string get_debug_contexts();
-
-/**
- * @brief Wait for all rd_kafka_t objects to be destroyed.
- *
- * @returns 0 if all kafka objects are now destroyed, or -1 if the
- * timeout was reached.
- * Since RdKafka handle deletion is an asynchronous operation, the
- * \p wait_destroyed() function can be used for applications where
- * a clean shutdown is required.
- */
-RD_EXPORT
-int          wait_destroyed(int timeout_ms);
-
-
-/**@}*/
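// Illustrative sketch (only the declarations above are from the API; the
// output format is an assumption): compare the compile-time
// RD_KAFKA_VERSION with the runtime version, decoded per the MM.mm.rr.xx
// layout documented above.
#include <iostream>
#include "rdkafkacpp.h"

static void print_versions () {
  int runtime = RdKafka::version();
  std::cout << "built against 0x" << std::hex << RD_KAFKA_VERSION
            << ", running " << RdKafka::version_str()
            << " (major " << std::dec << ((runtime >> 24) & 0xff)
            << ", minor " << ((runtime >> 16) & 0xff)
            << ", revision " << ((runtime >> 8) & 0xff) << ")" << std::endl;
}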
-
-
-
-/**
- * @name Constants, errors, types
- * @{
- *
- *
- */
-
-/**
- * @brief Error codes.
- *
- * The negative error codes delimited by two underscores
- * (\c _ERR__..) denote errors internal to librdkafka and are
- * displayed as \c \"Local: \<error string..\>\", while the error codes
- * delimited by a single underscore (\c ERR_..) denote broker
- * errors and are displayed as \c \"Broker: \<error string..\>\".
- *
- * @sa Use RdKafka::err2str() to translate an error code into a human readable string
- */
-enum ErrorCode {
-	/* Internal errors to rdkafka: */
-	/** Begin internal error codes */
-	ERR__BEGIN = -200,
-	/** Received message is incorrect */
-	ERR__BAD_MSG = -199,
-	/** Bad/unknown compression */
-	ERR__BAD_COMPRESSION = -198,
-	/** Broker is going away */
-	ERR__DESTROY = -197,
-	/** Generic failure */
-	ERR__FAIL = -196,
-	/** Broker transport failure */
-	ERR__TRANSPORT = -195,
-	/** Critical system resource */
-	ERR__CRIT_SYS_RESOURCE = -194,
-	/** Failed to resolve broker */
-	ERR__RESOLVE = -193,
-	/** Produced message timed out*/
-	ERR__MSG_TIMED_OUT = -192,
-	/** Reached the end of the topic+partition queue on
-	 * the broker. Not really an error. */
-	ERR__PARTITION_EOF = -191,
-	/** Permanent: Partition does not exist in cluster. */
-	ERR__UNKNOWN_PARTITION = -190,
-	/** File or filesystem error */
-	ERR__FS = -189,
-	 /** Permanent: Topic does not exist in cluster. */
-	ERR__UNKNOWN_TOPIC = -188,
-	/** All broker connections are down. */
-	ERR__ALL_BROKERS_DOWN = -187,
-	/** Invalid argument, or invalid configuration */
-	ERR__INVALID_ARG = -186,
-	/** Operation timed out */
-	ERR__TIMED_OUT = -185,
-	/** Queue is full */
-	ERR__QUEUE_FULL = -184,
-	/** ISR count < required.acks */
-        ERR__ISR_INSUFF = -183,
-	/** Broker node update */
-        ERR__NODE_UPDATE = -182,
-	/** SSL error */
-	ERR__SSL = -181,
-	/** Waiting for coordinator to become available. */
-        ERR__WAIT_COORD = -180,
-	/** Unknown client group */
-        ERR__UNKNOWN_GROUP = -179,
-	/** Operation in progress */
-        ERR__IN_PROGRESS = -178,
-	 /** Previous operation in progress, wait for it to finish. */
-        ERR__PREV_IN_PROGRESS = -177,
-	 /** This operation would interfere with an existing subscription */
-        ERR__EXISTING_SUBSCRIPTION = -176,
-	/** Assigned partitions (rebalance_cb) */
-        ERR__ASSIGN_PARTITIONS = -175,
-	/** Revoked partitions (rebalance_cb) */
-        ERR__REVOKE_PARTITIONS = -174,
-	/** Conflicting use */
-        ERR__CONFLICT = -173,
-	/** Wrong state */
-        ERR__STATE = -172,
-	/** Unknown protocol */
-        ERR__UNKNOWN_PROTOCOL = -171,
-	/** Not implemented */
-        ERR__NOT_IMPLEMENTED = -170,
-	/** Authentication failure*/
-	ERR__AUTHENTICATION = -169,
-	/** No stored offset */
-	ERR__NO_OFFSET = -168,
-	/** Outdated */
-	ERR__OUTDATED = -167,
-	/** Timed out in queue */
-	ERR__TIMED_OUT_QUEUE = -166,
-        /** Feature not supported by broker */
-        ERR__UNSUPPORTED_FEATURE = -165,
-        /** Awaiting cache update */
-        ERR__WAIT_CACHE = -164,
-        /** Operation interrupted */
-        ERR__INTR = -163,
-        /** Key serialization error */
-        ERR__KEY_SERIALIZATION = -162,
-        /** Value serialization error */
-        ERR__VALUE_SERIALIZATION = -161,
-        /** Key deserialization error */
-        ERR__KEY_DESERIALIZATION = -160,
-        /** Value deserialization error */
-        ERR__VALUE_DESERIALIZATION = -159,
-	/** End internal error codes */
-	ERR__END = -100,
-
-	/* Kafka broker errors: */
-	/** Unknown broker error */
-	ERR_UNKNOWN = -1,
-	/** Success */
-	ERR_NO_ERROR = 0,
-	/** Offset out of range */
-	ERR_OFFSET_OUT_OF_RANGE = 1,
-	/** Invalid message */
-	ERR_INVALID_MSG = 2,
-	/** Unknown topic or partition */
-	ERR_UNKNOWN_TOPIC_OR_PART = 3,
-	/** Invalid message size */
-	ERR_INVALID_MSG_SIZE = 4,
-	/** Leader not available */
-	ERR_LEADER_NOT_AVAILABLE = 5,
-	/** Not leader for partition */
-	ERR_NOT_LEADER_FOR_PARTITION = 6,
-	/** Request timed out */
-	ERR_REQUEST_TIMED_OUT = 7,
-	/** Broker not available */
-	ERR_BROKER_NOT_AVAILABLE = 8,
-	/** Replica not available */
-	ERR_REPLICA_NOT_AVAILABLE = 9,
-	/** Message size too large */
-	ERR_MSG_SIZE_TOO_LARGE = 10,
-	/** StaleControllerEpochCode */
-	ERR_STALE_CTRL_EPOCH = 11,
-	/** Offset metadata string too large */
-	ERR_OFFSET_METADATA_TOO_LARGE = 12,
-	/** Broker disconnected before response received */
-	ERR_NETWORK_EXCEPTION = 13,
-	/** Group coordinator load in progress */
-        ERR_GROUP_LOAD_IN_PROGRESS = 14,
-	 /** Group coordinator not available */
-        ERR_GROUP_COORDINATOR_NOT_AVAILABLE = 15,
-	/** Not coordinator for group */
-        ERR_NOT_COORDINATOR_FOR_GROUP = 16,
-	/** Invalid topic */
-        ERR_TOPIC_EXCEPTION = 17,
-	/** Message batch larger than configured server segment size */
-        ERR_RECORD_LIST_TOO_LARGE = 18,
-	/** Not enough in-sync replicas */
-        ERR_NOT_ENOUGH_REPLICAS = 19,
-	/** Message(s) written to insufficient number of in-sync replicas */
-        ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND = 20,
-	/** Invalid required acks value */
-        ERR_INVALID_REQUIRED_ACKS = 21,
-	/** Specified group generation id is not valid */
-        ERR_ILLEGAL_GENERATION = 22,
-	/** Inconsistent group protocol */
-        ERR_INCONSISTENT_GROUP_PROTOCOL = 23,
-	/** Invalid group.id */
-	ERR_INVALID_GROUP_ID = 24,
-	/** Unknown member */
-        ERR_UNKNOWN_MEMBER_ID = 25,
-	/** Invalid session timeout */
-        ERR_INVALID_SESSION_TIMEOUT = 26,
-	/** Group rebalance in progress */
-	ERR_REBALANCE_IN_PROGRESS = 27,
-	/** Commit offset data size is not valid */
-        ERR_INVALID_COMMIT_OFFSET_SIZE = 28,
-	/** Topic authorization failed */
-        ERR_TOPIC_AUTHORIZATION_FAILED = 29,
-	/** Group authorization failed */
-	ERR_GROUP_AUTHORIZATION_FAILED = 30,
-	/** Cluster authorization failed */
-	ERR_CLUSTER_AUTHORIZATION_FAILED = 31,
-        /** Invalid timestamp */
-        ERR_INVALID_TIMESTAMP = 32,
-        /** Unsupported SASL mechanism */
-        ERR_UNSUPPORTED_SASL_MECHANISM = 33,
-        /** Illegal SASL state */
-        ERR_ILLEGAL_SASL_STATE = 34,
-        /** Unsupported version */
-        ERR_UNSUPPORTED_VERSION = 35,
-        /** Topic already exists */
-        ERR_TOPIC_ALREADY_EXISTS = 36,
-        /** Invalid number of partitions */
-        ERR_INVALID_PARTITIONS = 37,
-        /** Invalid replication factor */
-        ERR_INVALID_REPLICATION_FACTOR = 38,
-        /** Invalid replica assignment */
-        ERR_INVALID_REPLICA_ASSIGNMENT = 39,
-        /** Invalid config */
-        ERR_INVALID_CONFIG = 40,
-        /** Not controller for cluster */
-        ERR_NOT_CONTROLLER = 41,
-        /** Invalid request */
-        ERR_INVALID_REQUEST = 42,
-        /** Message format on broker does not support request */
-        ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT = 43,
-        /** Isolation policy violation */
-        ERR_POLICY_VIOLATION = 44,
-        /** Broker received an out of order sequence number */
-        ERR_OUT_OF_ORDER_SEQUENCE_NUMBER = 45,
-        /** Broker received a duplicate sequence number */
-        ERR_DUPLICATE_SEQUENCE_NUMBER = 46,
-        /** Producer attempted an operation with an old epoch */
-        ERR_INVALID_PRODUCER_EPOCH = 47,
-        /** Producer attempted a transactional operation in an invalid state */
-        ERR_INVALID_TXN_STATE = 48,
-        /** Producer attempted to use a producer id which is not
-         *  currently assigned to its transactional id */
-        ERR_INVALID_PRODUCER_ID_MAPPING = 49,
-        /** Transaction timeout is larger than the maximum
-         *  value allowed by the broker's max.transaction.timeout.ms */
-        ERR_INVALID_TRANSACTION_TIMEOUT = 50,
-        /** Producer attempted to update a transaction while another
-         *  concurrent operation on the same transaction was ongoing */
-        ERR_CONCURRENT_TRANSACTIONS = 51,
-        /** Indicates that the transaction coordinator sending a
-         *  WriteTxnMarker is no longer the current coordinator for a
-         *  given producer */
-        ERR_TRANSACTION_COORDINATOR_FENCED = 52,
-        /** Transactional Id authorization failed */
-        ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED = 53,
-        /** Security features are disabled */
-        ERR_SECURITY_DISABLED = 54,
-        /** Operation not attempted */
-        ERR_OPERATION_NOT_ATTEMPTED = 55
-};
-
-
-/**
- * @brief Returns a human readable representation of a kafka error.
- */
-RD_EXPORT
-std::string  err2str(RdKafka::ErrorCode err);
-
-
-/**@} */
-
-
-
-/**@cond NO_DOC*/
-/* Forward declarations */
-class Producer;
-class Message;
-class Queue;
-class Event;
-class Topic;
-class TopicPartition;
-class Metadata;
-class KafkaConsumer;
-/**@endcond*/
-
-
-/**
- * @name Callback classes
- * @{
- *
- *
- * librdkafka uses (optional) callbacks to propagate information and
- * delegate decisions to the application logic.
- *
- * An application must call RdKafka::poll() at regular intervals to
- * serve queued callbacks.
- */
-
-
-/**
- * @brief Delivery Report callback class
- *
- * The delivery report callback will be called once for each message
- * accepted by RdKafka::Producer::produce() (et al.) with
- * RdKafka::Message::err() set to indicate the result of the produce request.
- *
- * The callback is called when a message is successfully produced or
- * if librdkafka encountered a permanent failure, or the retry counter for
- * temporary errors has been exhausted.
- *
- * An application must call RdKafka::poll() at regular intervals to
- * serve queued delivery report callbacks.
-
- */
-class RD_EXPORT DeliveryReportCb {
- public:
-  /**
-   * @brief Delivery report callback.
-   */
-  virtual void dr_cb (Message &message) = 0;
-
-  virtual ~DeliveryReportCb() { }
-};
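// Illustrative sketch of a DeliveryReportCb implementation; the stderr
// logging and class name are assumptions, only dr_cb()'s signature and the
// Message accessors (declared further below) come from the API.
#include <iostream>
#include "rdkafkacpp.h"

class ExampleDeliveryReportCb : public RdKafka::DeliveryReportCb {
 public:
  void dr_cb (RdKafka::Message &message) {
    if (message.err() != RdKafka::ERR_NO_ERROR)
      std::cerr << "Delivery failed: " << message.errstr() << std::endl;
    else
      std::cerr << "Delivered to " << message.topic_name()
                << " [" << message.partition() << "] @ "
                << message.offset() << std::endl;
  }
};
// The instance is registered with conf->set("dr_cb", &my_dr_cb, errstr)
// before creating the producer and is served from the producer's poll() calls.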
-
-
-/**
- * @brief Partitioner callback class
- *
- * Generic partitioner callback class for implementing custom partitioners.
- *
- * @sa RdKafka::Conf::set() \c "partitioner_cb"
- */
-class RD_EXPORT PartitionerCb {
- public:
-  /**
-   * @brief Partitioner callback
-   *
-   * Return the partition to use for \p key in \p topic.
-   *
-   * The \p msg_opaque is the same \p msg_opaque provided in the
-   * RdKafka::Producer::produce() call.
-   *
-   * @remark \p key may be NULL or empty.
-   *
-   * @returns Must return a value between 0 and \p partition_cnt (non-inclusive).
-   *          May return RD_KAFKA_PARTITION_UA (-1) if partitioning failed.
-   *
-   * @sa The callback may use RdKafka::Topic::partition_available() to check
-   *     if a partition has an active leader broker.
-   */
-  virtual int32_t partitioner_cb (const Topic *topic,
-                                  const std::string *key,
-                                  int32_t partition_cnt,
-                                  void *msg_opaque) = 0;
-
-  virtual ~PartitionerCb() { }
-};
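// Illustrative sketch of a custom PartitionerCb: a simple djb2-style hash
// of the key, returning RdKafka::Topic::PARTITION_UA when partitioning is
// not possible. The hash choice is an assumption.
#include <string>
#include "rdkafkacpp.h"

class ExamplePartitionerCb : public RdKafka::PartitionerCb {
 public:
  int32_t partitioner_cb (const RdKafka::Topic *topic,
                          const std::string *key,
                          int32_t partition_cnt,
                          void *msg_opaque) {
    if (!key || key->empty() || partition_cnt <= 0)
      return RdKafka::Topic::PARTITION_UA;
    unsigned long h = 5381;
    for (size_t i = 0 ; i < key->size() ; i++)
      h = h * 33 + static_cast<unsigned char>((*key)[i]);
    int32_t partition = static_cast<int32_t>(h % partition_cnt);
    // partition_available() may only be used from within this callback.
    return topic->partition_available(partition)
               ? partition : RdKafka::Topic::PARTITION_UA;
  }
};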
-
-/**
- * @brief  Variant partitioner with key pointer
- *
- */
-class PartitionerKeyPointerCb {
- public:
-  /**
-   * @brief Variant partitioner callback that gets \p key as pointer and length
-   *        instead of as a const std::string *.
-   *
-   * @remark \p key may be NULL or have \p key_len 0.
-   *
-   * @sa See RdKafka::PartitionerCb::partitioner_cb() for exact semantics
-   */
-  virtual int32_t partitioner_cb (const Topic *topic,
-                                  const void *key,
-                                  size_t key_len,
-                                  int32_t partition_cnt,
-                                  void *msg_opaque) = 0;
-
-  virtual ~PartitionerKeyPointerCb() { }
-};
-
-
-
-/**
- * @brief Event callback class
- *
- * Events are a generic interface for propagating errors, statistics, logs, etc
- * from librdkafka to the application.
- *
- * @sa RdKafka::Event
- */
-class RD_EXPORT EventCb {
- public:
-  /**
-   * @brief Event callback
-   *
-   * @sa RdKafka::Event
-   */
-  virtual void event_cb (Event &event) = 0;
-
-  virtual ~EventCb() { }
-};
-
-
-/**
- * @brief Event object class as passed to the EventCb callback.
- */
-class RD_EXPORT Event {
- public:
-  /** @brief Event type */
-  enum Type {
-    EVENT_ERROR,     /**< Event is an error condition */
-    EVENT_STATS,     /**< Event is a statistics JSON document */
-    EVENT_LOG,       /**< Event is a log message */
-    EVENT_THROTTLE   /**< Event is a throttle level signaling from the broker */
-  };
-
-  /** @brief EVENT_LOG severities (conforms to syslog(3) severities) */
-  enum Severity {
-    EVENT_SEVERITY_EMERG = 0,
-    EVENT_SEVERITY_ALERT = 1,
-    EVENT_SEVERITY_CRITICAL = 2,
-    EVENT_SEVERITY_ERROR = 3,
-    EVENT_SEVERITY_WARNING = 4,
-    EVENT_SEVERITY_NOTICE = 5,
-    EVENT_SEVERITY_INFO = 6,
-    EVENT_SEVERITY_DEBUG = 7
-  };
-
-  virtual ~Event () { }
-
-  /*
-   * Event Accessor methods
-   */
-
-  /**
-   * @returns The event type
-   * @remark Applies to all event types
-   */
-  virtual Type        type () const = 0;
-
-  /**
-   * @returns Event error, if any.
-   * @remark Applies to all event types except THROTTLE
-   */
-  virtual ErrorCode   err () const = 0;
-
-  /**
-   * @returns Log severity level.
-   * @remark Applies to LOG event type.
-   */
-  virtual Severity    severity () const = 0;
-
-  /**
-   * @returns Log facility string.
-   * @remark Applies to LOG event type.
-   */
-  virtual std::string fac () const = 0;
-
-  /**
-   * @returns Log message string.
-   *
-   * \c EVENT_LOG: Log message string.
-   * \c EVENT_STATS: JSON object (as string).
-   *
-   * @remark Applies to LOG event type.
-   */
-  virtual std::string str () const = 0;
-
-  /**
-   * @returns Throttle time in milliseconds.
-   * @remark Applies to THROTTLE event type.
-   */
-  virtual int         throttle_time () const = 0;
-
-  /**
-   * @returns Throttling broker's name.
-   * @remark Applies to THROTTLE event type.
-   */
-  virtual std::string broker_name () const = 0;
-
-  /**
-   * @returns Throttling broker's id.
-   * @remark Applies to THROTTLE event type.
-   */
-  virtual int         broker_id () const = 0;
-};
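// Illustrative sketch of an EventCb that dispatches on the Event accessors
// above; routing everything to stderr is an assumption.
#include <iostream>
#include "rdkafkacpp.h"

class ExampleEventCb : public RdKafka::EventCb {
 public:
  void event_cb (RdKafka::Event &event) {
    switch (event.type()) {
      case RdKafka::Event::EVENT_ERROR:
        std::cerr << "ERROR (" << RdKafka::err2str(event.err()) << "): "
                  << event.str() << std::endl;
        break;
      case RdKafka::Event::EVENT_LOG:
        std::cerr << "LOG-" << event.severity() << "-" << event.fac()
                  << ": " << event.str() << std::endl;
        break;
      case RdKafka::Event::EVENT_THROTTLE:
        std::cerr << "THROTTLED " << event.throttle_time() << "ms by "
                  << event.broker_name() << " (" << event.broker_id() << ")"
                  << std::endl;
        break;
      default:  /* EVENT_STATS: event.str() is the statistics JSON document */
        break;
    }
  }
};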
-
-
-
-/**
- * @brief Consume callback class
- */
-class RD_EXPORT ConsumeCb {
- public:
-  /**
-   * @brief The consume callback is used with
-   *        RdKafka::Consumer::consume_callback()
-   *        methods and will be called for each consumed \p message.
-   *
-   * The callback interface is optional but provides increased performance.
-   */
-  virtual void consume_cb (Message &message, void *opaque) = 0;
-
-  virtual ~ConsumeCb() { }
-};
-
-
-/**
- * @brief \b KafkaConsumer: Rebalance callback class
- */
-class RD_EXPORT RebalanceCb {
-public:
-  /**
-   * @brief Group rebalance callback for use with RdKafka::KafkaConsumer
-   *
-   * Registering a \p rebalance_cb turns off librdkafka's automatic
-   * partition assignment/revocation and instead delegates that responsibility
-   * to the application's \p rebalance_cb.
-   *
-   * The rebalance callback is responsible for updating librdkafka's
-   * assignment set based on the two events: RdKafka::ERR__ASSIGN_PARTITIONS
-   * and RdKafka::ERR__REVOKE_PARTITIONS but should also be able to handle
-   * arbitrary rebalancing failures where \p err is neither of those.
-   * @remark In this latter case (arbitrary error), the application must
-   *         call unassign() to synchronize state.
-
-   *
-   * Without a rebalance callback this is done automatically by librdkafka
-   * but registering a rebalance callback gives the application flexibility
-   * in performing other operations along with the assignment/revocation,
-   * such as fetching offsets from an alternate location (on assign)
-   * or manually committing offsets (on revoke).
-   *
-   * The following example shows the application's responsibilities:
-   * @code
-   *    class MyRebalanceCb : public RdKafka::RebalanceCb {
-   *     public:
-   *      void rebalance_cb (RdKafka::KafkaConsumer *consumer,
-   *     	      RdKafka::ErrorCode err,
-   *                  std::vector<RdKafka::TopicPartition*> &partitions) {
-   *         if (err == RdKafka::ERR__ASSIGN_PARTITIONS) {
-   *           // application may load offsets from arbitrary external
-   *           // storage here and update \p partitions
-   *
-   *           consumer->assign(partitions);
-   *
-   *         } else if (err == RdKafka::ERR__REVOKE_PARTITIONS) {
-   *           // Application may commit offsets manually here
-   *           // if auto.commit.enable=false
-   *
-   *           consumer->unassign();
-   *
-   *         } else {
-   *           std::cerr << "Rebalancing error: " <<
-   *                        RdKafka::err2str(err) << std::endl;
-   *           consumer->unassign();
-   *         }
-   *     }
-   *  };
-   * @endcode
-   */
- virtual void rebalance_cb (RdKafka::KafkaConsumer *consumer,
-			    RdKafka::ErrorCode err,
-                            std::vector<TopicPartition*>&partitions) = 0;
-
- virtual ~RebalanceCb() { }
-};
-
-
-/**
- * @brief Offset Commit callback class
- */
-class RD_EXPORT OffsetCommitCb {
-public:
-  /**
-   * @brief Set offset commit callback for use with consumer groups
-   *
-   * The results of automatic or manual offset commits will be scheduled
-   * for this callback and served by RdKafka::KafkaConsumer::consume().
-   *
-   * If no partitions had valid offsets to commit this callback will be called
-   * with \p err == ERR__NO_OFFSET which is not to be considered an error.
-   *
-   * The \p offsets list contains per-partition information:
-   *   - \c topic      The topic committed
-   *   - \c partition  The partition committed
-   *   - \c offset:    Committed offset (attempted)
-   *   - \c err:       Commit error
-   */
-  virtual void offset_commit_cb(RdKafka::ErrorCode err,
-                                std::vector<TopicPartition*>&offsets) = 0;
-
-  virtual ~OffsetCommitCb() { }
-};
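// Illustrative sketch of an OffsetCommitCb; the logging is an assumption
// and ERR__NO_OFFSET is ignored, per the remark above.
#include <iostream>
#include <vector>
#include "rdkafkacpp.h"

class ExampleOffsetCommitCb : public RdKafka::OffsetCommitCb {
 public:
  void offset_commit_cb (RdKafka::ErrorCode err,
                         std::vector<RdKafka::TopicPartition*> &offsets) {
    if (err == RdKafka::ERR__NO_OFFSET)
      return;  /* nothing to commit: not an error */
    for (size_t i = 0 ; i < offsets.size() ; i++)
      std::cerr << offsets[i]->topic() << " [" << offsets[i]->partition()
                << "] offset " << offsets[i]->offset() << ": "
                << RdKafka::err2str(offsets[i]->err()) << std::endl;
  }
};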
-
-
-
-/**
- * @brief \b Portability: SocketCb callback class
- *
- */
-class RD_EXPORT SocketCb {
- public:
-  /**
-   * @brief Socket callback
-   *
-   * The socket callback is responsible for opening a socket
-   * according to the supplied \p domain, \p type and \p protocol.
-   * The socket shall be created with \c CLOEXEC set in a race-free fashion, if
-   * possible.
-   *
-   * It is typically not required to register an alternative socket
-   * implementation.
-   *
-   * @returns The socket file descriptor or -1 on error (\c errno must be set)
-   */
-  virtual int socket_cb (int domain, int type, int protocol) = 0;
-
-  virtual ~SocketCb() { }
-};
-
-
-/**
- * @brief \b Portability: OpenCb callback class
- *
- */
-class RD_EXPORT OpenCb {
- public:
-  /**
-   * @brief Open callback
-   * The open callback is responsible for opening the file specified by
-   * \p pathname, using \p flags and \p mode.
-   * The file shall be opened with \c CLOEXEC set in a race-free fashion, if
-   * possible.
-   *
-   * It is typically not required to register an alternative open implementation.
-   *
-   * @remark Not currently available on native Win32
-   */
-  virtual int open_cb (const std::string &path, int flags, int mode) = 0;
-
-  virtual ~OpenCb() { }
-};
-
-
-/**@}*/
-
-
-
-
-/**
- * @name Configuration interface
- * @{
- *
- */
-
-/**
- * @brief Configuration interface
- *
- * Holds either global or topic configuration that is passed to
- * RdKafka::Consumer::create(), RdKafka::Producer::create(),
- * RdKafka::KafkaConsumer::create(), etc.
- *
- * @sa CONFIGURATION.md for the full list of supported properties.
- */
-class RD_EXPORT Conf {
- public:
-  /**
-   * @brief Configuration object type
-   */
-  enum ConfType {
-    CONF_GLOBAL, /**< Global configuration */
-    CONF_TOPIC   /**< Topic specific configuration */
-  };
-
-  /**
-   * @brief RdKafka::Conf::Set() result code
-   */
-  enum ConfResult {
-    CONF_UNKNOWN = -2,  /**< Unknown configuration property */
-    CONF_INVALID = -1,  /**< Invalid configuration value */
-    CONF_OK = 0         /**< Configuration property was successfully set */
-  };
-
-
-  /**
-   * @brief Create configuration object
-   */
-  static Conf *create (ConfType type);
-
-  virtual ~Conf () { }
-
-  /**
-   * @brief Set configuration property \p name to value \p value.
-   *
-   * Fallthrough:
-   * Topic-level configuration properties may be set using this interface
-   * in which case they are applied on the \c default_topic_conf.
-   * If no \c default_topic_conf has been set one will be created.
-   * Any subsequent set("default_topic_conf", ..) calls will
-   * replace the current default topic configuration.
-
-   * @returns CONF_OK on success, else writes a human readable error
-   *          description to \p errstr on error.
-   */
-  virtual Conf::ConfResult set (const std::string &name,
-                                const std::string &value,
-                                std::string &errstr) = 0;
-
-  /** @brief Use with \p name = \c \"dr_cb\" */
-  virtual Conf::ConfResult set (const std::string &name,
-                                DeliveryReportCb *dr_cb,
-                                std::string &errstr) = 0;
-
-  /** @brief Use with \p name = \c \"event_cb\" */
-  virtual Conf::ConfResult set (const std::string &name,
-                                EventCb *event_cb,
-                                std::string &errstr) = 0;
-
-  /** @brief Use with \p name = \c \"default_topic_conf\"
-   *
-   * Sets the default topic configuration to use for automatically
-   * subscribed topics.
-   *
-   * @sa RdKafka::KafkaConsumer::subscribe()
-   */
-  virtual Conf::ConfResult set (const std::string &name,
-                                const Conf *topic_conf,
-                                std::string &errstr) = 0;
-
-  /** @brief Use with \p name = \c \"partitioner_cb\" */
-  virtual Conf::ConfResult set (const std::string &name,
-                                PartitionerCb *partitioner_cb,
-                                std::string &errstr) = 0;
-
-  /** @brief Use with \p name = \c \"partitioner_key_pointer_cb\" */
-  virtual Conf::ConfResult set (const std::string &name,
-                                PartitionerKeyPointerCb *partitioner_kp_cb,
-                                std::string &errstr) = 0;
-
-  /** @brief Use with \p name = \c \"socket_cb\" */
-  virtual Conf::ConfResult set (const std::string &name, SocketCb *socket_cb,
-                                std::string &errstr) = 0;
-
-  /** @brief Use with \p name = \c \"open_cb\" */
-  virtual Conf::ConfResult set (const std::string &name, OpenCb *open_cb,
-                                std::string &errstr) = 0;
-
-  /** @brief Use with \p name = \c \"rebalance_cb\" */
-  virtual Conf::ConfResult set (const std::string &name,
-                                RebalanceCb *rebalance_cb,
-                                std::string &errstr) = 0;
-
-  /** @brief Use with \p name = \c \"offset_commit_cb\" */
-  virtual Conf::ConfResult set (const std::string &name,
-                                OffsetCommitCb *offset_commit_cb,
-                                std::string &errstr) = 0;
-
-  /** @brief Query single configuration value
-   *
-   * Do not use this method to get callbacks registered by the configuration file.
-   * Instead use the specific get() methods with the specific callback parameter in the signature.
-   *
-   * Fallthrough:
-   * Topic-level configuration properties from the \c default_topic_conf
-   * may be retrieved using this interface.
-   *
-   *  @returns CONF_OK if the property was previously set and
-   *           returns the value in \p value. */
-  virtual Conf::ConfResult get(const std::string &name,
-	  std::string &value) const = 0;
-
-  /** @brief Query single configuration value
-   *  @returns CONF_OK if the property was previously set and
-   *           returns the value in \p dr_cb. */
-  virtual Conf::ConfResult get(DeliveryReportCb *&dr_cb) const = 0;
-
-  /** @brief Query single configuration value
-   *  @returns CONF_OK if the property was previously set and
-   *           returns the value in \p event_cb. */
-  virtual Conf::ConfResult get(EventCb *&event_cb) const = 0;
-
-  /** @brief Query single configuration value
-   *  @returns CONF_OK if the property was previously set and
-   *           returns the value in \p partitioner_cb. */
-  virtual Conf::ConfResult get(PartitionerCb *&partitioner_cb) const = 0;
-
-  /** @brief Query single configuration value
-   *  @returns CONF_OK if the property was previously set and
-   *           returns the value in \p partitioner_kp_cb. */
-  virtual Conf::ConfResult get(PartitionerKeyPointerCb *&partitioner_kp_cb) const = 0;
-
-  /** @brief Query single configuration value
-   *  @returns CONF_OK if the property was previously set and
-   *           returns the value in \p socket_cb. */
-  virtual Conf::ConfResult get(SocketCb *&socket_cb) const = 0;
-
-  /** @brief Query single configuration value
-   *  @returns CONF_OK if the property was previously set and
-   *           returns the value in \p open_cb. */
-  virtual Conf::ConfResult get(OpenCb *&open_cb) const = 0;
-
-  /** @brief Query single configuration value
-   *  @returns CONF_OK if the property was previously set and
-   *           returns the value in \p rebalance_cb. */
-  virtual Conf::ConfResult get(RebalanceCb *&rebalance_cb) const = 0;
-
-  /** @brief Query single configuration value
-   *  @returns CONF_OK if the property was previously set and
-   *           returns the value in \p offset_commit_cb. */
-  virtual Conf::ConfResult get(OffsetCommitCb *&offset_commit_cb) const = 0;
-
-  /** @brief Dump configuration names and values to list containing
-   *         name,value tuples */
-  virtual std::list<std::string> *dump () = 0;
-
-  /** @brief Use with \p name = \c \"consume_cb\" */
-  virtual Conf::ConfResult set (const std::string &name, ConsumeCb *consume_cb,
-				std::string &errstr) = 0;
-};
-
-/**@}*/
-
-
-/**
- * @name Kafka base client handle
- * @{
- *
- */
-
-/**
- * @brief Base handle, super class for specific clients.
- */
-class RD_EXPORT Handle {
- public:
-  virtual ~Handle() { }
-
-  /** @returns the name of the handle */
-  virtual const std::string name () const = 0;
-
-  /**
-   * @brief Returns the client's broker-assigned group member id
-   *
-   * @remark This currently requires the high-level KafkaConsumer
-   *
-   * @returns Last assigned member id, or empty string if not currently
-   *          a group member.
-   */
-  virtual const std::string memberid () const = 0;
-
-
-  /**
-   * @brief Polls the provided kafka handle for events.
-   *
-   * Events will trigger application provided callbacks to be called.
-   *
-   * The \p timeout_ms argument specifies the maximum amount of time
-   * (in milliseconds) that the call will block waiting for events.
-   * For non-blocking calls, provide 0 as \p timeout_ms.
-   * To wait indefinitely for events, provide -1.
-   *
-   * Events:
-   *   - delivery report callbacks (if an RdKafka::DeliveryReportCb is configured) [producer]
-   *   - event callbacks (if an RdKafka::EventCb is configured) [producer & consumer]
-   *
-   * @remark  An application should make sure to call poll() at regular
-   *          intervals to serve any queued callbacks waiting to be called.
-   *
-   * @warning This method MUST NOT be used with the RdKafka::KafkaConsumer,
-   *          use its RdKafka::KafkaConsumer::consume() instead.
-   *
-   * @returns the number of events served.
-   */
-  virtual int poll (int timeout_ms) = 0;
-
-  /**
-   * @brief  Returns the current out queue length
-   *
-   * The out queue contains messages and requests waiting to be sent to,
-   * or acknowledged by, the broker.
-   */
-  virtual int outq_len () = 0;
-
-  /**
-   * @brief Request Metadata from broker.
-   *
-   * Parameters:
-   *  \p all_topics  - if non-zero: request info about all topics in cluster,
-   *                   if zero: only request info about locally known topics.
-   *  \p only_rkt    - only request info about this topic
-   *  \p metadatap   - pointer to hold metadata result.
-   *                   The \p *metadatap pointer must be released with \c delete.
-   *  \p timeout_ms  - maximum response time before failing.
-   *
-   * @returns RdKafka::ERR_NO_ERROR on success (in which case \p *metadatap
-   * will be set), else RdKafka::ERR__TIMED_OUT on timeout or
-   * other error code on error.
-   */
-  virtual ErrorCode metadata (bool all_topics, const Topic *only_rkt,
-                              Metadata **metadatap, int timeout_ms) = 0;
-
-
-  /**
-   * @brief Pause producing or consumption for the provided list of partitions.
-   *
-   * Success or error is returned per-partition in the \p partitions list.
-   *
-   * @returns RdKafka::ERR_NO_ERROR
-   *
-   * @sa resume()
-   */
-  virtual ErrorCode pause (std::vector<TopicPartition*> &partitions) = 0;
-
-
-  /**
-   * @brief Resume producing or consumption for the provided list of partitions.
-   *
-   * Success or error is returned per-partition in the \p partitions list.
-   *
-   * @returns RdKafka::ERR_NO_ERROR
-   *
-   * @sa pause()
-   */
-  virtual ErrorCode resume (std::vector<TopicPartition*> &partitions) = 0;
-
-
-  /**
-   * @brief Query broker for low (oldest/beginning)
-   *        and high (newest/end) offsets for partition.
-   *
-   * Offsets are returned in \p *low and \p *high respectively.
-   *
-   * @returns RdKafka::ERR_NO_ERROR on success or an error code on failure.
-   */
-  virtual ErrorCode query_watermark_offsets (const std::string &topic,
-					     int32_t partition,
-					     int64_t *low, int64_t *high,
-					     int timeout_ms) = 0;
-
-  /**
-   * @brief Get last known low (oldest/beginning)
-   *        and high (newest/end) offsets for partition.
-   *
-   * The low offset is updated periodically (if statistics.interval.ms is set)
-   * while the high offset is updated on each fetched message set from the
-   * broker.
-   *
-   * If there is no cached offset (either low or high, or both) then
-   * OFFSET_INVALID will be returned for the respective offset.
-   *
-   * Offsets are returned in \p *low and \p *high respectively.
-   *
-   * @returns RdKafka::ERR_NO_ERROR on success or an error code on failure.
-   *
-   * @remark Shall only be used with an active consumer instance.
-   */
-  virtual ErrorCode get_watermark_offsets (const std::string &topic,
-					   int32_t partition,
-					   int64_t *low, int64_t *high) = 0;
-
-
-  /**
-   * @brief Look up the offsets for the given partitions by timestamp.
-   *
-   * The returned offset for each partition is the earliest offset whose
-   * timestamp is greater than or equal to the given timestamp in the
-   * corresponding partition.
-   *
-   * The timestamps to query are represented as \c offset in \p offsets
-   * on input, and \c offset() will return the closest earlier offset
-   * for the timestamp on output.
-   *
-   * The function will block for at most \p timeout_ms milliseconds.
-   *
-   * @remark Duplicate Topic+Partitions are not supported.
-   * @remark Errors are also returned per TopicPartition, see \c err()
-   *
-   * @returns an error code for general errors, else RdKafka::ERR_NO_ERROR
-   *          in which case per-partition errors might be set.
-   */
-  virtual ErrorCode offsetsForTimes (std::vector<TopicPartition*> &offsets,
-                                     int timeout_ms) = 0;
-
-
-  /**
-   * @brief Retrieve queue for a given partition.
-   *
-   * @returns The fetch queue for the given partition if successful. Else,
-   *          NULL is returned.
-   *          
-   * @remark This function only works on consumers.
-   */
-  virtual Queue *get_partition_queue (const TopicPartition *partition) = 0;
-
-  /**
-   * @brief Forward librdkafka logs (and debug) to the specified queue
-   *        for serving with one of the ..poll() calls.
-   *
-   *        This allows an application to serve log callbacks (\c log_cb)
-   *        in its thread of choice.
-   *
-   * @param queue Queue to forward logs to. If the value is NULL the logs
-   *        are forwarded to the main queue.
-   *
-   * @remark The configuration property \c log.queue MUST also be set to true.
-   *
-   * @remark librdkafka maintains its own reference to the provided queue.
-   *
-   * @returns ERR_NO_ERROR on success or an error code on error.
-   */
-  virtual ErrorCode set_log_queue (Queue *queue) = 0;
-
-  /**
-   * @brief Cancels the current callback dispatcher (Producer::poll(),
-   *        Consumer::poll(), KafkaConsumer::consume(), etc).
-   *
-   * A callback may use this to force an immediate return to the calling
-   * code (caller of e.g. ..::poll()) without processing any further
-   * events.
-   *
-   * @remark This function MUST ONLY be called from within a
-   *         librdkafka callback.
-   */
-  virtual void yield () = 0;
-
-  /**
-   * @brief Returns the ClusterId as reported in broker metadata.
-   *
-   * @param timeout_ms If there is no cached value from metadata retrieval
-   *                   then this specifies the maximum amount of time
-   *                   (in milliseconds) the call will block waiting
-   *                   for metadata to be retrieved.
-   *                   Use 0 for non-blocking calls.
-   *
-   * @remark Requires broker version >=0.10.0 and api.version.request=true.
-   *
-   * @returns Last cached ClusterId, or empty string if no ClusterId could be
-   *          retrieved in the allotted timespan.
-   */
-  virtual const std::string clusterid (int timeout_ms) = 0;
-};
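// Illustrative sketch using the Handle API above: query a partition's low
// and high watermark offsets from the broker. The 5000 ms timeout and the
// logging are assumptions.
#include <iostream>
#include <string>
#include "rdkafkacpp.h"

static void print_watermarks (RdKafka::Handle *handle,
                              const std::string &topic, int32_t partition) {
  int64_t low = 0, high = 0;
  RdKafka::ErrorCode err =
      handle->query_watermark_offsets(topic, partition, &low, &high, 5000);
  if (err != RdKafka::ERR_NO_ERROR)
    std::cerr << "query_watermark_offsets failed: "
              << RdKafka::err2str(err) << std::endl;
  else
    std::cout << topic << " [" << partition << "]: low " << low
              << ", high " << high << std::endl;
}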
-
-
-/**@}*/
-
-
-/**
- * @name Topic and partition objects
- * @{
- *
- */
-
-/**
- * @brief Topic+Partition
- *
- * This is a generic type to hold a single partition and various
- * information about it.
- *
- * It is typically used with std::vector<RdKafka::TopicPartition*> to provide
- * a list of partitions for different operations.
- */
-class RD_EXPORT TopicPartition {
-public:
-  /**
-   * Create topic+partition object for \p topic and \p partition
-   * and optionally \p offset.
-   *
-   * Use \c delete to destroy it.
-   */
-  static TopicPartition *create (const std::string &topic, int partition);
-  static TopicPartition *create (const std::string &topic, int partition,
-                                 int64_t offset);
-
-  virtual ~TopicPartition() = 0;
-
-  /**
-   * @brief Destroy/delete the TopicPartitions in \p partitions
-   *        and clear the vector.
-   */
-  static void destroy (std::vector<TopicPartition*> &partitions);
-
-  /** @returns topic name */
-  virtual const std::string &topic () const = 0;
-
-  /** @returns partition id */
-  virtual int partition () const = 0;
-
-  /** @returns offset (if applicable) */
-  virtual int64_t offset () const = 0;
-
-  /** @brief Set offset */
-  virtual void set_offset (int64_t offset) = 0;
-
-  /** @returns error code (if applicable) */
-  virtual ErrorCode err () const = 0;
-};
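// Illustrative sketch of building and releasing a TopicPartition list of
// the kind passed to assign(), committed(), offsetsForTimes(), etc.; the
// topic name and offsets are assumptions.
#include <vector>
#include "rdkafkacpp.h"

static void topic_partition_list_example () {
  std::vector<RdKafka::TopicPartition*> partitions;
  partitions.push_back(RdKafka::TopicPartition::create(
      "example_topic", 0, RdKafka::Topic::OFFSET_BEGINNING));
  partitions.push_back(RdKafka::TopicPartition::create("example_topic", 1));
  partitions[1]->set_offset(1234);
  /* ... pass to e.g. KafkaConsumer::assign() or committed() ... */
  RdKafka::TopicPartition::destroy(partitions);  /* deletes and clears */
}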
-
-
-
-/**
- * @brief Topic handle
- *
- */
-class RD_EXPORT Topic {
- public:
-  /**
-   * @brief Unassigned partition.
-   *
-   * The unassigned partition is used by the producer API for messages
-   * that should be partitioned using the configured or default partitioner.
-   */
-  static const int32_t PARTITION_UA;
-
-  /** @brief Special offsets */
-  static const int64_t OFFSET_BEGINNING; /**< Consume from beginning */
-  static const int64_t OFFSET_END; /**< Consume from end */
-  static const int64_t OFFSET_STORED; /**< Use offset storage */
-  static const int64_t OFFSET_INVALID; /**< Invalid offset */
-
-
-  /**
-   * @brief Creates a new topic handle for topic named \p topic_str
-   *
-   * \p conf is an optional configuration for the topic  that will be used
-   * instead of the default topic configuration.
-   * The \p conf object is reusable after this call.
-   *
-   * @returns the new topic handle or NULL on error (see \p errstr).
-   */
-  static Topic *create (Handle *base, const std::string &topic_str,
-                        Conf *conf, std::string &errstr);
-
-  virtual ~Topic () = 0;
-
-
-  /** @returns the topic name */
-  virtual const std::string name () const = 0;
-
-  /**
-   * @returns true if \p partition is available for the topic (has leader).
-   * @warning \b MUST \b ONLY be called from within a
-   *          RdKafka::PartitionerCb callback.
-   */
-  virtual bool partition_available (int32_t partition) const = 0;
-
-  /**
-   * @brief Store offset \p offset for topic partition \p partition.
-   * The offset will be committed (written) to the offset store according
-   * to \p auto.commit.interval.ms.
-   *
-   * @remark \c enable.auto.offset.store must be set to \c false when using this API.
-   *
-   * @returns RdKafka::ERR_NO_ERROR on success or an error code if none of the
-   *          offsets could be stored.
-   */
-  virtual ErrorCode offset_store (int32_t partition, int64_t offset) = 0;
-};
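// Illustrative sketch of Topic::create() as documented above; `handle` may
// be any producer or consumer handle, and the topic name is an assumption.
#include <iostream>
#include <string>
#include "rdkafkacpp.h"

static RdKafka::Topic *create_topic (RdKafka::Handle *handle) {
  std::string errstr;
  /* Passing NULL for conf uses the default topic configuration. */
  RdKafka::Topic *topic =
      RdKafka::Topic::create(handle, "example_topic", NULL, errstr);
  if (!topic)
    std::cerr << "Topic::create failed: " << errstr << std::endl;
  return topic;
}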
-
-
-/**@}*/
-
-
-/**
- * @name Message object
- * @{
- *
- */
-
-
-/**
- * @brief Message timestamp object
- *
- * Represents the number of milliseconds since the epoch (UTC).
- *
- * The MessageTimestampType dictates the timestamp type or origin.
- *
- * @remark Requires Apache Kafka broker version >= 0.10.0
- *
- */
-
-class RD_EXPORT MessageTimestamp {
-public:
-  enum MessageTimestampType {
-    MSG_TIMESTAMP_NOT_AVAILABLE,   /**< Timestamp not available */
-    MSG_TIMESTAMP_CREATE_TIME,     /**< Message creation time (source) */
-    MSG_TIMESTAMP_LOG_APPEND_TIME  /**< Message log append time (broker) */
-  };
-
-  MessageTimestampType type;       /**< Timestamp type */
-  int64_t timestamp;               /**< Milliseconds since epoch (UTC). */
-};
-
-
-
-/**
- * @brief Message object
- *
- * This object represents either a single consumed or produced message,
- * or an event (\p err() is set).
- *
- * An application must check RdKafka::Message::err() to see if the
- * object is a proper message (error is RdKafka::ERR_NO_ERROR) or
- * an error event.
- *
- */
-class RD_EXPORT Message {
- public:
-  /**
-   * @brief Accessor functions
-   * @remark Not all fields are present in all types of callbacks.
-   */
-
-  /** @returns The error string if the object represents an error event,
-   *           else an empty string. */
-  virtual std::string         errstr() const = 0;
-
-  /** @returns The error code if the object represents an error event, else 0. */
-  virtual ErrorCode           err () const = 0;
-
-  /** @returns the RdKafka::Topic object for a message (if applicable),
-   *            or NULL if a corresponding RdKafka::Topic object has not been
-   *            explicitly created with RdKafka::Topic::create().
-   *            In this case use topic_name() instead. */
-  virtual Topic              *topic () const = 0;
-
-  /** @returns Topic name (if applicable, else empty string) */
-  virtual std::string         topic_name () const = 0;
-
-  /** @returns Partition (if applicable) */
-  virtual int32_t             partition () const = 0;
-
-  /** @returns Message payload (if applicable) */
-  virtual void               *payload () const = 0 ;
-
-  /** @returns Message payload length (if applicable) */
-  virtual size_t              len () const = 0;
-
-  /** @returns Message key as string (if applicable) */
-  virtual const std::string  *key () const = 0;
-
-  /** @returns Message key as void pointer  (if applicable) */
-  virtual const void         *key_pointer () const = 0 ;
-
-  /** @returns Message key's binary length (if applicable) */
-  virtual size_t              key_len () const = 0;
-
-  /** @returns Message or error offset (if applicable) */
-  virtual int64_t             offset () const = 0;
-
-  /** @returns Message timestamp (if applicable) */
-  virtual MessageTimestamp    timestamp () const = 0;
-
-  /** @returns The \p msg_opaque as provided to RdKafka::Producer::produce() */
-  virtual void               *msg_opaque () const = 0;
-
-  virtual ~Message () = 0;
-
-  /** @returns the latency in microseconds for a produced message measured
-   *           from the produce() call, or -1 if latency is not available. */
-  virtual int64_t             latency () const = 0;
-};
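// Illustrative sketch of inspecting a consumed Message via the accessors
// above; treating ERR__PARTITION_EOF as benign follows the ErrorCode
// documentation, while the printing itself is an assumption.
#include <iostream>
#include <string>
#include "rdkafkacpp.h"

static void handle_message (RdKafka::Message *message) {
  switch (message->err()) {
    case RdKafka::ERR_NO_ERROR:
      std::cout << message->topic_name() << " [" << message->partition()
                << "] @ " << message->offset() << ": "
                << std::string(
                       static_cast<const char *>(message->payload()),
                       message->len())
                << std::endl;
      break;
    case RdKafka::ERR__PARTITION_EOF:  /* end of partition, not an error */
      break;
    default:
      std::cerr << "Consume error: " << message->errstr() << std::endl;
      break;
  }
}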
-
-/**@}*/
-
-
-/**
- * @name Queue interface
- * @{
- *
- */
-
-
-/**
- * @brief Queue interface
- *
- * Create a new message queue.  Message queues allows the application
- * to re-route consumed messages from multiple topic+partitions into
- * one single queue point.  This queue point, containing messages from
- * a number of topic+partitions, may then be served by a single
- * consume() method, rather than one per topic+partition combination.
- *
- * See the RdKafka::Consumer::start(), RdKafka::Consumer::consume(), and
- * RdKafka::Consumer::consume_callback() methods that take a queue as the first
- * parameter for more information.
- */
-class RD_EXPORT Queue {
- public:
-  /**
-   * @brief Create Queue object
-   */
-  static Queue *create (Handle *handle);
-
-  /**
-   * @brief Forward/re-route queue to \p dst.
-   * If \p dst is \c NULL, the forwarding is removed.
-   *
-   * The internal refcounts for both queues are increased.
-   * 
-   * @remark Regardless of whether \p dst is NULL or not, after calling this
- *         function, \p src will not forward its fetch queue to the consumer
-   *         queue.
-   */
-  virtual ErrorCode forward (Queue *dst) = 0;
-
-
-  /**
-   * @brief Consume message or get error event from the queue.
-   *
-   * @remark Use \c delete to free the message.
-   *
-   * @returns One of:
-   *  - proper message (RdKafka::Message::err() is ERR_NO_ERROR)
-   *  - error event (RdKafka::Message::err() is != ERR_NO_ERROR)
-   *  - timeout due to no message or event in \p timeout_ms
-   *    (RdKafka::Message::err() is ERR__TIMED_OUT)
-   */
-  virtual Message *consume (int timeout_ms) = 0;
-
-  /**
-   * @brief Poll queue, serving any enqueued callbacks.
-   *
-   * @remark Must NOT be used for queues containing messages.
-   *
-   * @returns the number of events served or 0 on timeout.
-   */
-  virtual int poll (int timeout_ms) = 0;
-
-  virtual ~Queue () = 0;
-};
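// Illustrative sketch of the queue re-routing described above, using the
// legacy Consumer API declared further below: two partitions are fetched
// through one Queue. Partition numbers and the message count are assumptions.
#include "rdkafkacpp.h"

static void consume_via_queue (RdKafka::Consumer *consumer,
                               RdKafka::Topic *topic) {
  RdKafka::Queue *queue = RdKafka::Queue::create(consumer);
  consumer->start(topic, 0, RdKafka::Topic::OFFSET_BEGINNING, queue);
  consumer->start(topic, 1, RdKafka::Topic::OFFSET_BEGINNING, queue);
  for (int i = 0 ; i < 1000 ; i++) {
    RdKafka::Message *msg = queue->consume(1000);
    /* check msg->err() exactly as for a directly consumed message */
    delete msg;
  }
  consumer->stop(topic, 0);
  consumer->stop(topic, 1);
  delete queue;
}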
-
-/**@}*/
-
-
-/**
- * @name KafkaConsumer
- * @{
- *
- */
-
-
-/**
- * @brief High-level KafkaConsumer (for brokers 0.9 and later)
- *
- * @remark Requires Apache Kafka >= 0.9.0 brokers
- *
- * Currently supports the \c range and \c roundrobin partition assignment
- * strategies (see \c partition.assignment.strategy)
- */
-class RD_EXPORT KafkaConsumer : public virtual Handle {
-public:
-  /**
-   * @brief Creates a KafkaConsumer.
-   *
-   * The \p conf object must have \c group.id set to the consumer group to join.
-   *
-   * Use RdKafka::KafkaConsumer::close() to shut down the consumer.
-   *
-   * @sa RdKafka::RebalanceCb
-   * @sa CONFIGURATION.md for \c group.id, \c session.timeout.ms,
-   *     \c partition.assignment.strategy, etc.
-   */
-  static KafkaConsumer *create (Conf *conf, std::string &errstr);
-
-  virtual ~KafkaConsumer () = 0;
-
-
-  /** @brief Returns the current partition assignment as set by
-   *         RdKafka::KafkaConsumer::assign() */
-  virtual ErrorCode assignment (std::vector<RdKafka::TopicPartition*> &partitions) = 0;
-
-  /** @brief Returns the current subscription as set by
-   *         RdKafka::KafkaConsumer::subscribe() */
-  virtual ErrorCode subscription (std::vector<std::string> &topics) = 0;
-
-  /**
-   * @brief Update the subscription set to \p topics.
-   *
-   * Any previous subscription will be unassigned and  unsubscribed first.
-   *
-   * The subscription set denotes the desired topics to consume and this
-   * set is provided to the partition assignor (one of the elected group
-   * members) for all clients which then uses the configured
-   * \c partition.assignment.strategy to assign the subscription set's
-   * topics' partitions to the consumers, depending on their subscription.
-   *
-   * The result of such an assignment is a rebalancing which is either
-   * handled automatically in librdkafka or can be overriden by the application
-   * by providing a RdKafka::RebalanceCb.
-   *
-   * The rebalancing passes the assigned partition set to
-   * RdKafka::KafkaConsumer::assign() to update what partitions are actually
-   * being fetched by the KafkaConsumer.
-   *
-   * Regex pattern matching is automatically performed for topics prefixed
-   * with \c \"^\" (e.g. \c \"^myPfx[0-9]_.*\").
-   *
-   * @returns an error if the provided list of topics is invalid.
-   */
-  virtual ErrorCode subscribe (const std::vector<std::string> &topics) = 0;
-
-  /** @brief Unsubscribe from the current subscription set. */
-  virtual ErrorCode unsubscribe () = 0;
-
-  /**
-   *  @brief Update the assignment set to \p partitions.
-   *
-   * The assignment set is the set of partitions actually being consumed
-   * by the KafkaConsumer.
-   */
-  virtual ErrorCode assign (const std::vector<TopicPartition*> &partitions) = 0;
-
-  /**
-   * @brief Stop consumption and remove the current assignment.
-   */
-  virtual ErrorCode unassign () = 0;
-
-  /**
-   * @brief Consume message or get error event, triggers callbacks.
-   *
-   * Will automatically call registered callbacks for any such queued events,
-   * including RdKafka::RebalanceCb, RdKafka::EventCb, RdKafka::OffsetCommitCb,
-   * etc.
-   *
-   * @remark Use \c delete to free the message.
-   *
-   * @remark  An application should make sure to call consume() at regular
-   *          intervals, even if no messages are expected, to serve any
-   *          queued callbacks waiting to be called. This is especially
-   *          important when a RebalanceCb has been registered as it needs
-   *          to be called and handled properly to synchronize internal
-   *          consumer state.
-   *
-   * @remark Application MUST NOT call \p poll() on KafkaConsumer objects.
-   *
-   * @returns One of:
-   *  - proper message (RdKafka::Message::err() is ERR_NO_ERROR)
-   *  - error event (RdKafka::Message::err() is != ERR_NO_ERROR)
-   *  - timeout due to no message or event in \p timeout_ms
-   *    (RdKafka::Message::err() is ERR__TIMED_OUT)
-   */
-  virtual Message *consume (int timeout_ms) = 0;
-
-  /**
-   * @brief Commit offsets for the current assignment.
-   *
-   * @remark This is the synchronous variant that blocks until offsets
-   *         are committed or the commit fails (see return value).
-   *
-   * @remark If a RdKafka::OffsetCommitCb callback is registered it will
-   *         be called with commit details on a future call to
-   *         RdKafka::KafkaConsumer::consume()
-
-   *
-   * @returns ERR_NO_ERROR or error code.
-   */
-  virtual ErrorCode commitSync () = 0;
-
-  /**
-   * @brief Asynchronous version of RdKafka::KafkaConsumer::commitSync()
-   *
-   * @sa RdKafka::KafkaConsumer::commitSync()
-   */
-  virtual ErrorCode commitAsync () = 0;
-
-  /**
-   * @brief Commit offset for a single topic+partition based on \p message
-   *
-   * @remark This is the synchronous variant.
-   *
-   * @sa RdKafka::KafkaConsumer::commitSync()
-   */
-  virtual ErrorCode commitSync (Message *message) = 0;
-
-  /**
-   * @brief Commit offset for a single topic+partition based on \p message
-   *
-   * @remark This is the asynchronous variant.
-   *
-   * @sa RdKafka::KafkaConsumer::commitSync()
-   */
-  virtual ErrorCode commitAsync (Message *message) = 0;
-
-  /**
-   * @brief Commit offsets for the provided list of partitions.
-   *
-   * @remark This is the synchronous variant.
-   */
-  virtual ErrorCode commitSync (std::vector<TopicPartition*> &offsets) = 0;
-
-  /**
-   * @brief Commit offset for the provided list of partitions.
-   *
-   * @remark This is the asynchronous variant.
-   */
-  virtual ErrorCode commitAsync (const std::vector<TopicPartition*> &offsets) = 0;
-
-  /**
-   * @brief Commit offsets for the current assignment.
-   *
-   * @remark This is the synchronous variant that blocks until offsets
-   *         are committed or the commit fails (see return value).
-   *
-   * @remark The provided callback will be called from this function.
-   *
-   * @returns ERR_NO_ERROR or error code.
-   */
-  virtual ErrorCode commitSync (OffsetCommitCb *offset_commit_cb) = 0;
-
-  /**
-   * @brief Commit offsets for the provided list of partitions.
-   *
-   * @remark This is the synchronous variant that blocks until offsets
-   *         are committed or the commit fails (see return value).
-   *
-   * @remark The provided callback will be called from this function.
-   *
-   * @returns ERR_NO_ERROR or error code.
-   */
-  virtual ErrorCode commitSync (std::vector<TopicPartition*> &offsets,
-                                OffsetCommitCb *offset_commit_cb) = 0;
-
-
-
-
-  /**
-   * @brief Retrieve committed offsets for topics+partitions.
-   *
-   * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success in which case the
-   *          \p offset or \p err field of each \p partitions' element is filled
-   *          in with the stored offset, or a partition specific error.
-   *          Else returns an error code.
-   */
-  virtual ErrorCode committed (std::vector<TopicPartition*> &partitions,
-			       int timeout_ms) = 0;
-
-  /**
-   * @brief Retrieve current positions (offsets) for topics+partitions.
-   *
-   * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success in which case the
-   *          \p offset or \p err field of each \p partitions' element is filled
-   *          in with the stored offset, or a partition specific error.
-   *          Else returns an error code.
-   */
-  virtual ErrorCode position (std::vector<TopicPartition*> &partitions) = 0;
-
-
-  /**
-   * For pausing and resuming consumption, see
-   * @sa RdKafka::Handle::pause() and RdKafka::Handle::resume()
-   */
-
-
-  /**
-   * @brief Close and shut down the consumer.
-   *
-   * This call will block until the following operations are finished:
-   *  - Trigger a local rebalance to void the current assignment
-   *  - Stop consumption for current assignment
-   *  - Commit offsets
-   *  - Leave group
-   *
-   * The maximum blocking time is roughly limited to session.timeout.ms.
-   *
-   * @remark Callbacks, such as RdKafka::RebalanceCb and
-   *         RdKafka::OffsetCommitCb, etc, may be called.
-   *
-   * @remark The consumer object must later be freed with \c delete
-   */
-  virtual ErrorCode close () = 0;
-
-
-  /**
-   * @brief Seek consumer for topic+partition to offset which is either an
-   *        absolute or logical offset.
-   *
-   * If \p timeout_ms is not 0 the call will wait this long for the
-   * seek to be performed. If the timeout is reached the internal state
-   * will be unknown and this function returns `ERR__TIMED_OUT`.
-   * If \p timeout_ms is 0 it will initiate the seek but return
-   * immediately without any error reporting (e.g., async).
-   *
-   * This call triggers a fetch queue barrier flush.
-   *
-   * @remark Consumption for the given partition must have started for the
-   *         seek to work. Use assign() to set the starting offset.
-   *
-   * @returns an ErrorCode to indicate success or failure.
-   */
-  virtual ErrorCode seek (const TopicPartition &partition, int timeout_ms) = 0;
-
-
-  /**
-   * @brief Store offset \p offset for topic partition \p partition.
-   * The offset will be committed (written) to the offset store according
-   * to \p auto.commit.interval.ms or the next manual offset-less commit*()
-   *
-   * Per-partition success/error status propagated through TopicPartition.err()
-   *
-   * @remark \c enable.auto.offset.store must be set to \c false when using this API.
-   *
-   * @returns RdKafka::ERR_NO_ERROR on success or an error code on error.
-   */
-  virtual ErrorCode offsets_store (std::vector<TopicPartition*> &offsets) = 0;
-};
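// Illustrative sketch of the KafkaConsumer lifecycle documented above:
// create, subscribe, consume loop, close. The broker address, group.id,
// topic name and loop bound are assumptions; error handling is minimal.
#include <iostream>
#include <string>
#include <vector>
#include "rdkafkacpp.h"

static void run_consumer () {
  std::string errstr;
  RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
  conf->set("bootstrap.servers", "localhost:9092", errstr);
  conf->set("group.id", "example_group", errstr);
  RdKafka::KafkaConsumer *consumer =
      RdKafka::KafkaConsumer::create(conf, errstr);
  delete conf;  /* the conf object remains owned by the application */
  if (!consumer) {
    std::cerr << "Failed to create consumer: " << errstr << std::endl;
    return;
  }
  std::vector<std::string> topics;
  topics.push_back("example_topic");
  consumer->subscribe(topics);
  for (int i = 0 ; i < 1000 ; i++) {
    /* consume() also serves rebalance, offset commit and event callbacks */
    RdKafka::Message *msg = consumer->consume(1000);
    if (msg->err() == RdKafka::ERR_NO_ERROR)
      std::cout << "Message at offset " << msg->offset() << std::endl;
    delete msg;
  }
  consumer->close();   /* commits offsets and leaves the group */
  delete consumer;
}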
-
-
-/**@}*/
-
-
-/**
- * @name Simple Consumer (legacy)
- * @{
- *
- */
-
-/**
- * @brief Simple Consumer (legacy)
- *
- * A simple non-balanced, non-group-aware, consumer.
- */
-class RD_EXPORT Consumer : public virtual Handle {
- public:
-  /**
-   * @brief Creates a new Kafka consumer handle.
-   *
-   * \p conf is an optional object that will be used instead of the default
-   * configuration.
-   * The \p conf object is reusable after this call.
-   *
-   * @returns the new handle on success or NULL on error in which case
-   * \p errstr is set to a human readable error message.
-   */
-  static Consumer *create (Conf *conf, std::string &errstr);
-
-  virtual ~Consumer () = 0;
-
-
-  /**
-   * @brief Start consuming messages for topic and \p partition
-   * at offset \p offset which may either be a proper offset (0..N)
-   * or one of the special offsets: \p OFFSET_BEGINNING or \p OFFSET_END.
-   *
-   * rdkafka will attempt to keep \p queued.min.messages (config property)
-   * messages in the local queue by repeatedly fetching batches of messages
-   * from the broker until the threshold is reached.
-   *
-   * The application shall use one of the \p ..->consume*() functions
-   * to consume messages from the local queue, each kafka message being
-   * represented as a `RdKafka::Message *` object.
-   *
-   * \p ..->start() must not be called multiple times for the same
-   * topic and partition without stopping consumption first with
-   * \p ..->stop().
-   *
-   * @returns an ErrorCode to indicate success or failure.
-   */
-  virtual ErrorCode start (Topic *topic, int32_t partition, int64_t offset) = 0;
-
-  /**
-   * @brief Start consuming messages for topic and \p partition on
-   *        queue \p queue.
-   *
-   * @sa RdKafka::Consumer::start()
-   */
-  virtual ErrorCode start (Topic *topic, int32_t partition, int64_t offset,
-                           Queue *queue) = 0;
-
-  /**
-   * @brief Stop consuming messages for topic and \p partition, purging
-   *        all messages currently in the local queue.
-   *
-   * The application needs to stop all consumers before destroying
-   * the Consumer handle.
-   *
-   * @returns an ErrorCode to indicate success or failure.
-   */
-  virtual ErrorCode stop (Topic *topic, int32_t partition) = 0;
-
-  /**
-   * @brief Seek consumer for topic+partition to \p offset which is either an
-   *        absolute or logical offset.
-   *
-   * If \p timeout_ms is not 0 the call will wait this long for the
-   * seek to be performed. If the timeout is reached the internal state
-   * will be unknown and this function returns `ERR__TIMED_OUT`.
-   * If \p timeout_ms is 0 it will initiate the seek but return
-   * immediately without any error reporting (e.g., async).
-   *
-   * This call triggers a fetch queue barrier flush.
-   *
-   * @returns an ErrorCode to indicate success or failure.
-   */
-  virtual ErrorCode seek (Topic *topic, int32_t partition, int64_t offset,
-			  int timeout_ms) = 0;
-
-  /**
-   * @brief Consume a single message from \p topic and \p partition.
-   *
-   * \p timeout_ms is the maximum amount of time to wait for a message to be
-   * received.
-   * Consumer must have been previously started with \p ..->start().
-   *
-   * @returns a Message object; the application needs to check whether the
-   * message is an error or a proper message by calling RdKafka::Message::err()
-   * and checking for \p ERR_NO_ERROR.
-   *
-   * The message object must be destroyed when the application is done with it.
-   *
-   * Errors (in RdKafka::Message::err()):
-   *  - ERR__TIMED_OUT - \p timeout_ms was reached with no new messages fetched.
-   *  - ERR__PARTITION_EOF - End of partition reached, not an error.
-   */
-  virtual Message *consume (Topic *topic, int32_t partition,
-                            int timeout_ms) = 0;
-
-  /**
-   * @brief Consume a single message from the specified queue.
-   *
-   * \p timeout_ms is the maximum amount of time to wait for a message to be
-   * received.
-   * Consumer must have been previously started on the queue with
-   * \p ..->start().
-   *
-   * @returns a Message object; the application needs to check whether the
-   * message is an error or a proper message by calling \p Message->err()
-   * and checking for \p ERR_NO_ERROR.
-   *
-   * The message object must be destroyed when the application is done with it.
-   *
-   * Errors (in RdKafka::Message::err()):
-   *   - ERR__TIMED_OUT - \p timeout_ms was reached with no new messages fetched
-   *
-   * Note that Message->topic() may be nullptr after certain kinds of
-   * errors, so applications should check that it isn't null before
-   * dereferencing it.
-   */
-  virtual Message *consume (Queue *queue, int timeout_ms) = 0;
-
-  /**
-   * @brief Consumes messages from \p topic and \p partition, calling
-   *        the provided callback for each consumed message.
-   *
-   * \p consume_callback() provides higher throughput performance
-   * than \p consume().
-   *
-   * \p timeout_ms is the maximum amount of time to wait for one or
-   * more messages to arrive.
-   *
-   * The provided \p consume_cb instance has its \p consume_cb function
-   * called for every message received.
-   *
-   * The \p opaque argument is passed to the \p consume_cb as \p opaque.
-   *
-   * @returns the number of messages processed or -1 on error.
-   *
-   * @sa RdKafka::Consumer::consume()
-   */
-  virtual int consume_callback (Topic *topic, int32_t partition,
-                                int timeout_ms,
-                                ConsumeCb *consume_cb,
-                                void *opaque) = 0;
-
-  /**
-   * @brief Consumes messages from \p queue, calling the provided callback for
-   *        each consumed message.
-   *
-   * @sa RdKafka::Consumer::consume_callback()
-   */
-  virtual int consume_callback (Queue *queue, int timeout_ms,
-                                RdKafka::ConsumeCb *consume_cb,
-                                void *opaque) = 0;
-
-  /**
-   * @brief Converts an offset into the logical offset from the tail of a topic.
-   *
-   * \p offset is the (positive) number of items from the end.
-   *
-   * @returns the logical offset for message \p offset from the tail; this value
-   *          may be passed to Consumer::start() et al.
-   * @remark The returned logical offset is specific to librdkafka.
-   */
-  static int64_t OffsetTail(int64_t offset);
-};
-
-/**@}*/
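
For readers skimming this header, the following is a minimal, hedged usage
sketch of the legacy Consumer API documented above (create/start/consume/stop).
The broker address, topic name and error handling are illustrative assumptions
and are not part of this commit:

    #include <iostream>
    #include <string>
    #include "rdkafkacpp.h"

    int main() {
      std::string errstr;
      RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
      conf->set("metadata.broker.list", "localhost:9092", errstr); /* assumed broker */

      RdKafka::Consumer *consumer = RdKafka::Consumer::create(conf, errstr);
      RdKafka::Topic *topic =
          RdKafka::Topic::create(consumer, "test", NULL, errstr); /* assumed topic */

      /* Start consuming partition 0 from the beginning of the log. */
      consumer->start(topic, 0, RdKafka::Topic::OFFSET_BEGINNING);

      /* Fetch a single message with a 1000 ms timeout, then release it. */
      RdKafka::Message *msg = consumer->consume(topic, 0, 1000);
      if (msg->err() == RdKafka::ERR_NO_ERROR)
        std::cout << "read " << msg->len() << " bytes" << std::endl;
      delete msg;

      consumer->stop(topic, 0);
      delete topic;
      delete consumer;
      delete conf;
      return 0;
    }
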
-
-
-/**
- * @name Producer
- * @{
- *
- */
-
-
-/**
- * @brief Producer
- */
-class RD_EXPORT Producer : public virtual Handle {
- public:
-  /**
-   * @brief Creates a new Kafka producer handle.
-   *
-   * \p conf is an optional object that will be used instead of the default
-   * configuration.
-   * The \p conf object is reusable after this call.
-   *
-   * @returns the new handle on success or NULL on error in which case
-   *          \p errstr is set to a human readable error message.
-   */
-  static Producer *create (Conf *conf, std::string &errstr);
-
-
-  virtual ~Producer () = 0;
-
-  /**
-   * @brief RdKafka::Producer::produce() \p msgflags
-   *
-   * These flags are optional and mutually exclusive.
-   */
-  enum {
-    RK_MSG_FREE = 0x1, /**< rdkafka will free(3) \p payload
-                         * when it is done with it. */
-    RK_MSG_COPY = 0x2, /**< the \p payload data will be copied
-                        * and the \p payload pointer will not
-                        * be used by rdkafka after the
-                        * call returns. */
-    RK_MSG_BLOCK = 0x4  /**< Block produce*() on message queue
-                         *   full.
-                         *   WARNING:
-                         *   If a delivery report callback
-                         *   is used the application MUST
-                         *   call rd_kafka_poll() (or equiv.)
-                         *   to make sure delivered messages
-                         *   are drained from the internal
-                         *   delivery report queue.
-                         *   Failure to do so will result
-                         *   in indefinitely blocking on
-                         *   the produce() call when the
-                         *   message queue is full.
-                         */
-
-
-  /**@cond NO_DOC*/
-  /* For backwards compatibility: */
-#ifndef MSG_COPY /* defined in sys/msg.h */
-    , /** this comma must exist between
-       *  RK_MSG_BLOCK and MSG_FREE
-       */
-    MSG_FREE = RK_MSG_FREE,
-    MSG_COPY = RK_MSG_COPY
-#endif
-  /**@endcond*/
-  };
-
-  /**
-   * @brief Produce and send a single message to broker.
-   *
-   * This is an asynchronous, non-blocking API.
-   *
-   * \p partition is the target partition, either:
-   *   - RdKafka::Topic::PARTITION_UA (unassigned) for
-   *     automatic partitioning using the topic's partitioner function, or
-   *   - a fixed partition (0..N)
-   *
-   * \p msgflags is zero or more of the following flags OR:ed together:
-   *    RK_MSG_BLOCK - block \p produce*() call if
-   *                   \p queue.buffering.max.messages or
-   *                   \p queue.buffering.max.kbytes are exceeded.
-   *                   Messages are considered in-queue from the point they
-   *                   are accepted by produce() until their corresponding
-   *                   delivery report callback/event returns.
-   *                   It is thus a requirement to call 
-   *                   poll() (or equiv.) from a separate
-   *                   thread when RK_MSG_BLOCK is used.
-   *                   See WARNING on \c RK_MSG_BLOCK above.
-   *    RK_MSG_FREE - rdkafka will free(3) \p payload when it is done with it.
-   *    RK_MSG_COPY - the \p payload data will be copied and the \p payload
-   *               pointer will not be used by rdkafka after the
-   *               call returns.
-   *
-   *  NOTE: RK_MSG_FREE and RK_MSG_COPY are mutually exclusive.
-   *
-   *  If the function returns -1 and RK_MSG_FREE was specified, then
-   *  the memory associated with the payload is still the caller's
-   *  responsibility.
-   *
-   * \p payload is the message payload of size \p len bytes.
-   *
-   * \p key is an optional message key, if non-NULL it
-   * will be passed to the topic partitioner as well as be sent with the
-   * message to the broker and passed on to the consumer.
-   *
-   * \p msg_opaque is an optional application-provided per-message opaque
-   * pointer that will be provided in the delivery report callback (\p dr_cb) for
-   * referencing this message.
-   *
-   * @returns an ErrorCode to indicate success or failure:
-   *  - ERR__QUEUE_FULL - maximum number of outstanding messages has been
-   *                      reached: \c queue.buffering.max.messages
-   *
-   *  - ERR_MSG_SIZE_TOO_LARGE - message is larger than configured max size:
-   *                            \c message.max.bytes
-   *
-   *  - ERR__UNKNOWN_PARTITION - requested \p partition is unknown in the
-   *                           Kafka cluster.
-   *
-   *  - ERR__UNKNOWN_TOPIC     - topic is unknown in the Kafka cluster.
-   */
-  virtual ErrorCode produce (Topic *topic, int32_t partition,
-                             int msgflags,
-                             void *payload, size_t len,
-                             const std::string *key,
-                             void *msg_opaque) = 0;
-
-  /**
-   * @brief Variant produce() that passes the key as a pointer and length
-   *        instead of as a const std::string *.
-   */
-  virtual ErrorCode produce (Topic *topic, int32_t partition,
-                             int msgflags,
-                             void *payload, size_t len,
-                             const void *key, size_t key_len,
-                             void *msg_opaque) = 0;
-
-  /**
-   * @brief produce() variant that takes topic as a string (no need for
-   *        creating a Topic object), and also allows providing the
-   *        message timestamp (microseconds since beginning of epoch, UTC).
-   *        Otherwise identical to produce() above.
-   */
-  virtual ErrorCode produce (const std::string topic_name, int32_t partition,
-                             int msgflags,
-                             void *payload, size_t len,
-                             const void *key, size_t key_len,
-                             int64_t timestamp,
-                             void *msg_opaque) = 0;
-
-
-  /**
-   * @brief Variant produce() that accepts vectors for key and payload.
-   *        The vector data will be copied.
-   */
-  virtual ErrorCode produce (Topic *topic, int32_t partition,
-                             const std::vector<char> *payload,
-                             const std::vector<char> *key,
-                             void *msg_opaque) = 0;
-
-
-  /**
-   * @brief Wait until all outstanding produce requests, et al., are completed.
-   *        This should typically be done prior to destroying a producer instance
-   *        to make sure all queued and in-flight produce requests are completed
-   *        before terminating.
-   *
-   * @remark This function will call poll() and thus trigger callbacks.
-   *
-   * @returns ERR__TIMED_OUT if \p timeout_ms was reached before all
-   *          outstanding requests were completed, else ERR_NO_ERROR
-   */
-  virtual ErrorCode flush (int timeout_ms) = 0;
-};
-
-/**@}*/
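
As noted in the produce() msgflags description above, RK_MSG_COPY keeps
ownership of the payload buffer with the caller. Below is a hedged sketch of
the produce()/poll()/flush() flow; the broker address and topic name are
placeholders, not values taken from this commit:

    #include <string>
    #include "rdkafkacpp.h"

    int main() {
      std::string errstr;
      RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
      conf->set("metadata.broker.list", "localhost:9092", errstr); /* assumed broker */

      RdKafka::Producer *producer = RdKafka::Producer::create(conf, errstr);

      std::string payload = "hello";
      /* RK_MSG_COPY: rdkafka copies the payload, so the local buffer stays ours. */
      producer->produce("test",                        /* assumed topic name */
                        RdKafka::Topic::PARTITION_UA,  /* use the partitioner */
                        RdKafka::Producer::RK_MSG_COPY,
                        const_cast<char *>(payload.data()), payload.size(),
                        NULL, 0,                       /* no key */
                        0,                             /* timestamp 0: assign now */
                        NULL);                         /* no per-message opaque */

      producer->poll(0);           /* serve delivery report callbacks */
      producer->flush(10 * 1000);  /* drain queues before shutting down */

      delete producer;
      delete conf;
      return 0;
    }
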
-
-
-/**
- * @name Metadata interface
- * @{
- *
- */
-
-
-/**
- * @brief Metadata: Broker information
- */
-class BrokerMetadata {
- public:
-  /** @returns Broker id */
-  virtual int32_t id() const = 0;
-
-  /** @returns Broker hostname */
-  virtual const std::string host() const = 0;
-
-  /** @returns Broker listening port */
-  virtual int port() const = 0;
-
-  virtual ~BrokerMetadata() = 0;
-};
-
-
-
-/**
- * @brief Metadata: Partition information
- */
-class PartitionMetadata {
- public:
-  /** @brief Replicas */
-  typedef std::vector<int32_t> ReplicasVector;
-  /** @brief ISRs (In-Sync-Replicas) */
-  typedef std::vector<int32_t> ISRSVector;
-
-  /** @brief Replicas iterator */
-  typedef ReplicasVector::const_iterator ReplicasIterator;
-  /** @brief ISRs iterator */
-  typedef ISRSVector::const_iterator     ISRSIterator;
-
-
-  /** @returns Partition id */
-  virtual int32_t id() const = 0;
-
-  /** @returns Partition error reported by broker */
-  virtual ErrorCode err() const = 0;
-
-  /** @returns Leader broker (id) for partition */
-  virtual int32_t leader() const = 0;
-
-  /** @returns Replica brokers */
-  virtual const std::vector<int32_t> *replicas() const = 0;
-
-  /** @returns In-Sync-Replica brokers
-   *  @warning The broker may return a cached/outdated list of ISRs.
-   */
-  virtual const std::vector<int32_t> *isrs() const = 0;
-
-  virtual ~PartitionMetadata() = 0;
-};
-
-
-
-/**
- * @brief Metadata: Topic information
- */
-class TopicMetadata {
- public:
-  /** @brief Partitions */
-  typedef std::vector<const PartitionMetadata*> PartitionMetadataVector;
-  /** @brief Partitions iterator */
-  typedef PartitionMetadataVector::const_iterator PartitionMetadataIterator;
-
-  /** @returns Topic name */
-  virtual const std::string topic() const = 0;
-
-  /** @returns Partition list */
-  virtual const PartitionMetadataVector *partitions() const = 0;
-
-  /** @returns Topic error reported by broker */
-  virtual ErrorCode err() const = 0;
-
-  virtual ~TopicMetadata() = 0;
-};
-
-
-/**
- * @brief Metadata container
- */
-class Metadata {
- public:
-  /** @brief Brokers */
-  typedef std::vector<const BrokerMetadata*> BrokerMetadataVector;
-  /** @brief Topics */
-  typedef std::vector<const TopicMetadata*>  TopicMetadataVector;
-
-  /** @brief Brokers iterator */
-  typedef BrokerMetadataVector::const_iterator BrokerMetadataIterator;
-  /** @brief Topics iterator */
-  typedef TopicMetadataVector::const_iterator  TopicMetadataIterator;
-
-
-  /** @brief Broker list */
-  virtual const BrokerMetadataVector *brokers() const = 0;
-
-  /** @brief Topic list */
-  virtual const TopicMetadataVector  *topics() const = 0;
-
-  /** @brief Broker (id) originating this metadata */
-  virtual int32_t orig_broker_id() const = 0;
-
-  /** @brief Broker (name) originating this metadata */
-  virtual const std::string orig_broker_name() const = 0;
-
-  virtual ~Metadata() = 0;
-};
-
-/**@}*/
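
A short hedged sketch of walking the metadata interface above; it assumes the
Handle::metadata() request method declared earlier in this header, the handle
argument is an already-created Producer or Consumer, and error handling is
trimmed:

    #include <iostream>
    #include "rdkafkacpp.h"

    void dump_metadata(RdKafka::Handle *handle) {
      RdKafka::Metadata *metadata = NULL;
      /* Request metadata for all topics with a 5 s timeout. */
      if (handle->metadata(true, NULL, &metadata, 5000) != RdKafka::ERR_NO_ERROR)
        return;
      const RdKafka::Metadata::TopicMetadataVector *topics = metadata->topics();
      for (RdKafka::Metadata::TopicMetadataIterator t = topics->begin();
           t != topics->end(); ++t)
        std::cout << (*t)->topic() << ": "
                  << (*t)->partitions()->size() << " partition(s)" << std::endl;
      delete metadata;
    }
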
-
-}
-


[14/51] [partial] nifi-minifi-cpp git commit: MINIFICPP-512 - upgrade to librdkafka 0.11.4

Posted by ph...@apache.org.
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/regexp.c
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/regexp.c b/thirdparty/librdkafka-0.11.1/src/regexp.c
deleted file mode 100644
index 022c4fc..0000000
--- a/thirdparty/librdkafka-0.11.1/src/regexp.c
+++ /dev/null
@@ -1,1156 +0,0 @@
-#include "rd.h"
-
-#include <stdlib.h>
-#include <string.h>
-#include <setjmp.h>
-#include <stdio.h>
-
-#include "regexp.h"
-
-#define nelem(a) (sizeof (a) / sizeof (a)[0])
-
-typedef unsigned int Rune;
-
-static int isalpharune(Rune c)
-{
-	/* TODO: Add unicode support */
-	return (c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z');
-}
-
-static Rune toupperrune(Rune c)
-{
-	/* TODO: Add unicode support */
-	if (c >= 'a' && c <= 'z')
-		return c - 'a' + 'A';
-	return c;
-}
-
-static int chartorune(Rune *r, const char *s)
-{
-	/* TODO: Add UTF-8 decoding */
-	*r = *s;
-	return 1;
-}
-
-#define REPINF 255
-#define MAXTHREAD 1000
-#define MAXSUB REG_MAXSUB
-
-typedef struct Reclass Reclass;
-typedef struct Renode Renode;
-typedef struct Reinst Reinst;
-typedef struct Rethread Rethread;
-
-struct Reclass {
-	Rune *end;
-	Rune spans[64];
-};
-
-struct Reprog {
-	Reinst *start, *end;
-	int flags;
-	unsigned int nsub;
-	Reclass cclass[16];
-};
-
-static struct {
-	Reprog *prog;
-	Renode *pstart, *pend;
-
-	const char *source;
-	unsigned int ncclass;
-	unsigned int nsub;
-	Renode *sub[MAXSUB];
-
-	int lookahead;
-	Rune yychar;
-	Reclass *yycc;
-	int yymin, yymax;
-
-	const char *error;
-	jmp_buf kaboom;
-} g;
-
-static void die(const char *message)
-{
-	g.error = message;
-	longjmp(g.kaboom, 1);
-}
-
-static Rune canon(Rune c)
-{
-	Rune u = toupperrune(c);
-	if (c >= 128 && u < 128)
-		return c;
-	return u;
-}
-
-/* Scan */
-
-enum {
-	L_CHAR = 256,
-	L_CCLASS,	/* character class */
-	L_NCCLASS,	/* negative character class */
-	L_NC,		/* "(?:" no capture */
-	L_PLA,		/* "(?=" positive lookahead */
-	L_NLA,		/* "(?!" negative lookahead */
-	L_WORD,		/* "\b" word boundary */
-	L_NWORD,	/* "\B" non-word boundary */
-	L_REF,		/* "\1" back-reference */
-	L_COUNT		/* {M,N} */
-};
-
-static int hex(int c)
-{
-	if (c >= '0' && c <= '9') return c - '0';
-	if (c >= 'a' && c <= 'f') return c - 'a' + 0xA;
-	if (c >= 'A' && c <= 'F') return c - 'A' + 0xA;
-	die("invalid escape sequence");
-	return 0;
-}
-
-static int dec(int c)
-{
-	if (c >= '0' && c <= '9') return c - '0';
-	die("invalid quantifier");
-	return 0;
-}
-
-#define ESCAPES "BbDdSsWw^$\\.*+?()[]{}|0123456789"
-
-static int nextrune(void)
-{
-	g.source += chartorune(&g.yychar, g.source);
-	if (g.yychar == '\\') {
-		g.source += chartorune(&g.yychar, g.source);
-		switch (g.yychar) {
-		case 0: die("unterminated escape sequence");
-		case 'f': g.yychar = '\f'; return 0;
-		case 'n': g.yychar = '\n'; return 0;
-		case 'r': g.yychar = '\r'; return 0;
-		case 't': g.yychar = '\t'; return 0;
-		case 'v': g.yychar = '\v'; return 0;
-		case 'c':
-			g.yychar = (*g.source++) & 31;
-			return 0;
-		case 'x':
-			g.yychar = hex(*g.source++) << 4;
-			g.yychar += hex(*g.source++);
-			if (g.yychar == 0) {
-				g.yychar = '0';
-				return 1;
-			}
-			return 0;
-		case 'u':
-			g.yychar = hex(*g.source++) << 12;
-			g.yychar += hex(*g.source++) << 8;
-			g.yychar += hex(*g.source++) << 4;
-			g.yychar += hex(*g.source++);
-			if (g.yychar == 0) {
-				g.yychar = '0';
-				return 1;
-			}
-			return 0;
-		}
-		if (strchr(ESCAPES, g.yychar))
-			return 1;
-		if (isalpharune(g.yychar) || g.yychar == '_') /* check identity escape */
-			die("invalid escape character");
-		return 0;
-	}
-	return 0;
-}
-
-static int lexcount(void)
-{
-	g.yychar = *g.source++;
-
-	g.yymin = dec(g.yychar);
-	g.yychar = *g.source++;
-	while (g.yychar != ',' && g.yychar != '}') {
-		g.yymin = g.yymin * 10 + dec(g.yychar);
-		g.yychar = *g.source++;
-	}
-	if (g.yymin >= REPINF)
-		die("numeric overflow");
-
-	if (g.yychar == ',') {
-		g.yychar = *g.source++;
-		if (g.yychar == '}') {
-			g.yymax = REPINF;
-		} else {
-			g.yymax = dec(g.yychar);
-			g.yychar = *g.source++;
-			while (g.yychar != '}') {
-				g.yymax = g.yymax * 10 + dec(g.yychar);
-				g.yychar = *g.source++;
-			}
-			if (g.yymax >= REPINF)
-				die("numeric overflow");
-		}
-	} else {
-		g.yymax = g.yymin;
-	}
-
-	return L_COUNT;
-}
-
-static void newcclass(void)
-{
-	if (g.ncclass >= nelem(g.prog->cclass))
-		die("too many character classes");
-	g.yycc = g.prog->cclass + g.ncclass++;
-	g.yycc->end = g.yycc->spans;
-}
-
-static void addrange(Rune a, Rune b)
-{
-	if (a > b)
-		die("invalid character class range");
-	if (g.yycc->end + 2 == g.yycc->spans + nelem(g.yycc->spans))
-		die("too many character class ranges");
-	*g.yycc->end++ = a;
-	*g.yycc->end++ = b;
-}
-
-static void addranges_d(void)
-{
-	addrange('0', '9');
-}
-
-static void addranges_D(void)
-{
-	addrange(0, '0'-1);
-	addrange('9'+1, 0xFFFF);
-}
-
-static void addranges_s(void)
-{
-	addrange(0x9, 0x9);
-	addrange(0xA, 0xD);
-	addrange(0x20, 0x20);
-	addrange(0xA0, 0xA0);
-	addrange(0x2028, 0x2029);
-	addrange(0xFEFF, 0xFEFF);
-}
-
-static void addranges_S(void)
-{
-	addrange(0, 0x9-1);
-	addrange(0x9+1, 0xA-1);
-	addrange(0xD+1, 0x20-1);
-	addrange(0x20+1, 0xA0-1);
-	addrange(0xA0+1, 0x2028-1);
-	addrange(0x2029+1, 0xFEFF-1);
-	addrange(0xFEFF+1, 0xFFFF);
-}
-
-static void addranges_w(void)
-{
-	addrange('0', '9');
-	addrange('A', 'Z');
-	addrange('_', '_');
-	addrange('a', 'z');
-}
-
-static void addranges_W(void)
-{
-	addrange(0, '0'-1);
-	addrange('9'+1, 'A'-1);
-	addrange('Z'+1, '_'-1);
-	addrange('_'+1, 'a'-1);
-	addrange('z'+1, 0xFFFF);
-}
-
-static int lexclass(void)
-{
-	int type = L_CCLASS;
-	int quoted, havesave, havedash;
-	Rune save = 0;
-
-	newcclass();
-
-	quoted = nextrune();
-	if (!quoted && g.yychar == '^') {
-		type = L_NCCLASS;
-		quoted = nextrune();
-	}
-
-	havesave = havedash = 0;
-	for (;;) {
-		if (g.yychar == 0)
-			die("unterminated character class");
-		if (!quoted && g.yychar == ']')
-			break;
-
-		if (!quoted && g.yychar == '-') {
-			if (havesave) {
-				if (havedash) {
-					addrange(save, '-');
-					havesave = havedash = 0;
-				} else {
-					havedash = 1;
-				}
-			} else {
-				save = '-';
-				havesave = 1;
-			}
-		} else if (quoted && strchr("DSWdsw", g.yychar)) {
-			if (havesave) {
-				addrange(save, save);
-				if (havedash)
-					addrange('-', '-');
-			}
-			switch (g.yychar) {
-			case 'd': addranges_d(); break;
-			case 's': addranges_s(); break;
-			case 'w': addranges_w(); break;
-			case 'D': addranges_D(); break;
-			case 'S': addranges_S(); break;
-			case 'W': addranges_W(); break;
-			}
-			havesave = havedash = 0;
-		} else {
-			if (quoted) {
-				if (g.yychar == 'b')
-					g.yychar = '\b';
-				else if (g.yychar == '0')
-					g.yychar = 0;
-				/* else identity escape */
-			}
-			if (havesave) {
-				if (havedash) {
-					addrange(save, g.yychar);
-					havesave = havedash = 0;
-				} else {
-					addrange(save, save);
-					save = g.yychar;
-				}
-			} else {
-				save = g.yychar;
-				havesave = 1;
-			}
-		}
-
-		quoted = nextrune();
-	}
-
-	if (havesave) {
-		addrange(save, save);
-		if (havedash)
-			addrange('-', '-');
-	}
-
-	return type;
-}
-
-static int lex(void)
-{
-	int quoted = nextrune();
-	if (quoted) {
-		switch (g.yychar) {
-		case 'b': return L_WORD;
-		case 'B': return L_NWORD;
-		case 'd': newcclass(); addranges_d(); return L_CCLASS;
-		case 's': newcclass(); addranges_s(); return L_CCLASS;
-		case 'w': newcclass(); addranges_w(); return L_CCLASS;
-		case 'D': newcclass(); addranges_d(); return L_NCCLASS;
-		case 'S': newcclass(); addranges_s(); return L_NCCLASS;
-		case 'W': newcclass(); addranges_w(); return L_NCCLASS;
-		case '0': g.yychar = 0; return L_CHAR;
-		}
-		if (g.yychar >= '0' && g.yychar <= '9') {
-			g.yychar -= '0';
-			if (*g.source >= '0' && *g.source <= '9')
-				g.yychar = g.yychar * 10 + *g.source++ - '0';
-			return L_REF;
-		}
-		return L_CHAR;
-	}
-
-	switch (g.yychar) {
-	case 0:
-	case '$': case ')': case '*': case '+':
-	case '.': case '?': case '^': case '|':
-		return g.yychar;
-	}
-
-	if (g.yychar == '{')
-		return lexcount();
-	if (g.yychar == '[')
-		return lexclass();
-	if (g.yychar == '(') {
-		if (g.source[0] == '?') {
-			if (g.source[1] == ':') {
-				g.source += 2;
-				return L_NC;
-			}
-			if (g.source[1] == '=') {
-				g.source += 2;
-				return L_PLA;
-			}
-			if (g.source[1] == '!') {
-				g.source += 2;
-				return L_NLA;
-			}
-		}
-		return '(';
-	}
-
-	return L_CHAR;
-}
-
-/* Parse */
-
-enum {
-	P_CAT, P_ALT, P_REP,
-	P_BOL, P_EOL, P_WORD, P_NWORD,
-	P_PAR, P_PLA, P_NLA,
-	P_ANY, P_CHAR, P_CCLASS, P_NCCLASS,
-	P_REF
-};
-
-struct Renode {
-	unsigned char type;
-	unsigned char ng, m, n;
-	Rune c;
-	Reclass *cc;
-	Renode *x;
-	Renode *y;
-};
-
-static Renode *newnode(int type)
-{
-	Renode *node = g.pend++;
-	node->type = type;
-	node->cc = NULL;
-	node->c = 0;
-	node->ng = 0;
-	node->m = 0;
-	node->n = 0;
-	node->x = node->y = NULL;
-	return node;
-}
-
-static int empty(Renode *node)
-{
-	if (!node) return 1;
-	switch (node->type) {
-	default: return 1;
-	case P_CAT: return empty(node->x) && empty(node->y);
-	case P_ALT: return empty(node->x) || empty(node->y);
-	case P_REP: return empty(node->x) || node->m == 0;
-	case P_PAR: return empty(node->x);
-	case P_REF: return empty(node->x);
-	case P_ANY: case P_CHAR: case P_CCLASS: case P_NCCLASS: return 0;
-	}
-}
-
-static Renode *newrep(Renode *atom, int ng, int min, int max)
-{
-	Renode *rep = newnode(P_REP);
-	if (max == REPINF && empty(atom))
-		die("infinite loop matching the empty string");
-	rep->ng = ng;
-	rep->m = min;
-	rep->n = max;
-	rep->x = atom;
-	return rep;
-}
-
-static void next(void)
-{
-	g.lookahead = lex();
-}
-
-static int re_accept(int t)
-{
-	if (g.lookahead == t) {
-		next();
-		return 1;
-	}
-	return 0;
-}
-
-static Renode *parsealt(void);
-
-static Renode *parseatom(void)
-{
-	Renode *atom;
-	if (g.lookahead == L_CHAR) {
-		atom = newnode(P_CHAR);
-		atom->c = g.yychar;
-		next();
-		return atom;
-	}
-	if (g.lookahead == L_CCLASS) {
-		atom = newnode(P_CCLASS);
-		atom->cc = g.yycc;
-		next();
-		return atom;
-	}
-	if (g.lookahead == L_NCCLASS) {
-		atom = newnode(P_NCCLASS);
-		atom->cc = g.yycc;
-		next();
-		return atom;
-	}
-	if (g.lookahead == L_REF) {
-		atom = newnode(P_REF);
-		if (g.yychar == 0 || g.yychar > g.nsub || !g.sub[g.yychar])
-			die("invalid back-reference");
-		atom->n = g.yychar;
-		atom->x = g.sub[g.yychar];
-		next();
-		return atom;
-	}
-	if (re_accept('.'))
-		return newnode(P_ANY);
-	if (re_accept('(')) {
-		atom = newnode(P_PAR);
-		if (g.nsub == MAXSUB)
-			die("too many captures");
-		atom->n = g.nsub++;
-		atom->x = parsealt();
-		g.sub[atom->n] = atom;
-		if (!re_accept(')'))
-			die("unmatched '('");
-		return atom;
-	}
-	if (re_accept(L_NC)) {
-		atom = parsealt();
-		if (!re_accept(')'))
-			die("unmatched '('");
-		return atom;
-	}
-	if (re_accept(L_PLA)) {
-		atom = newnode(P_PLA);
-		atom->x = parsealt();
-		if (!re_accept(')'))
-			die("unmatched '('");
-		return atom;
-	}
-	if (re_accept(L_NLA)) {
-		atom = newnode(P_NLA);
-		atom->x = parsealt();
-		if (!re_accept(')'))
-			die("unmatched '('");
-		return atom;
-	}
-	die("syntax error");
-	return NULL;
-}
-
-static Renode *parserep(void)
-{
-	Renode *atom;
-
-	if (re_accept('^')) return newnode(P_BOL);
-	if (re_accept('$')) return newnode(P_EOL);
-	if (re_accept(L_WORD)) return newnode(P_WORD);
-	if (re_accept(L_NWORD)) return newnode(P_NWORD);
-
-	atom = parseatom();
-	if (g.lookahead == L_COUNT) {
-		int min = g.yymin, max = g.yymax;
-		next();
-		if (max < min)
-			die("invalid quantifier");
-		return newrep(atom, re_accept('?'), min, max);
-	}
-	if (re_accept('*')) return newrep(atom, re_accept('?'), 0, REPINF);
-	if (re_accept('+')) return newrep(atom, re_accept('?'), 1, REPINF);
-	if (re_accept('?')) return newrep(atom, re_accept('?'), 0, 1);
-	return atom;
-}
-
-static Renode *parsecat(void)
-{
-	Renode *cat, *x;
-	if (g.lookahead && g.lookahead != '|' && g.lookahead != ')') {
-		cat = parserep();
-		while (g.lookahead && g.lookahead != '|' && g.lookahead != ')') {
-			x = cat;
-			cat = newnode(P_CAT);
-			cat->x = x;
-			cat->y = parserep();
-		}
-		return cat;
-	}
-	return NULL;
-}
-
-static Renode *parsealt(void)
-{
-	Renode *alt, *x;
-	alt = parsecat();
-	while (re_accept('|')) {
-		x = alt;
-		alt = newnode(P_ALT);
-		alt->x = x;
-		alt->y = parsecat();
-	}
-	return alt;
-}
-
-/* Compile */
-
-enum {
-	I_END, I_JUMP, I_SPLIT, I_PLA, I_NLA,
-	I_ANYNL, I_ANY, I_CHAR, I_CCLASS, I_NCCLASS, I_REF,
-	I_BOL, I_EOL, I_WORD, I_NWORD,
-	I_LPAR, I_RPAR
-};
-
-struct Reinst {
-	unsigned char opcode;
-	unsigned char n;
-	Rune c;
-	Reclass *cc;
-	Reinst *x;
-	Reinst *y;
-};
-
-static unsigned int count(Renode *node)
-{
-	unsigned int min, max;
-	if (!node) return 0;
-	switch (node->type) {
-	default: return 1;
-	case P_CAT: return count(node->x) + count(node->y);
-	case P_ALT: return count(node->x) + count(node->y) + 2;
-	case P_REP:
-		min = node->m;
-		max = node->n;
-		if (min == max) return count(node->x) * min;
-		if (max < REPINF) return count(node->x) * max + (max - min);
-		return count(node->x) * (min + 1) + 2;
-	case P_PAR: return count(node->x) + 2;
-	case P_PLA: return count(node->x) + 2;
-	case P_NLA: return count(node->x) + 2;
-	}
-}
-
-static Reinst *emit(Reprog *prog, int opcode)
-{
-	Reinst *inst = prog->end++;
-	inst->opcode = opcode;
-	inst->n = 0;
-	inst->c = 0;
-	inst->cc = NULL;
-	inst->x = inst->y = NULL;
-	return inst;
-}
-
-static void compile(Reprog *prog, Renode *node)
-{
-	Reinst *inst, *split, *jump;
-	unsigned int i;
-
-	if (!node)
-		return;
-
-	switch (node->type) {
-	case P_CAT:
-		compile(prog, node->x);
-		compile(prog, node->y);
-		break;
-
-	case P_ALT:
-		split = emit(prog, I_SPLIT);
-		compile(prog, node->x);
-		jump = emit(prog, I_JUMP);
-		compile(prog, node->y);
-		split->x = split + 1;
-		split->y = jump + 1;
-		jump->x = prog->end;
-		break;
-
-	case P_REP:
-		for (i = 0; i < node->m; ++i) {
-			inst = prog->end;
-			compile(prog, node->x);
-		}
-		if (node->m == node->n)
-			break;
-		if (node->n < REPINF) {
-			for (i = node->m; i < node->n; ++i) {
-				split = emit(prog, I_SPLIT);
-				compile(prog, node->x);
-				if (node->ng) {
-					split->y = split + 1;
-					split->x = prog->end;
-				} else {
-					split->x = split + 1;
-					split->y = prog->end;
-				}
-			}
-		} else if (node->m == 0) {
-			split = emit(prog, I_SPLIT);
-			compile(prog, node->x);
-			jump = emit(prog, I_JUMP);
-			if (node->ng) {
-				split->y = split + 1;
-				split->x = prog->end;
-			} else {
-				split->x = split + 1;
-				split->y = prog->end;
-			}
-			jump->x = split;
-		} else {
-			split = emit(prog, I_SPLIT);
-			if (node->ng) {
-				split->y = inst;
-				split->x = prog->end;
-			} else {
-				split->x = inst;
-				split->y = prog->end;
-			}
-		}
-		break;
-
-	case P_BOL: emit(prog, I_BOL); break;
-	case P_EOL: emit(prog, I_EOL); break;
-	case P_WORD: emit(prog, I_WORD); break;
-	case P_NWORD: emit(prog, I_NWORD); break;
-
-	case P_PAR:
-		inst = emit(prog, I_LPAR);
-		inst->n = node->n;
-		compile(prog, node->x);
-		inst = emit(prog, I_RPAR);
-		inst->n = node->n;
-		break;
-	case P_PLA:
-		split = emit(prog, I_PLA);
-		compile(prog, node->x);
-		emit(prog, I_END);
-		split->x = split + 1;
-		split->y = prog->end;
-		break;
-	case P_NLA:
-		split = emit(prog, I_NLA);
-		compile(prog, node->x);
-		emit(prog, I_END);
-		split->x = split + 1;
-		split->y = prog->end;
-		break;
-
-	case P_ANY:
-		emit(prog, I_ANY);
-		break;
-	case P_CHAR:
-		inst = emit(prog, I_CHAR);
-		inst->c = (prog->flags & REG_ICASE) ? canon(node->c) : node->c;
-		break;
-	case P_CCLASS:
-		inst = emit(prog, I_CCLASS);
-		inst->cc = node->cc;
-		break;
-	case P_NCCLASS:
-		inst = emit(prog, I_NCCLASS);
-		inst->cc = node->cc;
-		break;
-	case P_REF:
-		inst = emit(prog, I_REF);
-		inst->n = node->n;
-		break;
-	}
-}
-
-#ifdef TEST
-static void dumpnode(Renode *node)
-{
-	Rune *p;
-	if (!node) { printf("Empty"); return; }
-	switch (node->type) {
-	case P_CAT: printf("Cat("); dumpnode(node->x); printf(", "); dumpnode(node->y); printf(")"); break;
-	case P_ALT: printf("Alt("); dumpnode(node->x); printf(", "); dumpnode(node->y); printf(")"); break;
-	case P_REP:
-		printf(node->ng ? "NgRep(%d,%d," : "Rep(%d,%d,", node->m, node->n);
-		dumpnode(node->x);
-		printf(")");
-		break;
-	case P_BOL: printf("Bol"); break;
-	case P_EOL: printf("Eol"); break;
-	case P_WORD: printf("Word"); break;
-	case P_NWORD: printf("NotWord"); break;
-	case P_PAR: printf("Par(%d,", node->n); dumpnode(node->x); printf(")"); break;
-	case P_PLA: printf("PLA("); dumpnode(node->x); printf(")"); break;
-	case P_NLA: printf("NLA("); dumpnode(node->x); printf(")"); break;
-	case P_ANY: printf("Any"); break;
-	case P_CHAR: printf("Char(%c)", node->c); break;
-	case P_CCLASS:
-		printf("Class(");
-		for (p = node->cc->spans; p < node->cc->end; p += 2) printf("%02X-%02X,", p[0], p[1]);
-		printf(")");
-		break;
-	case P_NCCLASS:
-		printf("NotClass(");
-		for (p = node->cc->spans; p < node->cc->end; p += 2) printf("%02X-%02X,", p[0], p[1]);
-		printf(")");
-		break;
-	case P_REF: printf("Ref(%d)", node->n); break;
-	}
-}
-
-static void dumpprog(Reprog *prog)
-{
-	Reinst *inst;
-	int i;
-	for (i = 0, inst = prog->start; inst < prog->end; ++i, ++inst) {
-		printf("% 5d: ", i);
-		switch (inst->opcode) {
-		case I_END: puts("end"); break;
-		case I_JUMP: printf("jump %d\n", (int)(inst->x - prog->start)); break;
-		case I_SPLIT: printf("split %d %d\n", (int)(inst->x - prog->start), (int)(inst->y - prog->start)); break;
-		case I_PLA: printf("pla %d %d\n", (int)(inst->x - prog->start), (int)(inst->y - prog->start)); break;
-		case I_NLA: printf("nla %d %d\n", (int)(inst->x - prog->start), (int)(inst->y - prog->start)); break;
-		case I_ANY: puts("any"); break;
-		case I_ANYNL: puts("anynl"); break;
-		case I_CHAR: printf(inst->c >= 32 && inst->c < 127 ? "char '%c'\n" : "char U+%04X\n", inst->c); break;
-		case I_CCLASS: puts("cclass"); break;
-		case I_NCCLASS: puts("ncclass"); break;
-		case I_REF: printf("ref %d\n", inst->n); break;
-		case I_BOL: puts("bol"); break;
-		case I_EOL: puts("eol"); break;
-		case I_WORD: puts("word"); break;
-		case I_NWORD: puts("nword"); break;
-		case I_LPAR: printf("lpar %d\n", inst->n); break;
-		case I_RPAR: printf("rpar %d\n", inst->n); break;
-		}
-	}
-}
-#endif
-
-Reprog *re_regcomp(const char *pattern, int cflags, const char **errorp)
-{
-	Renode *node;
-	Reinst *split, *jump;
-	int i;
-
-	g.prog = rd_malloc(sizeof (Reprog));
-	g.pstart = g.pend = rd_malloc(sizeof (Renode) * strlen(pattern) * 2);
-
-	if (setjmp(g.kaboom)) {
-		if (errorp) *errorp = g.error;
-		rd_free(g.pstart);
-		rd_free(g.prog);
-		return NULL;
-	}
-
-	g.source = pattern;
-	g.ncclass = 0;
-	g.nsub = 1;
-	for (i = 0; i < MAXSUB; ++i)
-		g.sub[i] = 0;
-
-	g.prog->flags = cflags;
-
-	next();
-	node = parsealt();
-	if (g.lookahead == ')')
-		die("unmatched ')'");
-	if (g.lookahead != 0)
-		die("syntax error");
-
-	g.prog->nsub = g.nsub;
-	g.prog->start = g.prog->end = rd_malloc((count(node) + 6) * sizeof (Reinst));
-
-	split = emit(g.prog, I_SPLIT);
-	split->x = split + 3;
-	split->y = split + 1;
-	emit(g.prog, I_ANYNL);
-	jump = emit(g.prog, I_JUMP);
-	jump->x = split;
-	emit(g.prog, I_LPAR);
-	compile(g.prog, node);
-	emit(g.prog, I_RPAR);
-	emit(g.prog, I_END);
-
-#ifdef TEST
-	dumpnode(node);
-	putchar('\n');
-	dumpprog(g.prog);
-#endif
-
-	rd_free(g.pstart);
-
-	if (errorp) *errorp = NULL;
-	return g.prog;
-}
-
-void re_regfree(Reprog *prog)
-{
-	if (prog) {
-		rd_free(prog->start);
-		rd_free(prog);
-	}
-}
-
-/* Match */
-
-static int isnewline(int c)
-{
-	return c == 0xA || c == 0xD || c == 0x2028 || c == 0x2029;
-}
-
-static int iswordchar(int c)
-{
-	return c == '_' ||
-		(c >= 'a' && c <= 'z') ||
-		(c >= 'A' && c <= 'Z') ||
-		(c >= '0' && c <= '9');
-}
-
-static int incclass(Reclass *cc, Rune c)
-{
-	Rune *p;
-	for (p = cc->spans; p < cc->end; p += 2)
-		if (p[0] <= c && c <= p[1])
-			return 1;
-	return 0;
-}
-
-static int incclasscanon(Reclass *cc, Rune c)
-{
-	Rune *p, r;
-	for (p = cc->spans; p < cc->end; p += 2)
-		for (r = p[0]; r <= p[1]; ++r)
-			if (c == canon(r))
-				return 1;
-	return 0;
-}
-
-static int strncmpcanon(const char *a, const char *b, unsigned int n)
-{
-	Rune ra, rb;
-	int c;
-	while (n--) {
-		if (!*a) return -1;
-		if (!*b) return 1;
-		a += chartorune(&ra, a);
-		b += chartorune(&rb, b);
-		c = canon(ra) - canon(rb);
-		if (c)
-			return c;
-	}
-	return 0;
-}
-
-struct Rethread {
-	Reinst *pc;
-	const char *sp;
-	Resub sub;
-};
-
-static void spawn(Rethread *t, Reinst *pc, const char *sp, Resub *sub)
-{
-	t->pc = pc;
-	t->sp = sp;
-	memcpy(&t->sub, sub, sizeof t->sub);
-}
-
-static int match(Reinst *pc, const char *sp, const char *bol, int flags, Resub *out)
-{
-	Rethread ready[MAXTHREAD];
-	Resub scratch;
-	Resub sub;
-	Rune c;
-	unsigned int nready;
-	int i;
-
-	/* queue initial thread */
-	spawn(ready + 0, pc, sp, out);
-	nready = 1;
-
-	/* run threads in stack order */
-	while (nready > 0) {
-		--nready;
-		pc = ready[nready].pc;
-		sp = ready[nready].sp;
-		memcpy(&sub, &ready[nready].sub, sizeof sub);
-		for (;;) {
-			switch (pc->opcode) {
-			case I_END:
-				for (i = 0; i < MAXSUB; ++i) {
-					out->sub[i].sp = sub.sub[i].sp;
-					out->sub[i].ep = sub.sub[i].ep;
-				}
-				return 1;
-			case I_JUMP:
-				pc = pc->x;
-				continue;
-			case I_SPLIT:
-				if (nready >= MAXTHREAD) {
-					fprintf(stderr, "regexec: backtrack overflow!\n");
-					return 0;
-				}
-				spawn(&ready[nready++], pc->y, sp, &sub);
-				pc = pc->x;
-				continue;
-
-			case I_PLA:
-				if (!match(pc->x, sp, bol, flags, &sub))
-					goto dead;
-				pc = pc->y;
-				continue;
-			case I_NLA:
-				memcpy(&scratch, &sub, sizeof scratch);
-				if (match(pc->x, sp, bol, flags, &scratch))
-					goto dead;
-				pc = pc->y;
-				continue;
-
-			case I_ANYNL:
-				sp += chartorune(&c, sp);
-				if (c == 0)
-					goto dead;
-				break;
-			case I_ANY:
-				sp += chartorune(&c, sp);
-				if (c == 0)
-					goto dead;
-				if (isnewline(c))
-					goto dead;
-				break;
-			case I_CHAR:
-				sp += chartorune(&c, sp);
-				if (c == 0)
-					goto dead;
-				if (flags & REG_ICASE)
-					c = canon(c);
-				if (c != pc->c)
-					goto dead;
-				break;
-			case I_CCLASS:
-				sp += chartorune(&c, sp);
-				if (c == 0)
-					goto dead;
-				if (flags & REG_ICASE) {
-					if (!incclasscanon(pc->cc, canon(c)))
-						goto dead;
-				} else {
-					if (!incclass(pc->cc, c))
-						goto dead;
-				}
-				break;
-			case I_NCCLASS:
-				sp += chartorune(&c, sp);
-				if (c == 0)
-					goto dead;
-				if (flags & REG_ICASE) {
-					if (incclasscanon(pc->cc, canon(c)))
-						goto dead;
-				} else {
-					if (incclass(pc->cc, c))
-						goto dead;
-				}
-				break;
-			case I_REF:
-				i = (int)(sub.sub[pc->n].ep - sub.sub[pc->n].sp);
-				if (flags & REG_ICASE) {
-					if (strncmpcanon(sp, sub.sub[pc->n].sp, i))
-						goto dead;
-				} else {
-					if (strncmp(sp, sub.sub[pc->n].sp, i))
-						goto dead;
-				}
-				if (i > 0)
-					sp += i;
-				break;
-
-			case I_BOL:
-				if (sp == bol && !(flags & REG_NOTBOL))
-					break;
-				if (flags & REG_NEWLINE)
-					if (sp > bol && isnewline(sp[-1]))
-						break;
-				goto dead;
-			case I_EOL:
-				if (*sp == 0)
-					break;
-				if (flags & REG_NEWLINE)
-					if (isnewline(*sp))
-						break;
-				goto dead;
-			case I_WORD:
-				i = sp > bol && iswordchar(sp[-1]);
-				i ^= iswordchar(sp[0]);
-				if (i)
-					break;
-				goto dead;
-			case I_NWORD:
-				i = sp > bol && iswordchar(sp[-1]);
-				i ^= iswordchar(sp[0]);
-				if (!i)
-					break;
-				goto dead;
-
-			case I_LPAR:
-				sub.sub[pc->n].sp = sp;
-				break;
-			case I_RPAR:
-				sub.sub[pc->n].ep = sp;
-				break;
-			default:
-				goto dead;
-			}
-			pc = pc + 1;
-		}
-dead: ;
-	}
-	return 0;
-}
-
-int re_regexec(Reprog *prog, const char *sp, Resub *sub, int eflags)
-{
-	Resub scratch;
-	int i;
-
-	if (!sub)
-		sub = &scratch;
-
-	sub->nsub = prog->nsub;
-	for (i = 0; i < MAXSUB; ++i)
-		sub->sub[i].sp = sub->sub[i].ep = NULL;
-
-	return !match(prog->start, sp, sp, prog->flags | eflags, sub);
-}
-
-#ifdef TEST
-int main(int argc, char **argv)
-{
-	const char *error;
-	const char *s;
-	Reprog *p;
-	Resub m;
-	unsigned int i;
-
-	if (argc > 1) {
-		p = regcomp(argv[1], 0, &error);
-		if (!p) {
-			fprintf(stderr, "regcomp: %s\n", error);
-			return 1;
-		}
-
-		if (argc > 2) {
-			s = argv[2];
-			printf("nsub = %d\n", p->nsub);
-			if (!regexec(p, s, &m, 0)) {
-				for (i = 0; i < m.nsub; ++i) {
-					int n = m.sub[i].ep - m.sub[i].sp;
-					if (n > 0)
-						printf("match %d: s=%d e=%d n=%d '%.*s'\n", i, (int)(m.sub[i].sp - s), (int)(m.sub[i].ep - s), n, n, m.sub[i].sp);
-					else
-						printf("match %d: n=0 ''\n", i);
-				}
-			} else {
-				printf("no match\n");
-			}
-		}
-	}
-
-	return 0;
-}
-#endif

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/regexp.h
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/regexp.h b/thirdparty/librdkafka-0.11.1/src/regexp.h
deleted file mode 100644
index 535b02c..0000000
--- a/thirdparty/librdkafka-0.11.1/src/regexp.h
+++ /dev/null
@@ -1,31 +0,0 @@
-#ifndef regexp_h
-#define regexp_h
-
-typedef struct Reprog Reprog;
-typedef struct Resub Resub;
-
-Reprog *re_regcomp(const char *pattern, int cflags, const char **errorp);
-int re_regexec(Reprog *prog, const char *string, Resub *sub, int eflags);
-void re_regfree(Reprog *prog);
-
-enum {
-	/* regcomp flags */
-	REG_ICASE = 1,
-	REG_NEWLINE = 2,
-
-	/* regexec flags */
-	REG_NOTBOL = 4,
-
-	/* limits */
-	REG_MAXSUB = 16
-};
-
-struct Resub {
-	unsigned int nsub;
-	struct {
-		const char *sp;
-		const char *ep;
-	} sub[REG_MAXSUB];
-};
-
-#endif
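
For orientation, a hedged usage sketch of this bundled regexp engine, mirroring
the #ifdef TEST harness in regexp.c; note that re_regexec() returns 0 when a
match is found, and sub[1] holds the first capture group:

    #include <stdio.h>
    #include "regexp.h"

    int main(void) {
      const char *error = NULL;
      Reprog *prog = re_regcomp("h(ell)o", REG_ICASE, &error);
      if (!prog) {
        fprintf(stderr, "regcomp: %s\n", error);
        return 1;
      }

      Resub m;
      if (!re_regexec(prog, "Hello world", &m, 0))  /* 0 == match found */
        printf("group 1: '%.*s'\n",
               (int)(m.sub[1].ep - m.sub[1].sp), m.sub[1].sp);

      re_regfree(prog);
      return 0;
    }
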

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/snappy.c
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/snappy.c b/thirdparty/librdkafka-0.11.1/src/snappy.c
deleted file mode 100644
index 376a432..0000000
--- a/thirdparty/librdkafka-0.11.1/src/snappy.c
+++ /dev/null
@@ -1,1834 +0,0 @@
-/*
- * C port of the snappy compressor from Google.
- * This is a very fast compressor with comparable compression to lzo.
- * Works best on 64bit little-endian, but should be good on others too.
- * Ported by Andi Kleen.
- * Up to date with snappy 1.1.0
- */
-
-/*
- * Copyright 2005 Google Inc. All Rights Reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- *     * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- *     * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-align"
-
-#ifndef SG
-#define SG /* Scatter-Gather / iovec support in Snappy */
-#endif
-
-#ifdef __KERNEL__
-#include <linux/kernel.h>
-#ifdef SG
-#include <linux/uio.h>
-#endif
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/snappy.h>
-#include <linux/vmalloc.h>
-#include <asm/unaligned.h>
-#else
-#include "snappy.h"
-#include "snappy_compat.h"
-#endif
-
-#include "rd.h"
-
-#ifdef _MSC_VER
-#define inline __inline
-#endif
-
-#define CRASH_UNLESS(x) BUG_ON(!(x))
-#define CHECK(cond) CRASH_UNLESS(cond)
-#define CHECK_LE(a, b) CRASH_UNLESS((a) <= (b))
-#define CHECK_GE(a, b) CRASH_UNLESS((a) >= (b))
-#define CHECK_EQ(a, b) CRASH_UNLESS((a) == (b))
-#define CHECK_NE(a, b) CRASH_UNLESS((a) != (b))
-#define CHECK_LT(a, b) CRASH_UNLESS((a) < (b))
-#define CHECK_GT(a, b) CRASH_UNLESS((a) > (b))
-
-#define UNALIGNED_LOAD16(_p) get_unaligned((u16 *)(_p))
-#define UNALIGNED_LOAD32(_p) get_unaligned((u32 *)(_p))
-#define UNALIGNED_LOAD64(_p) get_unaligned64((u64 *)(_p))
-
-#define UNALIGNED_STORE16(_p, _val) put_unaligned(_val, (u16 *)(_p))
-#define UNALIGNED_STORE32(_p, _val) put_unaligned(_val, (u32 *)(_p))
-#define UNALIGNED_STORE64(_p, _val) put_unaligned64(_val, (u64 *)(_p))
-
-/*
- * This can be more efficient than UNALIGNED_LOAD64 + UNALIGNED_STORE64
- * on some platforms, in particular ARM.
- */
-static inline void unaligned_copy64(const void *src, void *dst)
-{
-	if (sizeof(void *) == 8) {
-		UNALIGNED_STORE64(dst, UNALIGNED_LOAD64(src));
-	} else {
-		const char *src_char = (const char *)(src);
-		char *dst_char = (char *)(dst);
-
-		UNALIGNED_STORE32(dst_char, UNALIGNED_LOAD32(src_char));
-		UNALIGNED_STORE32(dst_char + 4, UNALIGNED_LOAD32(src_char + 4));
-	}
-}
-
-#ifdef NDEBUG
-
-#define DCHECK(cond) do {} while(0)
-#define DCHECK_LE(a, b) do {} while(0)
-#define DCHECK_GE(a, b) do {} while(0)
-#define DCHECK_EQ(a, b) do {} while(0)
-#define DCHECK_NE(a, b) do {} while(0)
-#define DCHECK_LT(a, b) do {} while(0)
-#define DCHECK_GT(a, b) do {} while(0)
-
-#else
-
-#define DCHECK(cond) CHECK(cond)
-#define DCHECK_LE(a, b) CHECK_LE(a, b)
-#define DCHECK_GE(a, b) CHECK_GE(a, b)
-#define DCHECK_EQ(a, b) CHECK_EQ(a, b)
-#define DCHECK_NE(a, b) CHECK_NE(a, b)
-#define DCHECK_LT(a, b) CHECK_LT(a, b)
-#define DCHECK_GT(a, b) CHECK_GT(a, b)
-
-#endif
-
-static inline bool is_little_endian(void)
-{
-#ifdef __LITTLE_ENDIAN__
-	return true;
-#endif
-	return false;
-}
-
-#if defined(__xlc__) // xlc compiler on AIX
-#define rd_clz(n)   __cntlz4(n)
-#define rd_ctz(n)   __cnttz4(n)
-#define rd_ctz64(n) __cnttz8(n)
-
-#elif defined(__SUNPRO_C) // Solaris Studio compiler on sun  
-/*
- * Source for following definitions is Hacker’s Delight, Second Edition by Henry S. Warren
- * http://www.hackersdelight.org/permissions.htm
- */
-u32 rd_clz(u32 x) {
-   u32 n;
-
-   if (x == 0) return(32);
-   n = 1;
-   if ((x >> 16) == 0) {n = n +16; x = x <<16;}
-   if ((x >> 24) == 0) {n = n + 8; x = x << 8;}
-   if ((x >> 28) == 0) {n = n + 4; x = x << 4;}
-   if ((x >> 30) == 0) {n = n + 2; x = x << 2;}
-   n = n - (x >> 31);
-   return n;
-}
-
-u32 rd_ctz(u32 x) {
-   u32 y;
-   u32 n;
-
-   if (x == 0) return 32;
-   n = 31;
-   y = x <<16;  if (y != 0) {n = n -16; x = y;}
-   y = x << 8;  if (y != 0) {n = n - 8; x = y;}
-   y = x << 4;  if (y != 0) {n = n - 4; x = y;}
-   y = x << 2;  if (y != 0) {n = n - 2; x = y;}
-   y = x << 1;  if (y != 0) {n = n - 1;}
-   return n;
-}
-
-u64 rd_ctz64(u64 x) {
-   u64 y;
-   u64 n;
-
-   if (x == 0) return 64;
-   n = 63;
-   y = x <<32;  if (y != 0) {n = n -32; x = y;}
-   y = x <<16;  if (y != 0) {n = n -16; x = y;}
-   y = x << 8;  if (y != 0) {n = n - 8; x = y;}
-   y = x << 4;  if (y != 0) {n = n - 4; x = y;}
-   y = x << 2;  if (y != 0) {n = n - 2; x = y;}
-   y = x << 1;  if (y != 0) {n = n - 1;}
-   return n;
-}
-#elif !defined(_MSC_VER)
-#define rd_clz(n)   __builtin_clz(n)
-#define rd_ctz(n)   __builtin_ctz(n)
-#define rd_ctz64(n) __builtin_ctzll(n)
-#else
-#include <intrin.h>
-static int inline rd_clz(u32 x) {
-	int r = 0;
-	if (_BitScanForward(&r, x))
-		return 31 - r;
-	else
-		return 32;
-}
-
-static int inline rd_ctz(u32 x) {
-	int r = 0;
-	if (_BitScanForward(&r, x))
-		return r;
-	else
-		return 32;
-}
-
-static int inline rd_ctz64(u64 x) {
-#ifdef _M_X64
-	int r = 0;
-	if (_BitScanReverse64(&r, x))
-		return r;
-	else
-		return 64;
-#else
-	int r;
-	if ((r = rd_ctz(x & 0xffffffff)) < 32)
-		return r;
-	return 32 + rd_ctz(x >> 32);
-#endif
-}
-#endif
-
-
-static inline int log2_floor(u32 n)
-{
-	return n == 0 ? -1 : 31 ^ rd_clz(n);
-}
-
-static inline RD_UNUSED int find_lsb_set_non_zero(u32 n)
-{
-	return rd_ctz(n);
-}
-
-static inline RD_UNUSED int find_lsb_set_non_zero64(u64 n)
-{
-	return rd_ctz64(n);
-}
-
-#define kmax32 5
-
-/*
- * Attempts to parse a varint32 from a prefix of the bytes in [ptr,limit-1].
- *  Never reads a character at or beyond limit.  If a valid/terminated varint32
- * was found in the range, stores it in *OUTPUT and returns a pointer just
- * past the last byte of the varint32. Else returns NULL.  On success,
- * "result <= limit".
- */
-static inline const char *varint_parse32_with_limit(const char *p,
-						    const char *l,
-						    u32 * OUTPUT)
-{
-	const unsigned char *ptr = (const unsigned char *)(p);
-	const unsigned char *limit = (const unsigned char *)(l);
-	u32 b, result;
-
-	if (ptr >= limit)
-		return NULL;
-	b = *(ptr++);
-	result = b & 127;
-	if (b < 128)
-		goto done;
-	if (ptr >= limit)
-		return NULL;
-	b = *(ptr++);
-	result |= (b & 127) << 7;
-	if (b < 128)
-		goto done;
-	if (ptr >= limit)
-		return NULL;
-	b = *(ptr++);
-	result |= (b & 127) << 14;
-	if (b < 128)
-		goto done;
-	if (ptr >= limit)
-		return NULL;
-	b = *(ptr++);
-	result |= (b & 127) << 21;
-	if (b < 128)
-		goto done;
-	if (ptr >= limit)
-		return NULL;
-	b = *(ptr++);
-	result |= (b & 127) << 28;
-	if (b < 16)
-		goto done;
-	return NULL;		/* Value is too long to be a varint32 */
-done:
-	*OUTPUT = result;
-	return (const char *)(ptr);
-}
-
-/*
- * REQUIRES   "ptr" points to a buffer of length sufficient to hold "v".
- *  EFFECTS    Encodes "v" into "ptr" and returns a pointer to the
- *            byte just past the last encoded byte.
- */
-static inline char *varint_encode32(char *sptr, u32 v)
-{
-	/* Operate on characters as unsigneds */
-	unsigned char *ptr = (unsigned char *)(sptr);
-	static const int B = 128;
-
-	if (v < (1 << 7)) {
-		*(ptr++) = v;
-	} else if (v < (1 << 14)) {
-		*(ptr++) = v | B;
-		*(ptr++) = v >> 7;
-	} else if (v < (1 << 21)) {
-		*(ptr++) = v | B;
-		*(ptr++) = (v >> 7) | B;
-		*(ptr++) = v >> 14;
-	} else if (v < (1 << 28)) {
-		*(ptr++) = v | B;
-		*(ptr++) = (v >> 7) | B;
-		*(ptr++) = (v >> 14) | B;
-		*(ptr++) = v >> 21;
-	} else {
-		*(ptr++) = v | B;
-		*(ptr++) = (v >> 7) | B;
-		*(ptr++) = (v >> 14) | B;
-		*(ptr++) = (v >> 21) | B;
-		*(ptr++) = v >> 28;
-	}
-	return (char *)(ptr);
-}
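
To make the varint32 layout used by the two helpers above concrete (seven
payload bits per byte, the high bit flags a continuation byte), here is a
hypothetical self-check; it assumes it is compiled inside this translation
unit so the static inline helpers and the u32/DCHECK definitions are visible:

    static void varint32_roundtrip_example(void)
    {
      char buf[5];
      u32 value = 0;

      /* 300 = 0b1_0010_1100: low seven bits (0x2C) with the continuation
       * bit set -> 0xAC, then the remaining bits -> 0x02. */
      char *end = varint_encode32(buf, 300);
      DCHECK_EQ(end - buf, 2);

      const char *next = varint_parse32_with_limit(buf, end, &value);
      DCHECK(next == end);
      DCHECK_EQ(value, 300);
    }
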
-
-#ifdef SG
-
-static inline void *n_bytes_after_addr(void *addr, size_t n_bytes)
-{
-    return (void *) ((char *)addr + n_bytes);
-}
-
-struct source {
-	struct iovec *iov;
-	int iovlen;
-	int curvec;
-	int curoff;
-	size_t total;
-};
-
-/* Only valid at beginning when nothing is consumed */
-static inline int available(struct source *s)
-{
-	return (int) s->total;
-}
-
-static inline const char *peek(struct source *s, size_t *len)
-{
-	if (likely(s->curvec < s->iovlen)) {
-		struct iovec *iv = &s->iov[s->curvec];
-		if ((unsigned)s->curoff < (size_t)iv->iov_len) { 
-			*len = iv->iov_len - s->curoff;
-			return n_bytes_after_addr(iv->iov_base, s->curoff);
-		}
-	}
-	*len = 0;
-	return NULL;
-}
-
-static inline void skip(struct source *s, size_t n)
-{
-	struct iovec *iv = &s->iov[s->curvec];
-	s->curoff += (int) n;
-	DCHECK_LE((unsigned)s->curoff, (size_t)iv->iov_len);
-	if ((unsigned)s->curoff >= (size_t)iv->iov_len &&
-	    s->curvec + 1 < s->iovlen) {
-		s->curoff = 0;
-		s->curvec++;
-	}
-}
-
-struct sink {
-	struct iovec *iov;
-	int iovlen;
-	unsigned curvec;
-	unsigned curoff;
-	unsigned written;
-};
-
-static inline void append(struct sink *s, const char *data, size_t n)
-{
-	struct iovec *iov = &s->iov[s->curvec];
-	char *dst = n_bytes_after_addr(iov->iov_base, s->curoff);
-	size_t nlen = min_t(size_t, iov->iov_len - s->curoff, n);
-	if (data != dst)
-		memcpy(dst, data, nlen);
-	s->written += (int) n;
-	s->curoff += (int) nlen;
-	while ((n -= nlen) > 0) {
-		data += nlen;
-		s->curvec++;
-		DCHECK_LT((signed)s->curvec, s->iovlen);
-		iov++;
-		nlen = min_t(size_t, (size_t)iov->iov_len, n);
-		memcpy(iov->iov_base, data, nlen);
-		s->curoff = (int) nlen;
-	}
-}
-
-static inline void *sink_peek(struct sink *s, size_t n)
-{
-	struct iovec *iov = &s->iov[s->curvec];
-	if (s->curvec < (size_t)iov->iov_len && iov->iov_len - s->curoff >= n)
-		return n_bytes_after_addr(iov->iov_base, s->curoff);
-	return NULL;
-}
-
-#else
-
-struct source {
-	const char *ptr;
-	size_t left;
-};
-
-static inline int available(struct source *s)
-{
-	return s->left;
-}
-
-static inline const char *peek(struct source *s, size_t * len)
-{
-	*len = s->left;
-	return s->ptr;
-}
-
-static inline void skip(struct source *s, size_t n)
-{
-	s->left -= n;
-	s->ptr += n;
-}
-
-struct sink {
-	char *dest;
-};
-
-static inline void append(struct sink *s, const char *data, size_t n)
-{
-	if (data != s->dest)
-		memcpy(s->dest, data, n);
-	s->dest += n;
-}
-
-#define sink_peek(s, n) sink_peek_no_sg(s)
-
-static inline void *sink_peek_no_sg(const struct sink *s)
-{
-	return s->dest;
-}
-
-#endif
-
-struct writer {
-	char *base;
-	char *op;
-	char *op_limit;
-};
-
-/* Called before decompression */
-static inline void writer_set_expected_length(struct writer *w, size_t len)
-{
-	w->op_limit = w->op + len;
-}
-
-/* Called after decompression */
-static inline bool writer_check_length(struct writer *w)
-{
-	return w->op == w->op_limit;
-}
-
-/*
- * Copy "len" bytes from "src" to "op", one byte at a time.  Used for
- *  handling COPY operations where the input and output regions may
- * overlap.  For example, suppose:
- *    src    == "ab"
- *    op     == src + 2
- *    len    == 20
- * After IncrementalCopy(src, op, len), the result will have
- * eleven copies of "ab"
- *    ababababababababababab
- * Note that this does not match the semantics of either memcpy()
- * or memmove().
- */
-static inline void incremental_copy(const char *src, char *op, ssize_t len)
-{
-	DCHECK_GT(len, 0);
-	do {
-		*op++ = *src++;
-	} while (--len > 0);
-}
-
-/*
- * Equivalent to IncrementalCopy except that it can write up to ten extra
- *  bytes after the end of the copy, and that it is faster.
- *
- * The main part of this loop is a simple copy of eight bytes at a time until
- * we've copied (at least) the requested amount of bytes.  However, if op and
- * src are less than eight bytes apart (indicating a repeating pattern of
- * length < 8), we first need to expand the pattern in order to get the correct
- * results. For instance, if the buffer looks like this, with the eight-byte
- * <src> and <op> patterns marked as intervals:
- *
- *    abxxxxxxxxxxxx
- *    [------]           src
- *      [------]         op
- *
- * a single eight-byte copy from <src> to <op> will repeat the pattern once,
- * after which we can move <op> two bytes without moving <src>:
- *
- *    ababxxxxxxxxxx
- *    [------]           src
- *        [------]       op
- *
- * and repeat the exercise until the two no longer overlap.
- *
- * This allows us to do very well in the special case of one single byte
- * repeated many times, without taking a big hit for more general cases.
- *
- * The worst case of extra writing past the end of the match occurs when
- * op - src == 1 and len == 1; the last copy will read from byte positions
- * [0..7] and write to [4..11], whereas it was only supposed to write to
- * position 1. Thus, ten excess bytes.
- */
-
-#define kmax_increment_copy_overflow  10
-
-static inline void incremental_copy_fast_path(const char *src, char *op,
-					      ssize_t len)
-{
-	while (op - src < 8) {
-		unaligned_copy64(src, op);
-		len -= op - src;
-		op += op - src;
-	}
-	while (len > 0) {
-		unaligned_copy64(src, op);
-		src += 8;
-		op += 8;
-		len -= 8;
-	}
-}
-
-static inline bool writer_append_from_self(struct writer *w, u32 offset,
-					   u32 len)
-{
-	char *const op = w->op;
-	CHECK_LE(op, w->op_limit);
-	const u32 space_left = (u32) (w->op_limit - op);
-
-	if ((unsigned)(op - w->base) <= offset - 1u)	/* -1u catches offset==0 */
-		return false;
-	if (len <= 16 && offset >= 8 && space_left >= 16) {
-		/* Fast path, used for the majority (70-80%) of dynamic
-		 * invocations. */
-		unaligned_copy64(op - offset, op);
-		unaligned_copy64(op - offset + 8, op + 8);
-	} else {
-		if (space_left >= len + kmax_increment_copy_overflow) {
-			incremental_copy_fast_path(op - offset, op, len);
-		} else {
-			if (space_left < len) {
-				return false;
-			}
-			incremental_copy(op - offset, op, len);
-		}
-	}
-
-	w->op = op + len;
-	return true;
-}
-
-static inline bool writer_append(struct writer *w, const char *ip, u32 len)
-{
-	char *const op = w->op;
-	CHECK_LE(op, w->op_limit);
-	const u32 space_left = (u32) (w->op_limit - op);
-	if (space_left < len)
-		return false;
-	memcpy(op, ip, len);
-	w->op = op + len;
-	return true;
-}
-
-static inline bool writer_try_fast_append(struct writer *w, const char *ip, 
-					  u32 available_bytes, u32 len)
-{
-	char *const op = w->op;
-	const int space_left = (int) (w->op_limit - op);
-	if (len <= 16 && available_bytes >= 16 && space_left >= 16) {
-		/* Fast path, used for the majority (~95%) of invocations */
-		unaligned_copy64(ip, op);
-		unaligned_copy64(ip + 8, op + 8);
-		w->op = op + len;
-		return true;
-	}
-	return false;
-}
-
-/*
- * Any hash function will produce a valid compressed bitstream, but a good
- * hash function reduces the number of collisions and thus yields better
- * compression for compressible input, and more speed for incompressible
- * input. Of course, it doesn't hurt if the hash function is reasonably fast
- * either, as it gets called a lot.
- */
-static inline u32 hash_bytes(u32 bytes, int shift)
-{
-	u32 kmul = 0x1e35a7bd;
-	return (bytes * kmul) >> shift;
-}
-
-static inline u32 hash(const char *p, int shift)
-{
-	return hash_bytes(UNALIGNED_LOAD32(p), shift);
-}
-
-/*
- * Compressed data can be defined as:
- *    compressed := item* literal*
- *    item       := literal* copy
- *
- * The trailing literal sequence has a space blowup of at most 62/60
- * since a literal of length 60 needs one tag byte + one extra byte
- * for length information.
- *
- * Item blowup is trickier to measure.  Suppose the "copy" op copies
- * 4 bytes of data.  Because of a special check in the encoding code,
- * we produce a 4-byte copy only if the offset is < 65536.  Therefore
- * the copy op takes 3 bytes to encode, and this type of item leads
- * to at most the 62/60 blowup for representing literals.
- *
- * Suppose the "copy" op copies 5 bytes of data.  If the offset is big
- * enough, it will take 5 bytes to encode the copy op.  Therefore the
- * worst case here is a one-byte literal followed by a five-byte copy.
- * I.e., 6 bytes of input turn into 7 bytes of "compressed" data.
- *
- * This last factor dominates the blowup, so the final estimate is:
- */
-size_t rd_kafka_snappy_max_compressed_length(size_t source_len)
-{
-	return 32 + source_len + source_len / 6;
-}
-EXPORT_SYMBOL(rd_kafka_snappy_max_compressed_length);
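
As a quick sanity check of the bound just derived (32 bytes of fixed slack plus roughly one extra byte per six input bytes), a tiny standalone program; the sizes are hypothetical, not taken from this diff:

    #include <assert.h>
    #include <stddef.h>

    /* Same arithmetic as rd_kafka_snappy_max_compressed_length() above. */
    static size_t max_clen(size_t n) { return 32 + n + n / 6; }

    int main(void) {
        assert(max_clen(0) == 32);           /* fixed slack only      */
        assert(max_clen(65536) == 76490);    /* one full 64 KiB block */
        return 0;
    }
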
-
-enum {
-	LITERAL = 0,
-	COPY_1_BYTE_OFFSET = 1,	/* 3 bit length + 3 bits of offset in opcode */
-	COPY_2_BYTE_OFFSET = 2,
-	COPY_4_BYTE_OFFSET = 3
-};
-
-static inline char *emit_literal(char *op,
-				 const char *literal,
-				 int len, bool allow_fast_path)
-{
-	int n = len - 1;	/* Zero-length literals are disallowed */
-
-	if (n < 60) {
-		/* Fits in tag byte */
-		*op++ = LITERAL | (n << 2);
-
-/*
- * The vast majority of copies are below 16 bytes, for which a
- * call to memcpy is overkill. This fast path can sometimes
- * copy up to 15 bytes too much, but that is okay in the
- * main loop, since we have a bit to go on for both sides:
- *
- *   - The input will always have kInputMarginBytes = 15 extra
- *     available bytes, as long as we're in the main loop, and
- *     if not, allow_fast_path = false.
- *   - The output will always have 32 spare bytes (see
- *     MaxCompressedLength).
- */
-		if (allow_fast_path && len <= 16) {
-			unaligned_copy64(literal, op);
-			unaligned_copy64(literal + 8, op + 8);
-			return op + len;
-		}
-	} else {
-		/* Encode in upcoming bytes */
-		char *base = op;
-		int count = 0;
-		op++;
-		while (n > 0) {
-			*op++ = n & 0xff;
-			n >>= 8;
-			count++;
-		}
-		DCHECK(count >= 1);
-		DCHECK(count <= 4);
-		*base = LITERAL | ((59 + count) << 2);
-	}
-	memcpy(op, literal, len);
-	return op + len;
-}
-
-static inline char *emit_copy_less_than64(char *op, int offset, int len)
-{
-	DCHECK_LE(len, 64);
-	DCHECK_GE(len, 4);
-	DCHECK_LT(offset, 65536);
-
-	if ((len < 12) && (offset < 2048)) {
-		int len_minus_4 = len - 4;
-		DCHECK(len_minus_4 < 8);	/* Must fit in 3 bits */
-		*op++ =
-		    COPY_1_BYTE_OFFSET + ((len_minus_4) << 2) + ((offset >> 8)
-								 << 5);
-		*op++ = offset & 0xff;
-	} else {
-		*op++ = COPY_2_BYTE_OFFSET + ((len - 1) << 2);
-		put_unaligned_le16(offset, op);
-		op += 2;
-	}
-	return op;
-}
-
-static inline char *emit_copy(char *op, int offset, int len)
-{
-	/*
-	 * Emit 64 byte copies but make sure to keep at least four bytes
-	 * reserved
-	 */
-	while (len >= 68) {
-		op = emit_copy_less_than64(op, offset, 64);
-		len -= 64;
-	}
-
-	/*
-	 * Emit an extra 60-byte copy if we have too much data to fit in

-	 * one copy
-	 */
-	if (len > 64) {
-		op = emit_copy_less_than64(op, offset, 60);
-		len -= 60;
-	}
-
-	/* Emit remainder */
-	op = emit_copy_less_than64(op, offset, len);
-	return op;
-}
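
For illustration, the two-byte form produced by emit_copy_less_than64() for a short, nearby match (hypothetical offset 10, length 7): the first byte packs 2 tag bits, 3 length bits (len - 4) and the 3 high offset bits, followed by the low offset byte:

    #include <assert.h>
    #include <stdint.h>

    int main(void) {
        int offset = 10, len = 7;                    /* hypothetical match  */
        uint8_t tag = 1                              /* COPY_1_BYTE_OFFSET  */
                    | (uint8_t)((len - 4) << 2)      /* 3-bit length field  */
                    | (uint8_t)((offset >> 8) << 5); /* offset bits 8..10   */
        uint8_t low = (uint8_t)(offset & 0xff);
        assert(tag == 0x0d && low == 0x0a);          /* the two emitted bytes */
        return 0;
    }
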
-
-/**
- * rd_kafka_snappy_uncompressed_length - return length of uncompressed output.
- * @start: compressed buffer
- * @n: length of compressed buffer.
- * @result: Write the length of the uncompressed output here.
- *
- * Returns true when successful, otherwise false.
- */
-bool rd_kafka_snappy_uncompressed_length(const char *start, size_t n, size_t * result)
-{
-	u32 v = 0;
-	const char *limit = start + n;
-	if (varint_parse32_with_limit(start, limit, &v) != NULL) {
-		*result = v;
-		return true;
-	} else {
-		return false;
-	}
-}
-EXPORT_SYMBOL(rd_kafka_snappy_uncompressed_length);
-
-/*
- * The size of a compression block. Note that many parts of the compression
- * code assume that kBlockSize <= 65536; in particular, the hash table
- * can only store 16-bit offsets, and EmitCopy() also assumes the offset
- * is 65535 bytes or less. Note also that if you change this, it will
- * affect the framing format.
- * Note that there might be older data around that is compressed with larger
- * block sizes, so the decompression code should not rely on the
- * non-existence of long backreferences.
- */
-#define kblock_log 16
-#define kblock_size (1 << kblock_log)
-
-/* 
- * This value could be halved or quartered to save memory
- * at the cost of slightly worse compression.
- */
-#define kmax_hash_table_bits 14
-#define kmax_hash_table_size (1U << kmax_hash_table_bits)
-
-/*
- * Use smaller hash table when input.size() is smaller, since we
- * fill the table, incurring O(hash table size) overhead for
- * compression, and if the input is short, we won't need that
- * many hash table entries anyway.
- */
-static u16 *get_hash_table(struct snappy_env *env, size_t input_size,
-			      int *table_size)
-{
-	unsigned htsize = 256;
-
-	DCHECK(kmax_hash_table_size >= 256);
-	while (htsize < kmax_hash_table_size && htsize < input_size)
-		htsize <<= 1;
-	CHECK_EQ(0, htsize & (htsize - 1));
-	CHECK_LE(htsize, kmax_hash_table_size);
-
-	u16 *table;
-	table = env->hash_table;
-
-	*table_size = htsize;
-	memset(table, 0, htsize * sizeof(*table));
-	return table;
-}
-
-/*
- * Return the largest n such that
- *
- *   s1[0,n-1] == s2[0,n-1]
- *   and n <= (s2_limit - s2).
- *
- * Does not read *s2_limit or beyond.
- * Does not read *(s1 + (s2_limit - s2)) or beyond.
- * Requires that s2_limit >= s2.
- *
- * Separate implementation for x86_64, for speed.  Uses the fact that
- * x86_64 is little endian.
- */
-#if defined(__LITTLE_ENDIAN__) && BITS_PER_LONG == 64
-static inline int find_match_length(const char *s1,
-				    const char *s2, const char *s2_limit)
-{
-	int matched = 0;
-
-	DCHECK_GE(s2_limit, s2);
-	/*
-	 * Find out how long the match is. We loop over the data 64 bits at a
-	 * time until we find a 64-bit block that doesn't match; then we find
-	 * the first non-matching bit and use that to calculate the total
-	 * length of the match.
-	 */
-	while (likely(s2 <= s2_limit - 8)) {
-		if (unlikely
-		    (UNALIGNED_LOAD64(s2) == UNALIGNED_LOAD64(s1 + matched))) {
-			s2 += 8;
-			matched += 8;
-		} else {
-			/*
-			 * On current (mid-2008) Opteron models there
-			 * is a 3% more efficient code sequence to
-			 * find the first non-matching byte.  However,
-			 * what follows is ~10% better on Intel Core 2
-			 * and newer, and we expect AMD's bsf
-			 * instruction to improve.
-			 */
-			u64 x =
-			    UNALIGNED_LOAD64(s2) ^ UNALIGNED_LOAD64(s1 +
-								    matched);
-			int matching_bits = find_lsb_set_non_zero64(x);
-			matched += matching_bits >> 3;
-			return matched;
-		}
-	}
-	while (likely(s2 < s2_limit)) {
-		if (likely(s1[matched] == *s2)) {
-			++s2;
-			++matched;
-		} else {
-			return matched;
-		}
-	}
-	return matched;
-}
-#else
-static inline int find_match_length(const char *s1,
-				    const char *s2, const char *s2_limit)
-{
-	/* Implementation based on the x86-64 version, above. */
-	DCHECK_GE(s2_limit, s2);
-	int matched = 0;
-
-	while (s2 <= s2_limit - 4 &&
-	       UNALIGNED_LOAD32(s2) == UNALIGNED_LOAD32(s1 + matched)) {
-		s2 += 4;
-		matched += 4;
-	}
-	if (is_little_endian() && s2 <= s2_limit - 4) {
-		u32 x =
-		    UNALIGNED_LOAD32(s2) ^ UNALIGNED_LOAD32(s1 + matched);
-		int matching_bits = find_lsb_set_non_zero(x);
-		matched += matching_bits >> 3;
-	} else {
-		while ((s2 < s2_limit) && (s1[matched] == *s2)) {
-			++s2;
-			++matched;
-		}
-	}
-	return matched;
-}
-#endif
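
A naive byte-at-a-time rendition of the contract stated above (only for illustration; both real implementations above work word-wise for speed):

    #include <assert.h>

    static int match_length_ref(const char *s1, const char *s2,
                                const char *s2_limit) {
        int n = 0;
        while (s2 + n < s2_limit && s1[n] == s2[n])
            n++;
        return n;
    }

    int main(void) {
        const char *buf = "abcabcabx";
        /* Compare the data at offset 0 with the data at offset 3. */
        assert(match_length_ref(buf, buf + 3, buf + 9) == 5);  /* "abcab" */
        return 0;
    }
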
-
-/*
- * For 0 <= offset <= 4, GetU32AtOffset(GetEightBytesAt(p), offset) will
- *  equal UNALIGNED_LOAD32(p + offset).  Motivation: On x86-64 hardware we have
- * empirically found that overlapping loads such as
- *  UNALIGNED_LOAD32(p) ... UNALIGNED_LOAD32(p+1) ... UNALIGNED_LOAD32(p+2)
- * are slower than UNALIGNED_LOAD64(p) followed by shifts and casts to u32.
- *
- * We have different versions for 64- and 32-bit; ideally we would avoid the
- * two functions and just inline the UNALIGNED_LOAD64 call into
- * GetUint32AtOffset, but GCC (at least as of 4.6) is seemingly not clever
- * enough to avoid loading the value multiple times then. For 64-bit, the load
- * is done when GetEightBytesAt() is called, whereas for 32-bit, the load is
- * done at GetUint32AtOffset() time.
- */
-
-#if BITS_PER_LONG == 64
-
-typedef u64 eight_bytes_reference;
-
-static inline eight_bytes_reference get_eight_bytes_at(const char* ptr)
-{
-	return UNALIGNED_LOAD64(ptr);
-}
-
-static inline u32 get_u32_at_offset(u64 v, int offset)
-{
-	DCHECK_GE(offset, 0);
-	DCHECK_LE(offset, 4);
-	return v >> (is_little_endian()? 8 * offset : 32 - 8 * offset);
-}
-
-#else
-
-typedef const char *eight_bytes_reference;
-
-static inline eight_bytes_reference get_eight_bytes_at(const char* ptr) 
-{
-	return ptr;
-}
-
-static inline u32 get_u32_at_offset(const char *v, int offset) 
-{
-	DCHECK_GE(offset, 0);
-	DCHECK_LE(offset, 4);
-	return UNALIGNED_LOAD32(v + offset);
-}
-#endif
-
-/*
- * Flat array compression that does not emit the "uncompressed length"
- *  prefix. Compresses "input" string to the "*op" buffer.
- *
- * REQUIRES: "input" is at most "kBlockSize" bytes long.
- * REQUIRES: "op" points to an array of memory that is at least
- * "MaxCompressedLength(input.size())" in size.
- * REQUIRES: All elements in "table[0..table_size-1]" are initialized to zero.
- * REQUIRES: "table_size" is a power of two
- *
- * Returns an "end" pointer into "op" buffer.
- * "end - op" is the compressed size of "input".
- */
-
-static char *compress_fragment(const char *const input,
-			       const size_t input_size,
-			       char *op, u16 * table, const unsigned table_size)
-{
-	/* "ip" is the input pointer, and "op" is the output pointer. */
-	const char *ip = input;
-	CHECK_LE(input_size, kblock_size);
-	CHECK_EQ(table_size & (table_size - 1), 0);
-	const int shift = 32 - log2_floor(table_size);
-	DCHECK_EQ(UINT_MAX >> shift, table_size - 1);
-	const char *ip_end = input + input_size;
-	const char *baseip = ip;
-	/*
-	 * Bytes in [next_emit, ip) will be emitted as literal bytes.  Or
-	 *  [next_emit, ip_end) after the main loop.
-	 */
-	const char *next_emit = ip;
-
-	const unsigned kinput_margin_bytes = 15;
-
-	if (likely(input_size >= kinput_margin_bytes)) {
-		const char *const ip_limit = input + input_size -
-			kinput_margin_bytes;
-
-		u32 next_hash;
-		for (next_hash = hash(++ip, shift);;) {
-			DCHECK_LT(next_emit, ip);
-/*
- * The body of this loop calls EmitLiteral once and then EmitCopy one or
- * more times.  (The exception is that when we're close to exhausting
- * the input we goto emit_remainder.)
- *
- * In the first iteration of this loop we're just starting, so
- * there's nothing to copy, so calling EmitLiteral once is
- * necessary.  And we only start a new iteration when the
- * current iteration has determined that a call to EmitLiteral will
- * precede the next call to EmitCopy (if any).
- *
- * Step 1: Scan forward in the input looking for a 4-byte-long match.
- * If we get close to exhausting the input then goto emit_remainder.
- *
- * Heuristic match skipping: If 32 bytes are scanned with no matches
- * found, start looking only at every other byte. If 32 more bytes are
- * scanned, look at every third byte, etc. When a match is found,
- * immediately go back to looking at every byte. This is a small loss
- * (~5% performance, ~0.1% density) for compressible data due to more
- * bookkeeping, but for non-compressible data (such as JPEG) it's a huge
- * win since the compressor quickly "realizes" the data is incompressible
- * and doesn't bother looking for matches everywhere.
- *
- * The "skip" variable keeps track of how many bytes there are since the
- * last match; dividing it by 32 (i.e. right-shifting by five) gives the
- * number of bytes to move ahead for each iteration.
- */
-			u32 skip_bytes = 32;
-
-			const char *next_ip = ip;
-			const char *candidate;
-			do {
-				ip = next_ip;
-				u32 hval = next_hash;
-				DCHECK_EQ(hval, hash(ip, shift));
-				u32 bytes_between_hash_lookups = skip_bytes++ >> 5;
-				next_ip = ip + bytes_between_hash_lookups;
-				if (unlikely(next_ip > ip_limit)) {
-					goto emit_remainder;
-				}
-				next_hash = hash(next_ip, shift);
-				candidate = baseip + table[hval];
-				DCHECK_GE(candidate, baseip);
-				DCHECK_LT(candidate, ip);
-
-				table[hval] = (u16) (ip - baseip);
-			} while (likely(UNALIGNED_LOAD32(ip) !=
-					UNALIGNED_LOAD32(candidate)));
-
-/*
- * Step 2: A 4-byte match has been found.  We'll later see if more
- * than 4 bytes match.  But, prior to the match, input
- * bytes [next_emit, ip) are unmatched.  Emit them as "literal bytes."
- */
-			DCHECK_LE(next_emit + 16, ip_end);
-			op = emit_literal(op, next_emit, (int) (ip - next_emit), true);
-
-/*
- * Step 3: Call EmitCopy, and then see if another EmitCopy could
- * be our next move.  Repeat until we find no match for the
- * input immediately after what was consumed by the last EmitCopy call.
- *
- * If we exit this loop normally then we need to call EmitLiteral next,
- * though we don't yet know how big the literal will be.  We handle that
- * by proceeding to the next iteration of the main loop.  We also can exit
- * this loop via goto if we get close to exhausting the input.
- */
-			eight_bytes_reference input_bytes;
-			u32 candidate_bytes = 0;
-
-			do {
-/*
- * We have a 4-byte match at ip, and no need to emit any
- *  "literal bytes" prior to ip.
- */
-				const char *base = ip;
-				int matched = 4 +
-				    find_match_length(candidate + 4, ip + 4,
-						      ip_end);
-				ip += matched;
-				int offset = (int) (base - candidate);
-				DCHECK_EQ(0, memcmp(base, candidate, matched));
-				op = emit_copy(op, offset, matched);
-/*
- * We could immediately start working at ip now, but to improve
- * compression we first update table[Hash(ip - 1, ...)].
- */
-				const char *insert_tail = ip - 1;
-				next_emit = ip;
-				if (unlikely(ip >= ip_limit)) {
-					goto emit_remainder;
-				}
-				input_bytes = get_eight_bytes_at(insert_tail);
-				u32 prev_hash =
-				    hash_bytes(get_u32_at_offset
-					       (input_bytes, 0), shift);
-				table[prev_hash] = (u16) (ip - baseip - 1);
-				u32 cur_hash =
-				    hash_bytes(get_u32_at_offset
-					       (input_bytes, 1), shift);
-				candidate = baseip + table[cur_hash];
-				candidate_bytes = UNALIGNED_LOAD32(candidate);
-				table[cur_hash] = (u16) (ip - baseip);
-			} while (get_u32_at_offset(input_bytes, 1) ==
-				 candidate_bytes);
-
-			next_hash =
-			    hash_bytes(get_u32_at_offset(input_bytes, 2),
-				       shift);
-			++ip;
-		}
-	}
-
-emit_remainder:
-	/* Emit the remaining bytes as a literal */
-	if (next_emit < ip_end)
-		op = emit_literal(op, next_emit, (int) (ip_end - next_emit), false);
-
-	return op;
-}
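
The match-skipping heuristic described in the comments inside compress_fragment() advances more aggressively the longer no match is found; a standalone sketch of just that arithmetic (the probe count is illustrative):

    #include <assert.h>
    #include <stdint.h>

    int main(void) {
        uint32_t skip = 32, scanned = 0;
        /* The first 32 probes advance 1 byte each (skip >> 5 == 1), the
         * next 32 advance 2 bytes each, and so on. */
        for (int probe = 0; probe < 64; probe++)
            scanned += skip++ >> 5;
        assert(scanned == 32 * 1 + 32 * 2);   /* 96 bytes after 64 probes */
        return 0;
    }
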
-
-/*
- * -----------------------------------------------------------------------
- *  Lookup table for decompression code.  Generated by ComputeTable() below.
- * -----------------------------------------------------------------------
- */
-
-/* Mapping from i in range [0,4] to a mask to extract the bottom 8*i bits */
-static const u32 wordmask[] = {
-	0u, 0xffu, 0xffffu, 0xffffffu, 0xffffffffu
-};
-
-/*
- * Data stored per entry in lookup table:
- *       Range   Bits-used       Description
- *      ------------------------------------
- *      1..64   0..7            Literal/copy length encoded in opcode byte
- *      0..7    8..10           Copy offset encoded in opcode byte / 256
- *      0..4    11..13          Extra bytes after opcode
- *
- * We use eight bits for the length even though 7 would have sufficed,
- * for efficiency reasons:
- *      (1) Extracting a byte is faster than a bit-field
- *      (2) It properly aligns copy offset so we do not need a <<8
- */
-static const u16 char_table[256] = {
-	0x0001, 0x0804, 0x1001, 0x2001, 0x0002, 0x0805, 0x1002, 0x2002,
-	0x0003, 0x0806, 0x1003, 0x2003, 0x0004, 0x0807, 0x1004, 0x2004,
-	0x0005, 0x0808, 0x1005, 0x2005, 0x0006, 0x0809, 0x1006, 0x2006,
-	0x0007, 0x080a, 0x1007, 0x2007, 0x0008, 0x080b, 0x1008, 0x2008,
-	0x0009, 0x0904, 0x1009, 0x2009, 0x000a, 0x0905, 0x100a, 0x200a,
-	0x000b, 0x0906, 0x100b, 0x200b, 0x000c, 0x0907, 0x100c, 0x200c,
-	0x000d, 0x0908, 0x100d, 0x200d, 0x000e, 0x0909, 0x100e, 0x200e,
-	0x000f, 0x090a, 0x100f, 0x200f, 0x0010, 0x090b, 0x1010, 0x2010,
-	0x0011, 0x0a04, 0x1011, 0x2011, 0x0012, 0x0a05, 0x1012, 0x2012,
-	0x0013, 0x0a06, 0x1013, 0x2013, 0x0014, 0x0a07, 0x1014, 0x2014,
-	0x0015, 0x0a08, 0x1015, 0x2015, 0x0016, 0x0a09, 0x1016, 0x2016,
-	0x0017, 0x0a0a, 0x1017, 0x2017, 0x0018, 0x0a0b, 0x1018, 0x2018,
-	0x0019, 0x0b04, 0x1019, 0x2019, 0x001a, 0x0b05, 0x101a, 0x201a,
-	0x001b, 0x0b06, 0x101b, 0x201b, 0x001c, 0x0b07, 0x101c, 0x201c,
-	0x001d, 0x0b08, 0x101d, 0x201d, 0x001e, 0x0b09, 0x101e, 0x201e,
-	0x001f, 0x0b0a, 0x101f, 0x201f, 0x0020, 0x0b0b, 0x1020, 0x2020,
-	0x0021, 0x0c04, 0x1021, 0x2021, 0x0022, 0x0c05, 0x1022, 0x2022,
-	0x0023, 0x0c06, 0x1023, 0x2023, 0x0024, 0x0c07, 0x1024, 0x2024,
-	0x0025, 0x0c08, 0x1025, 0x2025, 0x0026, 0x0c09, 0x1026, 0x2026,
-	0x0027, 0x0c0a, 0x1027, 0x2027, 0x0028, 0x0c0b, 0x1028, 0x2028,
-	0x0029, 0x0d04, 0x1029, 0x2029, 0x002a, 0x0d05, 0x102a, 0x202a,
-	0x002b, 0x0d06, 0x102b, 0x202b, 0x002c, 0x0d07, 0x102c, 0x202c,
-	0x002d, 0x0d08, 0x102d, 0x202d, 0x002e, 0x0d09, 0x102e, 0x202e,
-	0x002f, 0x0d0a, 0x102f, 0x202f, 0x0030, 0x0d0b, 0x1030, 0x2030,
-	0x0031, 0x0e04, 0x1031, 0x2031, 0x0032, 0x0e05, 0x1032, 0x2032,
-	0x0033, 0x0e06, 0x1033, 0x2033, 0x0034, 0x0e07, 0x1034, 0x2034,
-	0x0035, 0x0e08, 0x1035, 0x2035, 0x0036, 0x0e09, 0x1036, 0x2036,
-	0x0037, 0x0e0a, 0x1037, 0x2037, 0x0038, 0x0e0b, 0x1038, 0x2038,
-	0x0039, 0x0f04, 0x1039, 0x2039, 0x003a, 0x0f05, 0x103a, 0x203a,
-	0x003b, 0x0f06, 0x103b, 0x203b, 0x003c, 0x0f07, 0x103c, 0x203c,
-	0x0801, 0x0f08, 0x103d, 0x203d, 0x1001, 0x0f09, 0x103e, 0x203e,
-	0x1801, 0x0f0a, 0x103f, 0x203f, 0x2001, 0x0f0b, 0x1040, 0x2040
-};
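
Decoding one entry shows how the three bit-fields described above are laid out; tag byte 0x01 (char_table[0x01] == 0x0804) is a one-byte-offset copy:

    #include <assert.h>
    #include <stdint.h>

    int main(void) {
        uint16_t entry = 0x0804;            /* char_table[0x01], from above */
        assert((entry & 0xff)  == 4);       /* copy length: 4 bytes         */
        assert((entry & 0x700) == 0);       /* copy_offset / 256 bits: 0    */
        assert((entry >> 11)   == 1);       /* 1 extra byte (low offset)    */
        return 0;
    }
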
-
-struct snappy_decompressor {
-	struct source *reader;	/* Underlying source of bytes to decompress */
-	const char *ip;		/* Points to next buffered byte */
-	const char *ip_limit;	/* Points just past buffered bytes */
-	u32 peeked;		/* Bytes peeked from reader (need to skip) */
-	bool eof;		/* Hit end of input without an error? */
-	char scratch[5];	/* Temporary buffer for peekfast boundaries */
-};
-
-static void
-init_snappy_decompressor(struct snappy_decompressor *d, struct source *reader)
-{
-	d->reader = reader;
-	d->ip = NULL;
-	d->ip_limit = NULL;
-	d->peeked = 0;
-	d->eof = false;
-}
-
-static void exit_snappy_decompressor(struct snappy_decompressor *d)
-{
-	skip(d->reader, d->peeked);
-}
-
-/*
- * Read the uncompressed length stored at the start of the compressed data.
- * On success, stores the length in *result and returns true.
- * On failure, returns false.
- */
-static bool read_uncompressed_length(struct snappy_decompressor *d,
-				     u32 * result)
-{
-	DCHECK(d->ip == NULL);	/*
-				 * Must not have read anything yet
-				 * Length is encoded in 1..5 bytes
-				 */
-	*result = 0;
-	u32 shift = 0;
-	while (true) {
-		if (shift >= 32)
-			return false;
-		size_t n;
-		const char *ip = peek(d->reader, &n);
-		if (n == 0)
-			return false;
-		const unsigned char c = *(const unsigned char *)(ip);
-		skip(d->reader, 1);
-		*result |= (u32) (c & 0x7f) << shift;
-		if (c < 128) {
-			break;
-		}
-		shift += 7;
-	}
-	return true;
-}
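
The 1..5 byte length prefix parsed above is a little-endian base-128 varint; for example, 300 is encoded as the two bytes 0xAC 0x02. A standalone sketch of the same decoding over a plain byte array (the helper name is made up for the example):

    #include <assert.h>
    #include <stdint.h>
    #include <stddef.h>

    static int parse_varint32(const uint8_t *p, size_t n, uint32_t *result) {
        uint32_t shift = 0;
        *result = 0;
        for (size_t i = 0; i < n && shift < 32; i++, shift += 7) {
            *result |= (uint32_t)(p[i] & 0x7f) << shift;
            if (p[i] < 128)
                return 1;       /* high bit clear: last byte */
        }
        return 0;               /* truncated or overlong */
    }

    int main(void) {
        const uint8_t enc[] = { 0xAC, 0x02 };   /* varint encoding of 300 */
        uint32_t v;
        assert(parse_varint32(enc, sizeof(enc), &v) && v == 300);
        return 0;
    }
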
-
-static bool refill_tag(struct snappy_decompressor *d);
-
-/*
- * Process all tags found in the input.
- * Returns when the input is exhausted or an error occurs; the caller
- * checks d->eof and the writer's length afterwards.
- */
-static void decompress_all_tags(struct snappy_decompressor *d,
-				struct writer *writer)
-{
-	const char *ip = d->ip;
-
-	/*
-	 * We could have put this refill fragment only at the beginning of the loop.
-	 * However, duplicating it at the end of each branch gives the compiler more
-	 * scope to optimize the <ip_limit_ - ip> expression based on the local
-	 * context, which overall increases speed.
-	 */
-#define MAYBE_REFILL() \
-        if (d->ip_limit - ip < 5) {		\
-		d->ip = ip;			\
-		if (!refill_tag(d)) return;	\
-		ip = d->ip;			\
-        }
-
-
-	MAYBE_REFILL();
-	for (;;) {
-		if (d->ip_limit - ip < 5) {
-			d->ip = ip;
-			if (!refill_tag(d))
-				return;
-			ip = d->ip;
-		}
-
-		const unsigned char c = *(const unsigned char *)(ip++);
-
-		if ((c & 0x3) == LITERAL) {
-			u32 literal_length = (c >> 2) + 1;
-			if (writer_try_fast_append(writer, ip, (u32) (d->ip_limit - ip), 
-						   literal_length)) {
-				DCHECK_LT(literal_length, 61);
-				ip += literal_length;
-				MAYBE_REFILL();
-				continue;
-			}
-			if (unlikely(literal_length >= 61)) {
-				/* Long literal */
-				const u32 literal_ll = literal_length - 60;
-				literal_length = (get_unaligned_le32(ip) &
-						  wordmask[literal_ll]) + 1;
-				ip += literal_ll;
-			}
-
-			u32 avail = (u32) (d->ip_limit - ip);
-			while (avail < literal_length) {
-				if (!writer_append(writer, ip, avail))
-					return;
-				literal_length -= avail;
-				skip(d->reader, d->peeked);
-				size_t n;
-				ip = peek(d->reader, &n);
-				avail = (u32) n;
-				d->peeked = avail;
-				if (avail == 0)
-					return;	/* Premature end of input */
-				d->ip_limit = ip + avail;
-			}
-			if (!writer_append(writer, ip, literal_length))
-				return;
-			ip += literal_length;
-			MAYBE_REFILL();
-		} else {
-			const u32 entry = char_table[c];
-			const u32 trailer = get_unaligned_le32(ip) &
-				wordmask[entry >> 11];
-			const u32 length = entry & 0xff;
-			ip += entry >> 11;
-
-			/*
-			 * copy_offset/256 is encoded in bits 8..10.
-			 * By just fetching those bits, we get
-			 * copy_offset (since the bit-field starts at
-			 * bit 8).
-			 */
-			const u32 copy_offset = entry & 0x700;
-			if (!writer_append_from_self(writer,
-						     copy_offset + trailer,
-						     length))
-				return;
-			MAYBE_REFILL();
-		}
-	}
-}
-
-#undef MAYBE_REFILL
-
-static bool refill_tag(struct snappy_decompressor *d)
-{
-	const char *ip = d->ip;
-
-	if (ip == d->ip_limit) {
-		size_t n;
-		/* Fetch a new fragment from the reader */
-		skip(d->reader, d->peeked); /* All peeked bytes are used up */
-		ip = peek(d->reader, &n);
-		d->peeked = (u32) n;
-		if (n == 0) {
-			d->eof = true;
-			return false;
-		}
-		d->ip_limit = ip + n;
-	}
-
-	/* Read the tag character */
-	DCHECK_LT(ip, d->ip_limit);
-	const unsigned char c = *(const unsigned char *)(ip);
-	const u32 entry = char_table[c];
-	const u32 needed = (entry >> 11) + 1;	/* +1 byte for 'c' */
-	DCHECK_LE(needed, sizeof(d->scratch));
-
-	/* Read more bytes from reader if needed */
-	u32 nbuf = (u32) (d->ip_limit - ip);
-
-	if (nbuf < needed) {
-		/*
-		 * Stitch together bytes from ip and reader to form the word
-		 * contents.  We store the needed bytes in "scratch".  They
-		 * will be consumed immediately by the caller since we do not
-		 * read more than we need.
-		 */
-		memmove(d->scratch, ip, nbuf);
-		skip(d->reader, d->peeked); /* All peeked bytes are used up */
-		d->peeked = 0;
-		while (nbuf < needed) {
-			size_t length;
-			const char *src = peek(d->reader, &length);
-			if (length == 0)
-				return false;
-			u32 to_add = min_t(u32, needed - nbuf, (u32) length);
-			memcpy(d->scratch + nbuf, src, to_add);
-			nbuf += to_add;
-			skip(d->reader, to_add);
-		}
-		DCHECK_EQ(nbuf, needed);
-		d->ip = d->scratch;
-		d->ip_limit = d->scratch + needed;
-	} else if (nbuf < 5) {
-		/*
-		 * Have enough bytes, but move into scratch so that we do not
-		 * read past end of input
-		 */
-		memmove(d->scratch, ip, nbuf);
-		skip(d->reader, d->peeked); /* All peeked bytes are used up */
-		d->peeked = 0;
-		d->ip = d->scratch;
-		d->ip_limit = d->scratch + nbuf;
-	} else {
-		/* Pass pointer to buffer returned by reader. */
-		d->ip = ip;
-	}
-	return true;
-}
-
-static int internal_uncompress(struct source *r,
-			       struct writer *writer, u32 max_len)
-{
-	struct snappy_decompressor decompressor;
-	u32 uncompressed_len = 0;
-
-	init_snappy_decompressor(&decompressor, r);
-
-	if (!read_uncompressed_length(&decompressor, &uncompressed_len))
-		return -EIO;
-	/* Protect against possible DoS attack */
-	if ((u64) (uncompressed_len) > max_len)
-		return -EIO;
-
-	writer_set_expected_length(writer, uncompressed_len);
-
-	/* Process the entire input */
-	decompress_all_tags(&decompressor, writer);
-
-	exit_snappy_decompressor(&decompressor);
-	if (decompressor.eof && writer_check_length(writer))
-		return 0;
-	return -EIO;
-}
-
-static inline int sn_compress(struct snappy_env *env, struct source *reader,
-			   struct sink *writer)
-{
-	int err;
-	size_t written = 0;
-	int N = available(reader);
-	char ulength[kmax32];
-	char *p = varint_encode32(ulength, N);
-
-	append(writer, ulength, p - ulength);
-	written += (p - ulength);
-
-	while (N > 0) {
-		/* Get next block to compress (without copying if possible) */
-		size_t fragment_size;
-		const char *fragment = peek(reader, &fragment_size);
-		if (fragment_size == 0) {
-			err = -EIO;
-			goto out;
-		}
-		const unsigned num_to_read = min_t(int, N, kblock_size);
-		size_t bytes_read = fragment_size;
-
-		int pending_advance = 0;
-		if (bytes_read >= num_to_read) {
-			/* Buffer returned by reader is large enough */
-			pending_advance = num_to_read;
-			fragment_size = num_to_read;
-		}
-		else {
-			memcpy(env->scratch, fragment, bytes_read);
-			skip(reader, bytes_read);
-
-			while (bytes_read < num_to_read) {
-				fragment = peek(reader, &fragment_size);
-				size_t n =
-				    min_t(size_t, fragment_size,
-					  num_to_read - bytes_read);
-				memcpy((char *)(env->scratch) + bytes_read, fragment, n);
-				bytes_read += n;
-				skip(reader, n);
-			}
-			DCHECK_EQ(bytes_read, num_to_read);
-			fragment = env->scratch;
-			fragment_size = num_to_read;
-		}
-		if (fragment_size < num_to_read)
-			return -EIO;
-
-		/* Get encoding table for compression */
-		int table_size;
-		u16 *table = get_hash_table(env, num_to_read, &table_size);
-
-		/* Compress input_fragment and append to dest */
-		char *dest;
-		dest = sink_peek(writer, rd_kafka_snappy_max_compressed_length(num_to_read));
-		if (!dest) {
-			/*
-			 * Need a scratch buffer for the output,
-			 * because the byte sink doesn't have enough
-			 * in one piece.
-			 */
-			dest = env->scratch_output;
-		}
-		char *end = compress_fragment(fragment, fragment_size,
-					      dest, table, table_size);
-		append(writer, dest, end - dest);
-		written += (end - dest);
-
-		N -= num_to_read;
-		skip(reader, pending_advance);
-	}
-
-	err = 0;
-out:
-	return err;
-}
-
-#ifdef SG
-
-int rd_kafka_snappy_compress_iov(struct snappy_env *env,
-                                 const struct iovec *iov_in, size_t iov_in_cnt,
-                                 size_t input_length,
-                                 struct iovec *iov_out) {
-        struct source reader = {
-                .iov = (struct iovec *)iov_in,
-                .iovlen = (int)iov_in_cnt,
-                .total = input_length
-        };
-        struct sink writer = {
-                .iov = iov_out,
-                .iovlen = 1
-        };
-        int err = sn_compress(env, &reader, &writer);
-
-        iov_out->iov_len = writer.written;
-
-        return err;
-}
-EXPORT_SYMBOL(rd_kafka_snappy_compress_iov);
-
-/**
- * rd_kafka_snappy_compress - Compress a buffer using the snappy compressor.
- * @env: Preallocated environment
- * @input: Input buffer
- * @input_length: Length of input_buffer
- * @compressed: Output buffer for compressed data
- * @compressed_length: The real length of the output written here.
- *
- * Return 0 on success, otherwise a negative error code.
- *
- * The output buffer must be at least
- * rd_kafka_snappy_max_compressed_length(input_length) bytes long.
- *
- * Requires a preallocated environment from rd_kafka_snappy_init_env.
- * The environment does not keep state over individual calls
- * of this function, just preallocates the memory.
- */
-int rd_kafka_snappy_compress(struct snappy_env *env,
-		    const char *input,
-		    size_t input_length,
-		    char *compressed, size_t *compressed_length)
-{
-	struct iovec iov_in = {
-		.iov_base = (char *)input,
-		.iov_len = input_length,
-	};
-	struct iovec iov_out = {
-		.iov_base = compressed,
-		.iov_len = 0xffffffff,
-	};
-        return rd_kafka_snappy_compress_iov(env,
-                                            &iov_in, 1, input_length,
-                                            &iov_out);
-}
-EXPORT_SYMBOL(rd_kafka_snappy_compress);
-
-int rd_kafka_snappy_uncompress_iov(struct iovec *iov_in, int iov_in_len,
-			   size_t input_len, char *uncompressed)
-{
-	struct source reader = {
-		.iov = iov_in,
-		.iovlen = iov_in_len,
-		.total = input_len
-	};
-	struct writer output = {
-		.base = uncompressed,
-		.op = uncompressed
-	};
-	return internal_uncompress(&reader, &output, 0xffffffff);
-}
-EXPORT_SYMBOL(rd_kafka_snappy_uncompress_iov);
-
-/**
- * rd_kafka_snappy_uncompress - Uncompress a snappy compressed buffer
- * @compressed: Input buffer with compressed data
- * @n: length of compressed buffer
- * @uncompressed: buffer for uncompressed data
- *
- * The uncompressed data buffer must be at least
- * rd_kafka_snappy_uncompressed_length(compressed) bytes long.
- *
- * Return 0 on success, otherwise a negative error code.
- */
-int rd_kafka_snappy_uncompress(const char *compressed, size_t n, char *uncompressed)
-{
-	struct iovec iov = {
-		.iov_base = (char *)compressed,
-		.iov_len = n
-	};
-	return rd_kafka_snappy_uncompress_iov(&iov, 1, n, uncompressed);
-}
-EXPORT_SYMBOL(rd_kafka_snappy_uncompress);
-
-
-/**
- * @brief Decompress Snappy message with Snappy-java framing.
- *
- * @returns a malloced buffer with the uncompressed data, or NULL on failure.
- */
-char *rd_kafka_snappy_java_uncompress (const char *inbuf, size_t inlen,
-                                       size_t *outlenp,
-                                       char *errstr, size_t errstr_size) {
-        int pass;
-        char *outbuf = NULL;
-
-        /**
-         * Traverse all chunks in two passes:
-         *  pass 1: calculate total uncompressed length
-         *  pass 2: uncompress
-         *
-         * Each chunk is prefixed with a 4-byte big-endian length. */
-
-        for (pass = 1 ; pass <= 2 ; pass++) {
-                ssize_t of = 0;  /* inbuf offset */
-                ssize_t uof = 0; /* outbuf offset */
-
-                while (of + 4 <= (ssize_t)inlen) {
-                        uint32_t clen; /* compressed length */
-                        size_t ulen; /* uncompressed length */
-                        int r;
-
-                        memcpy(&clen, inbuf+of, 4);
-                        clen = be32toh(clen);
-                        of += 4;
-
-                        if (unlikely(clen > inlen - of)) {
-                                rd_snprintf(errstr, errstr_size,
-                                            "Invalid snappy-java chunk length "
-                                            "%"PRId32" > %"PRIdsz
-                                            " available bytes",
-                                            clen, (ssize_t)inlen - of);
-                                return NULL;
-                        }
-
-                        /* Acquire uncompressed length */
-                        if (unlikely(!rd_kafka_snappy_uncompressed_length(
-                                             inbuf+of, clen, &ulen))) {
-                                rd_snprintf(errstr, errstr_size,
-                                            "Failed to get length of "
-                                            "(snappy-java framed) Snappy "
-                                            "compressed payload "
-                                            "(clen %"PRId32")",
-                                            clen);
-                                return NULL;
-                        }
-
-                        if (pass == 1) {
-                                /* pass 1: calculate total length */
-                                of  += clen;
-                                uof += ulen;
-                                continue;
-                        }
-
-                        /* pass 2: Uncompress to outbuf */
-                        if (unlikely((r = rd_kafka_snappy_uncompress(
-                                              inbuf+of, clen, outbuf+uof)))) {
-                                rd_snprintf(errstr, errstr_size,
-                                            "Failed to decompress Snappy-java "
-                                            "framed payload of size %"PRId32
-                                            ": %s",
-                                            clen,
-                                            rd_strerror(-r/*negative errno*/));
-                                rd_free(outbuf);
-                                return NULL;
-                        }
-
-                        of  += clen;
-                        uof += ulen;
-                }
-
-                if (unlikely(of != (ssize_t)inlen)) {
-                        rd_snprintf(errstr, errstr_size,
-                                    "%"PRIusz" trailing bytes in Snappy-java "
-                                    "framed compressed data",
-                                    inlen - of);
-                        if (outbuf)
-                                rd_free(outbuf);
-                        return NULL;
-                }
-
-                if (pass == 1) {
-                        if (uof <= 0) {
-                                rd_snprintf(errstr, errstr_size,
-                                            "Empty Snappy-java framed data");
-                                return NULL;
-                        }
-
-                        /* Allocate memory for uncompressed data */
-                        outbuf = rd_malloc(uof);
-                        if (unlikely(!outbuf)) {
-                                rd_snprintf(errstr, errstr_size,
-                                           "Failed to allocate memory "
-                                            "(%"PRIdsz") for "
-                                            "uncompressed Snappy data: %s",
-                                            uof, rd_strerror(errno));
-                                return NULL;
-                        }
-
-                } else {
-                        /* pass 2 */
-                        *outlenp = uof;
-                }
-        }
-
-        return outbuf;
-}
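
A minimal sketch of the framing handled above: each chunk is a 4-byte big-endian compressed length followed by that many Snappy bytes (the payload here is a placeholder, not valid Snappy data):

    #include <assert.h>
    #include <stdint.h>

    int main(void) {
        const unsigned char frame[] = {
            0x00, 0x00, 0x00, 0x05,        /* clen = 5 (big-endian)    */
            'd', 'u', 'm', 'm', 'y'        /* 5 bytes of chunk payload */
        };
        uint32_t clen = ((uint32_t)frame[0] << 24) | ((uint32_t)frame[1] << 16) |
                        ((uint32_t)frame[2] << 8)  |  (uint32_t)frame[3];
        assert(clen == 5 && sizeof(frame) == 4 + clen);
        return 0;
    }
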
-
-
-
-#else
-/**
- * rd_kafka_snappy_compress - Compress a buffer using the snappy compressor.
- * @env: Preallocated environment
- * @input: Input buffer
- * @input_length: Length of input_buffer
- * @compressed: Output buffer for compressed data
- * @compressed_length: The real length of the output written here.
- *
- * Return 0 on success, otherwise a negative error code.
- *
- * The output buffer must be at least
- * rd_kafka_snappy_max_compressed_length(input_length) bytes long.
- *
- * Requires a preallocated environment from rd_kafka_snappy_init_env.
- * The environment does not keep state over individual calls
- * of this function, just preallocates the memory.
- */
-int rd_kafka_snappy_compress(struct snappy_env *env,
-		    const char *input,
-		    size_t input_length,
-		    char *compressed, size_t *compressed_length)
-{
-	struct source reader = {
-		.ptr = input,
-		.left = input_length
-	};
-	struct sink writer = {
-		.dest = compressed,
-	};
-	int err = sn_compress(env, &reader, &writer);
-
-	/* Compute how many bytes were added */
-	*compressed_length = (writer.dest - compressed);
-	return err;
-}
-EXPORT_SYMBOL(rd_kafka_snappy_compress);
-
-/**
- * rd_kafka_snappy_uncompress - Uncompress a snappy compressed buffer
- * @compressed: Input buffer with compressed data
- * @n: length of compressed buffer
- * @uncompressed: buffer for uncompressed data
- *
- * The uncompressed data buffer must be at least
- * rd_kafka_snappy_uncompressed_length(compressed) bytes long.
- *
- * Return 0 on success, otherwise a negative error code.
- */
-int rd_kafka_snappy_uncompress(const char *compressed, size_t n, char *uncompressed)
-{
-	struct source reader = {
-		.ptr = compressed,
-		.left = n
-	};
-	struct writer output = {
-		.base = uncompressed,
-		.op = uncompressed
-	};
-	return internal_uncompress(&reader, &output, 0xffffffff);
-}
-EXPORT_SYMBOL(rd_kafka_snappy_uncompress);
-#endif
-
-static inline void clear_env(struct snappy_env *env)
-{
-    memset(env, 0, sizeof(*env));
-}
-
-#ifdef SG
-/**
- * rd_kafka_snappy_init_env_sg - Allocate snappy compression environment
- * @env: Environment to preallocate
- * @sg: Whether input will ever use scatter-gather (multiple iovec entries)
- *
- * If false is passed to sg then multiple entries in an iovec
- * are not legal.
- * Returns 0 on success, otherwise negative errno.
- * Must run in process context.
- */
-int rd_kafka_snappy_init_env_sg(struct snappy_env *env, bool sg)
-{
-	if (rd_kafka_snappy_init_env(env) < 0)
-		goto error;
-
-	if (sg) {
-		env->scratch = vmalloc(kblock_size);
-		if (!env->scratch)
-			goto error;
-		env->scratch_output =
-			vmalloc(rd_kafka_snappy_max_compressed_length(kblock_size));
-		if (!env->scratch_output)
-			goto error;
-	}
-	return 0;
-error:
-	rd_kafka_snappy_free_env(env);
-	return -ENOMEM;
-}
-EXPORT_SYMBOL(rd_kafka_snappy_init_env_sg);
-#endif
-
-/**
- * rd_kafka_snappy_init_env - Allocate snappy compression environment
- * @env: Environment to preallocate
- *
- * Passing multiple entries in an iovec is not allowed
- * on the environment allocated here.
- * Returns 0 on success, otherwise negative errno.
- * Must run in process context.
- */
-int rd_kafka_snappy_init_env(struct snappy_env *env)
-{
-    clear_env(env);
-	env->hash_table = vmalloc(sizeof(u16) * kmax_hash_table_size);
-	if (!env->hash_table)
-		return -ENOMEM;
-	return 0;
-}
-EXPORT_SYMBOL(rd_kafka_snappy_init_env);
-
-/**
- * rd_kafka_snappy_free_env - Free a snappy compression environment
- * @env: Environment to free.
- *
- * Must run in process context.
- */
-void rd_kafka_snappy_free_env(struct snappy_env *env)
-{
-	vfree(env->hash_table);
-#ifdef SG
-	vfree(env->scratch);
-	vfree(env->scratch_output);
-#endif
-	clear_env(env);
-}
-EXPORT_SYMBOL(rd_kafka_snappy_free_env);
-
-#pragma GCC diagnostic pop /* -Wcast-align ignore */
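
For context, a hedged end-to-end usage sketch of the API removed here (assuming the non-scatter-gather build, i.e. SG undefined; error handling trimmed to asserts):

    #include <assert.h>
    #include <stdlib.h>
    #include <string.h>
    #include "snappy.h"     /* the header removed below in this commit */

    int main(void) {
        struct snappy_env env;
        const char input[] = "hello hello hello hello hello hello";
        size_t clen = 0, ulen = 0;
        char ubuf[sizeof(input)];
        char *cbuf = malloc(rd_kafka_snappy_max_compressed_length(sizeof(input)));

        assert(cbuf && rd_kafka_snappy_init_env(&env) == 0);
        assert(rd_kafka_snappy_compress(&env, input, sizeof(input),
                                        cbuf, &clen) == 0);
        assert(rd_kafka_snappy_uncompressed_length(cbuf, clen, &ulen) &&
               ulen == sizeof(input));
        assert(rd_kafka_snappy_uncompress(cbuf, clen, ubuf) == 0);
        assert(memcmp(ubuf, input, sizeof(input)) == 0);

        rd_kafka_snappy_free_env(&env);
        free(cbuf);
        return 0;
    }
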

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/snappy.h
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/snappy.h b/thirdparty/librdkafka-0.11.1/src/snappy.h
deleted file mode 100644
index ca5a7dc..0000000
--- a/thirdparty/librdkafka-0.11.1/src/snappy.h
+++ /dev/null
@@ -1,34 +0,0 @@
-#ifndef _LINUX_SNAPPY_H
-#define _LINUX_SNAPPY_H 1
-
-#include <stdbool.h>
-#include <stddef.h>
-
-/* Only needed for compression. This preallocates the worst case */
-struct snappy_env {
-	unsigned short *hash_table;
-	void *scratch;
-	void *scratch_output;
-};
-
-struct iovec;
-int rd_kafka_snappy_init_env(struct snappy_env *env);
-int rd_kafka_snappy_init_env_sg(struct snappy_env *env, bool sg);
-void rd_kafka_snappy_free_env(struct snappy_env *env);
-int rd_kafka_snappy_uncompress_iov(struct iovec *iov_in, int iov_in_len,
-			   size_t input_len, char *uncompressed);
-int rd_kafka_snappy_uncompress(const char *compressed, size_t n, char *uncompressed);
-char *rd_kafka_snappy_java_uncompress (const char *inbuf, size_t inlen,
-                                       size_t *outlenp,
-                                       char *errstr, size_t errstr_size);
-int rd_kafka_snappy_compress_iov(struct snappy_env *env,
-                                 const struct iovec *iov_in, size_t iov_in_cnt,
-                                 size_t input_length,
-                                 struct iovec *iov_out);
-bool rd_kafka_snappy_uncompressed_length(const char *buf, size_t len, size_t *result);
-size_t rd_kafka_snappy_max_compressed_length(size_t source_len);
-
-
-
-
-#endif

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/snappy_compat.h
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/snappy_compat.h b/thirdparty/librdkafka-0.11.1/src/snappy_compat.h
deleted file mode 100644
index 8282463..0000000
--- a/thirdparty/librdkafka-0.11.1/src/snappy_compat.h
+++ /dev/null
@@ -1,169 +0,0 @@
-#include "rdkafka_int.h"
-#include "rdendian.h"
-
-
-
-#ifdef __FreeBSD__
-#  include <sys/endian.h>
-#elif defined(__APPLE_CC_) || defined(__MACH__)  /* MacOS/X support */
-#  include <machine/endian.h>
-
-#if    __DARWIN_BYTE_ORDER == __DARWIN_LITTLE_ENDIAN
-#  define	htole16(x) (x)
-#  define	le32toh(x) (x)
-#elif  __DARWIN_BYTE_ORDER == __DARWIN_BIG_ENDIAN
-#  define	htole16(x) __DARWIN_OSSwapInt16(x)
-#  define	le32toh(x) __DARWIN_OSSwapInt32(x)
-#else
-#  error "Endianness is undefined"
-#endif
-
-
-#elif !defined(__WIN32__) && !defined(_MSC_VER) && !defined(sun) && !defined(_AIX)
-#  include <endian.h>
-#endif
-
-#include <stdlib.h>
-#include <assert.h>
-#include <string.h>
-#include <errno.h>
-#include <stdbool.h>
-#include <limits.h>
-#if !defined(__WIN32__) && !defined(_MSC_VER)
-#include <sys/uio.h>
-#endif
-
-#ifdef __ANDROID__
-#define le32toh letoh32
-#endif
-
-#if defined(__WIN32__) && defined(SG)
-struct iovec {
-	void *iov_base;	/* Pointer to data.  */
-	size_t iov_len;	/* Length of data.  */
-};
-#endif
-
-#define get_unaligned_memcpy(x) ({ \
-		typeof(*(x)) _ret; \
-		memcpy(&_ret, (x), sizeof(*(x))); \
-		_ret; })
-#define put_unaligned_memcpy(v,x) ({ \
-		typeof((v)) _v = (v); \
-		memcpy((x), &_v, sizeof(*(x))); })
-
-#define get_unaligned_direct(x) (*(x))
-#define put_unaligned_direct(v,x) (*(x) = (v))
-
-// Potentially unaligned loads and stores.
-// x86 and PowerPC can simply do these loads and stores natively.
-#if defined(__i386__) || defined(__x86_64__) || defined(__powerpc__) || defined(_M_IX86) || defined(_M_X64) || defined(_M_AMD64)
-
-#define get_unaligned get_unaligned_direct
-#define put_unaligned put_unaligned_direct
-#define get_unaligned64 get_unaligned_direct
-#define put_unaligned64 put_unaligned_direct
-
-// ARMv7 and newer support native unaligned accesses, but only of 16-bit
-// and 32-bit values (not 64-bit); older versions either raise a fatal signal,
-// do an unaligned read and rotate the words around a bit, or do the reads very
-// slowly (trip through kernel mode). There's no simple #define that says just
-// “ARMv7 or higher”, so we have to filter away all ARMv5 and ARMv6
-// sub-architectures.
-//
-// This is a mess, but there's not much we can do about it.
-#elif defined(__arm__) && \
-	!defined(__ARM_ARCH_4__) &&		\
-	!defined(__ARM_ARCH_4T__) &&		\
-	!defined(__ARM_ARCH_5__) &&		\
-	!defined(__ARM_ARCH_5T__) &&		\
-	!defined(__ARM_ARCH_5TE__) &&		\
-	!defined(__ARM_ARCH_5TEJ__) &&		\
-	!defined(__ARM_ARCH_6__) &&		\
-	!defined(__ARM_ARCH_6J__) &&		\
-	!defined(__ARM_ARCH_6K__) &&		\
-	!defined(__ARM_ARCH_6Z__) &&		\
-	!defined(__ARM_ARCH_6ZK__) &&		\
-	!defined(__ARM_ARCH_6T2__)
-
-#define get_unaligned get_unaligned_direct
-#define put_unaligned put_unaligned_direct
-#define get_unaligned64 get_unaligned_memcpy
-#define put_unaligned64 put_unaligned_memcpy
-
-// These macros are provided for architectures that don't support
-// unaligned loads and stores.
-#else
-
-#define get_unaligned get_unaligned_memcpy
-#define put_unaligned put_unaligned_memcpy
-#define get_unaligned64 get_unaligned_memcpy
-#define put_unaligned64 put_unaligned_memcpy
-
-#endif
-
-#define get_unaligned_le32(x) (le32toh(get_unaligned((u32 *)(x))))
-#define put_unaligned_le16(v,x) (put_unaligned(htole16(v), (u16 *)(x)))
-
-typedef unsigned char u8;
-typedef unsigned short u16;
-typedef unsigned u32;
-typedef unsigned long long u64;
-
-#ifdef _MSC_VER
-#define BUG_ON(x) do { if (unlikely((x))) abort(); } while (0)
-#else
-#define BUG_ON(x) assert(!(x))
-#endif
-
-
-#define vmalloc(x) malloc(x)
-#define vfree(x) free(x)
-
-#define EXPORT_SYMBOL(x)
-
-#define ARRAY_SIZE(x) (sizeof(x) / sizeof(*(x)))
-
-#ifndef likely
-#define likely(x) __builtin_expect((x), 1)
-#define unlikely(x) __builtin_expect((x), 0)
-#endif
-
-#define min_t(t,x,y) ((x) < (y) ? (x) : (y))
-#define max_t(t,x,y) ((x) > (y) ? (x) : (y))
-
-#if __BYTE_ORDER == __LITTLE_ENDIAN
-#define __LITTLE_ENDIAN__ 1
-#endif
-
-#if __LITTLE_ENDIAN__ == 1 || defined(__WIN32__)
-#ifndef htole16
-#define htole16(x) (x)
-#endif
-#ifndef le32toh
-#define le32toh(x) (x)
-#endif
-#endif
-
-
-#if defined(_MSC_VER)
-#if BYTE_ORDER == LITTLE_ENDIAN
-#define htole16(x) (x)
-#define le32toh(x) (x)
-
-#elif BYTE_ORDER == BIG_ENDIAN
-#define htole16(x) __builtin_bswap16(x)
-#define le32toh(x) __builtin_bswap32(x)
-#endif
-#endif
-
-#if defined(sun)
-#ifndef htole16
-#define htole16(x) LE_16(x)
-#endif
-#ifndef le32toh
-#define le32toh(x) LE_32(x)
-#endif
-#endif
-
-#define BITS_PER_LONG (__SIZEOF_LONG__ * 8)


[38/51] [partial] nifi-minifi-cpp git commit: MINIFICPP-512 - upgrade to librdkafka 0.11.4

Posted by ph...@apache.org.
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/queue.h
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/queue.h b/thirdparty/librdkafka-0.11.1/src/queue.h
deleted file mode 100644
index d1ba148..0000000
--- a/thirdparty/librdkafka-0.11.1/src/queue.h
+++ /dev/null
@@ -1,850 +0,0 @@
-/*	$NetBSD: queue.h,v 1.68 2014/11/19 08:10:01 uebayasi Exp $	*/
-
-/*
- * Copyright (c) 1991, 1993
- *	The Regents of the University of California.  All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. Neither the name of the University nor the names of its contributors
- *    may be used to endorse or promote products derived from this software
- *    without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- *	@(#)queue.h	8.5 (Berkeley) 8/20/94
- */
-
-#ifndef	_SYS_QUEUE_H_
-#define	_SYS_QUEUE_H_
-
-/*
- * This file defines five types of data structures: singly-linked lists,
- * lists, simple queues, tail queues, and circular queues.
- *
- * A singly-linked list is headed by a single forward pointer. The
- * elements are singly linked for minimum space and pointer manipulation
- * overhead at the expense of O(n) removal for arbitrary elements. New
- * elements can be added to the list after an existing element or at the
- * head of the list.  Elements being removed from the head of the list
- * should use the explicit macro for this purpose for optimum
- * efficiency. A singly-linked list may only be traversed in the forward
- * direction.  Singly-linked lists are ideal for applications with large
- * datasets and few or no removals or for implementing a LIFO queue.
- *
- * A list is headed by a single forward pointer (or an array of forward
- * pointers for a hash table header). The elements are doubly linked
- * so that an arbitrary element can be removed without a need to
- * traverse the list. New elements can be added to the list before
- * or after an existing element or at the head of the list. A list
- * may only be traversed in the forward direction.
- *
- * A simple queue is headed by a pair of pointers, one the head of the
- * list and the other to the tail of the list. The elements are singly
- * linked to save space, so elements can only be removed from the
- * head of the list. New elements can be added to the list after
- * an existing element, at the head of the list, or at the end of the
- * list. A simple queue may only be traversed in the forward direction.
- *
- * A tail queue is headed by a pair of pointers, one to the head of the
- * list and the other to the tail of the list. The elements are doubly
- * linked so that an arbitrary element can be removed without a need to
- * traverse the list. New elements can be added to the list before or
- * after an existing element, at the head of the list, or at the end of
- * the list. A tail queue may be traversed in either direction.
- *
- * A circle queue is headed by a pair of pointers, one to the head of the
- * list and the other to the tail of the list. The elements are doubly
- * linked so that an arbitrary element can be removed without a need to
- * traverse the list. New elements can be added to the list before or after
- * an existing element, at the head of the list, or at the end of the list.
- * A circle queue may be traversed in either direction, but has a more
- * complex end of list detection.
- *
- * For details on the use of these macros, see the queue(3) manual page.
- */
-
-/*
- * Include the definition of NULL only on NetBSD because sys/null.h
- * is not available elsewhere.  This conditional makes the header
- * portable and it can simply be dropped verbatim into any system.
- * The caveat is that on other systems some other header
- * must provide NULL before the macros can be used.
- */
-#ifdef __NetBSD__
-#include <sys/null.h>
-#endif
-
-#if defined(QUEUEDEBUG)
-# if defined(_KERNEL)
-#  define QUEUEDEBUG_ABORT(...) panic(__VA_ARGS__)
-# else
-#  include <err.h>
-#  define QUEUEDEBUG_ABORT(...) err(1, __VA_ARGS__)
-# endif
-#endif
-
-/*
- * Singly-linked List definitions.
- */
-#define	SLIST_HEAD(name, type)						\
-struct name {								\
-	struct type *slh_first;	/* first element */			\
-}
-
-#define	SLIST_HEAD_INITIALIZER(head)					\
-	{ NULL }
-
-#define	SLIST_ENTRY(type)						\
-struct {								\
-	struct type *sle_next;	/* next element */			\
-}
-
-/*
- * Singly-linked List access methods.
- */
-#define	SLIST_FIRST(head)	((head)->slh_first)
-#define	SLIST_END(head)		NULL
-#define	SLIST_EMPTY(head)	((head)->slh_first == NULL)
-#define	SLIST_NEXT(elm, field)	((elm)->field.sle_next)
-
-#define	SLIST_FOREACH(var, head, field)					\
-	for((var) = (head)->slh_first;					\
-	    (var) != SLIST_END(head);					\
-	    (var) = (var)->field.sle_next)
-
-#define	SLIST_FOREACH_SAFE(var, head, field, tvar)			\
-	for ((var) = SLIST_FIRST((head));				\
-	    (var) != SLIST_END(head) &&					\
-	    ((tvar) = SLIST_NEXT((var), field), 1);			\
-	    (var) = (tvar))
-
-/*
- * Singly-linked List functions.
- */
-#define	SLIST_INIT(head) do {						\
-	(head)->slh_first = SLIST_END(head);				\
-} while (/*CONSTCOND*/0)
-
-#define	SLIST_INSERT_AFTER(slistelm, elm, field) do {			\
-	(elm)->field.sle_next = (slistelm)->field.sle_next;		\
-	(slistelm)->field.sle_next = (elm);				\
-} while (/*CONSTCOND*/0)
-
-#define	SLIST_INSERT_HEAD(head, elm, field) do {			\
-	(elm)->field.sle_next = (head)->slh_first;			\
-	(head)->slh_first = (elm);					\
-} while (/*CONSTCOND*/0)
-
-#define	SLIST_REMOVE_AFTER(slistelm, field) do {			\
-	(slistelm)->field.sle_next =					\
-	    SLIST_NEXT(SLIST_NEXT((slistelm), field), field);		\
-} while (/*CONSTCOND*/0)
-
-#define	SLIST_REMOVE_HEAD(head, field) do {				\
-	(head)->slh_first = (head)->slh_first->field.sle_next;		\
-} while (/*CONSTCOND*/0)
-
-#define	SLIST_REMOVE(head, elm, type, field) do {			\
-	if ((head)->slh_first == (elm)) {				\
-		SLIST_REMOVE_HEAD((head), field);			\
-	}								\
-	else {								\
-		struct type *curelm = (head)->slh_first;		\
-		while(curelm->field.sle_next != (elm))			\
-			curelm = curelm->field.sle_next;		\
-		curelm->field.sle_next =				\
-		    curelm->field.sle_next->field.sle_next;		\
-	}								\
-} while (/*CONSTCOND*/0)
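
As a quick illustration of the singly-linked list interface described at the top of this header (the element type and values are made up for the example):

    #include <assert.h>
    #include "queue.h"      /* the header being removed here */

    struct entry {
        int value;
        SLIST_ENTRY(entry) link;            /* embedded next pointer */
    };

    SLIST_HEAD(entry_list, entry);

    int main(void) {
        struct entry_list head = SLIST_HEAD_INITIALIZER(head);
        struct entry a = { 1, { NULL } }, b = { 2, { NULL } };
        struct entry *it;
        int sum = 0;

        SLIST_INSERT_HEAD(&head, &a, link);
        SLIST_INSERT_HEAD(&head, &b, link);    /* list is now b -> a */
        SLIST_FOREACH(it, &head, link)
            sum += it->value;
        assert(sum == 3);

        SLIST_REMOVE_HEAD(&head, link);        /* drop b */
        assert(SLIST_FIRST(&head) == &a && SLIST_NEXT(&a, link) == NULL);
        return 0;
    }
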
-
-
-/*
- * List definitions.
- */
-#define	LIST_HEAD(name, type)						\
-struct name {								\
-	struct type *lh_first;	/* first element */			\
-}
-
-#define	LIST_HEAD_INITIALIZER(head)					\
-	{ NULL }
-
-#define	LIST_ENTRY(type)						\
-struct {								\
-	struct type *le_next;	/* next element */			\
-	struct type **le_prev;	/* address of previous next element */	\
-}
-
-/*
- * List access methods.
- */
-#define	LIST_FIRST(head)		((head)->lh_first)
-#define	LIST_END(head)			NULL
-#define	LIST_EMPTY(head)		((head)->lh_first == LIST_END(head))
-#define	LIST_NEXT(elm, field)		((elm)->field.le_next)
-
-#define	LIST_FOREACH(var, head, field)					\
-	for ((var) = ((head)->lh_first);				\
-	    (var) != LIST_END(head);					\
-	    (var) = ((var)->field.le_next))
-
-#define	LIST_FOREACH_SAFE(var, head, field, tvar)			\
-	for ((var) = LIST_FIRST((head));				\
-	    (var) != LIST_END(head) &&					\
-	    ((tvar) = LIST_NEXT((var), field), 1);			\
-	    (var) = (tvar))
-
-#define	LIST_MOVE(head1, head2) do {					\
-	LIST_INIT((head2));						\
-	if (!LIST_EMPTY((head1))) {					\
-		(head2)->lh_first = (head1)->lh_first;			\
-		LIST_INIT((head1));					\
-	}								\
-} while (/*CONSTCOND*/0)
-
-/*
- * List functions.
- */
-#if defined(QUEUEDEBUG)
-#define	QUEUEDEBUG_LIST_INSERT_HEAD(head, elm, field)			\
-	if ((head)->lh_first &&						\
-	    (head)->lh_first->field.le_prev != &(head)->lh_first)	\
-		QUEUEDEBUG_ABORT("LIST_INSERT_HEAD %p %s:%d", (head),	\
-		    __FILE__, __LINE__);
-#define	QUEUEDEBUG_LIST_OP(elm, field)					\
-	if ((elm)->field.le_next &&					\
-	    (elm)->field.le_next->field.le_prev !=			\
-	    &(elm)->field.le_next)					\
-		QUEUEDEBUG_ABORT("LIST_* forw %p %s:%d", (elm),		\
-		    __FILE__, __LINE__);				\
-	if (*(elm)->field.le_prev != (elm))				\
-		QUEUEDEBUG_ABORT("LIST_* back %p %s:%d", (elm),		\
-		    __FILE__, __LINE__);
-#define	QUEUEDEBUG_LIST_POSTREMOVE(elm, field)				\
-	(elm)->field.le_next = (void *)1L;				\
-	(elm)->field.le_prev = (void *)1L;
-#else
-#define	QUEUEDEBUG_LIST_INSERT_HEAD(head, elm, field)
-#define	QUEUEDEBUG_LIST_OP(elm, field)
-#define	QUEUEDEBUG_LIST_POSTREMOVE(elm, field)
-#endif
-
-#define	LIST_INIT(head) do {						\
-	(head)->lh_first = LIST_END(head);				\
-} while (/*CONSTCOND*/0)
-
-#define	LIST_INSERT_AFTER(listelm, elm, field) do {			\
-	QUEUEDEBUG_LIST_OP((listelm), field)				\
-	if (((elm)->field.le_next = (listelm)->field.le_next) != 	\
-	    LIST_END(head))						\
-		(listelm)->field.le_next->field.le_prev =		\
-		    &(elm)->field.le_next;				\
-	(listelm)->field.le_next = (elm);				\
-	(elm)->field.le_prev = &(listelm)->field.le_next;		\
-} while (/*CONSTCOND*/0)
-
-#define	LIST_INSERT_BEFORE(listelm, elm, field) do {			\
-	QUEUEDEBUG_LIST_OP((listelm), field)				\
-	(elm)->field.le_prev = (listelm)->field.le_prev;		\
-	(elm)->field.le_next = (listelm);				\
-	*(listelm)->field.le_prev = (elm);				\
-	(listelm)->field.le_prev = &(elm)->field.le_next;		\
-} while (/*CONSTCOND*/0)
-
-#define	LIST_INSERT_HEAD(head, elm, field) do {				\
-	QUEUEDEBUG_LIST_INSERT_HEAD((head), (elm), field)		\
-	if (((elm)->field.le_next = (head)->lh_first) != LIST_END(head))\
-		(head)->lh_first->field.le_prev = &(elm)->field.le_next;\
-	(head)->lh_first = (elm);					\
-	(elm)->field.le_prev = &(head)->lh_first;			\
-} while (/*CONSTCOND*/0)
-
-#define	LIST_REMOVE(elm, field) do {					\
-	QUEUEDEBUG_LIST_OP((elm), field)				\
-	if ((elm)->field.le_next != NULL)				\
-		(elm)->field.le_next->field.le_prev = 			\
-		    (elm)->field.le_prev;				\
-	*(elm)->field.le_prev = (elm)->field.le_next;			\
-	QUEUEDEBUG_LIST_POSTREMOVE((elm), field)			\
-} while (/*CONSTCOND*/0)
-
-#define LIST_REPLACE(elm, elm2, field) do {				\
-	if (((elm2)->field.le_next = (elm)->field.le_next) != NULL)	\
-		(elm2)->field.le_next->field.le_prev =			\
-		    &(elm2)->field.le_next;				\
-	(elm2)->field.le_prev = (elm)->field.le_prev;			\
-	*(elm2)->field.le_prev = (elm2);				\
-	QUEUEDEBUG_LIST_POSTREMOVE((elm), field)			\
-} while (/*CONSTCOND*/0)
-
-/*
- * Simple queue definitions.
- */
-#define	SIMPLEQ_HEAD(name, type)					\
-struct name {								\
-	struct type *sqh_first;	/* first element */			\
-	struct type **sqh_last;	/* addr of last next element */		\
-}
-
-#define	SIMPLEQ_HEAD_INITIALIZER(head)					\
-	{ NULL, &(head).sqh_first }
-
-#define	SIMPLEQ_ENTRY(type)						\
-struct {								\
-	struct type *sqe_next;	/* next element */			\
-}
-
-/*
- * Simple queue access methods.
- */
-#define	SIMPLEQ_FIRST(head)		((head)->sqh_first)
-#define	SIMPLEQ_END(head)		NULL
-#define	SIMPLEQ_EMPTY(head)		((head)->sqh_first == SIMPLEQ_END(head))
-#define	SIMPLEQ_NEXT(elm, field)	((elm)->field.sqe_next)
-
-#define	SIMPLEQ_FOREACH(var, head, field)				\
-	for ((var) = ((head)->sqh_first);				\
-	    (var) != SIMPLEQ_END(head);					\
-	    (var) = ((var)->field.sqe_next))
-
-#define	SIMPLEQ_FOREACH_SAFE(var, head, field, next)			\
-	for ((var) = ((head)->sqh_first);				\
-	    (var) != SIMPLEQ_END(head) &&				\
-	    ((next = ((var)->field.sqe_next)), 1);			\
-	    (var) = (next))
-
-/*
- * Simple queue functions.
- */
-#define	SIMPLEQ_INIT(head) do {						\
-	(head)->sqh_first = NULL;					\
-	(head)->sqh_last = &(head)->sqh_first;				\
-} while (/*CONSTCOND*/0)
-
-#define	SIMPLEQ_INSERT_HEAD(head, elm, field) do {			\
-	if (((elm)->field.sqe_next = (head)->sqh_first) == NULL)	\
-		(head)->sqh_last = &(elm)->field.sqe_next;		\
-	(head)->sqh_first = (elm);					\
-} while (/*CONSTCOND*/0)
-
-#define	SIMPLEQ_INSERT_TAIL(head, elm, field) do {			\
-	(elm)->field.sqe_next = NULL;					\
-	*(head)->sqh_last = (elm);					\
-	(head)->sqh_last = &(elm)->field.sqe_next;			\
-} while (/*CONSTCOND*/0)
-
-#define	SIMPLEQ_INSERT_AFTER(head, listelm, elm, field) do {		\
-	if (((elm)->field.sqe_next = (listelm)->field.sqe_next) == NULL)\
-		(head)->sqh_last = &(elm)->field.sqe_next;		\
-	(listelm)->field.sqe_next = (elm);				\
-} while (/*CONSTCOND*/0)
-
-#define	SIMPLEQ_REMOVE_HEAD(head, field) do {				\
-	if (((head)->sqh_first = (head)->sqh_first->field.sqe_next) == NULL) \
-		(head)->sqh_last = &(head)->sqh_first;			\
-} while (/*CONSTCOND*/0)
-
-#define SIMPLEQ_REMOVE_AFTER(head, elm, field) do {			\
-	if (((elm)->field.sqe_next = (elm)->field.sqe_next->field.sqe_next) \
-	    == NULL)							\
-		(head)->sqh_last = &(elm)->field.sqe_next;		\
-} while (/*CONSTCOND*/0)
-
-#define	SIMPLEQ_REMOVE(head, elm, type, field) do {			\
-	if ((head)->sqh_first == (elm)) {				\
-		SIMPLEQ_REMOVE_HEAD((head), field);			\
-	} else {							\
-		struct type *curelm = (head)->sqh_first;		\
-		while (curelm->field.sqe_next != (elm))			\
-			curelm = curelm->field.sqe_next;		\
-		if ((curelm->field.sqe_next =				\
-			curelm->field.sqe_next->field.sqe_next) == NULL) \
-			    (head)->sqh_last = &(curelm)->field.sqe_next; \
-	}								\
-} while (/*CONSTCOND*/0)
-
-#define	SIMPLEQ_CONCAT(head1, head2) do {				\
-	if (!SIMPLEQ_EMPTY((head2))) {					\
-		*(head1)->sqh_last = (head2)->sqh_first;		\
-		(head1)->sqh_last = (head2)->sqh_last;		\
-		SIMPLEQ_INIT((head2));					\
-	}								\
-} while (/*CONSTCOND*/0)
-
-#define	SIMPLEQ_LAST(head, type, field)					\
-	(SIMPLEQ_EMPTY((head)) ?						\
-		NULL :							\
-	        ((struct type *)(void *)				\
-		((char *)((head)->sqh_last) - offsetof(struct type, field))))
-
-/*
- * Tail queue definitions.
- */
-#define	_TAILQ_HEAD(name, type, qual)					\
-struct name {								\
-	qual type *tqh_first;		/* first element */		\
-	qual type *qual *tqh_last;	/* addr of last next element */	\
-}
-#define TAILQ_HEAD(name, type)	_TAILQ_HEAD(name, struct type,)
-
-#define	TAILQ_HEAD_INITIALIZER(head)					\
-	{ TAILQ_END(head), &(head).tqh_first }
-
-#define	_TAILQ_ENTRY(type, qual)					\
-struct {								\
-	qual type *tqe_next;		/* next element */		\
-	qual type *qual *tqe_prev;	/* address of previous next element */\
-}
-#define TAILQ_ENTRY(type)	_TAILQ_ENTRY(struct type,)
-
-/*
- * Tail queue access methods.
- */
-#define	TAILQ_FIRST(head)		((head)->tqh_first)
-#define	TAILQ_END(head)			(NULL)
-#define	TAILQ_NEXT(elm, field)		((elm)->field.tqe_next)
-#define	TAILQ_LAST(head, headname) \
-	(*(((struct headname *)((head)->tqh_last))->tqh_last))
-#define	TAILQ_PREV(elm, headname, field) \
-	(*(((struct headname *)((elm)->field.tqe_prev))->tqh_last))
-#define	TAILQ_EMPTY(head)		(TAILQ_FIRST(head) == TAILQ_END(head))
-
-
-#define	TAILQ_FOREACH(var, head, field)					\
-	for ((var) = ((head)->tqh_first);				\
-	    (var) != TAILQ_END(head);					\
-	    (var) = ((var)->field.tqe_next))
-
-#define	TAILQ_FOREACH_SAFE(var, head, field, next)			\
-	for ((var) = ((head)->tqh_first);				\
-	    (var) != TAILQ_END(head) &&					\
-	    ((next) = TAILQ_NEXT(var, field), 1); (var) = (next))
-
-#define	TAILQ_FOREACH_REVERSE(var, head, headname, field)		\
-	for ((var) = (*(((struct headname *)((head)->tqh_last))->tqh_last));\
-	    (var) != TAILQ_END(head);					\
-	    (var) = (*(((struct headname *)((var)->field.tqe_prev))->tqh_last)))
-
-#define	TAILQ_FOREACH_REVERSE_SAFE(var, head, headname, field, prev)	\
-	for ((var) = TAILQ_LAST((head), headname);			\
-	    (var) != TAILQ_END(head) && 				\
-	    ((prev) = TAILQ_PREV((var), headname, field), 1); (var) = (prev))
-
-/*
- * Tail queue functions.
- */
-#if defined(QUEUEDEBUG)
-#define	QUEUEDEBUG_TAILQ_INSERT_HEAD(head, elm, field)			\
-	if ((head)->tqh_first &&					\
-	    (head)->tqh_first->field.tqe_prev != &(head)->tqh_first)	\
-		QUEUEDEBUG_ABORT("TAILQ_INSERT_HEAD %p %s:%d", (head),	\
-		    __FILE__, __LINE__);
-#define	QUEUEDEBUG_TAILQ_INSERT_TAIL(head, elm, field)			\
-	if (*(head)->tqh_last != NULL)					\
-		QUEUEDEBUG_ABORT("TAILQ_INSERT_TAIL %p %s:%d", (head),	\
-		    __FILE__, __LINE__);
-#define	QUEUEDEBUG_TAILQ_OP(elm, field)					\
-	if ((elm)->field.tqe_next &&					\
-	    (elm)->field.tqe_next->field.tqe_prev !=			\
-	    &(elm)->field.tqe_next)					\
-		QUEUEDEBUG_ABORT("TAILQ_* forw %p %s:%d", (elm),	\
-		    __FILE__, __LINE__);				\
-	if (*(elm)->field.tqe_prev != (elm))				\
-		QUEUEDEBUG_ABORT("TAILQ_* back %p %s:%d", (elm),	\
-		    __FILE__, __LINE__);
-#define	QUEUEDEBUG_TAILQ_PREREMOVE(head, elm, field)			\
-	if ((elm)->field.tqe_next == NULL &&				\
-	    (head)->tqh_last != &(elm)->field.tqe_next)			\
-		QUEUEDEBUG_ABORT("TAILQ_PREREMOVE head %p elm %p %s:%d",\
-		    (head), (elm), __FILE__, __LINE__);
-#define	QUEUEDEBUG_TAILQ_POSTREMOVE(elm, field)				\
-	(elm)->field.tqe_next = (void *)1L;				\
-	(elm)->field.tqe_prev = (void *)1L;
-#else
-#define	QUEUEDEBUG_TAILQ_INSERT_HEAD(head, elm, field)
-#define	QUEUEDEBUG_TAILQ_INSERT_TAIL(head, elm, field)
-#define	QUEUEDEBUG_TAILQ_OP(elm, field)
-#define	QUEUEDEBUG_TAILQ_PREREMOVE(head, elm, field)
-#define	QUEUEDEBUG_TAILQ_POSTREMOVE(elm, field)
-#endif
-
-#define	TAILQ_INIT(head) do {						\
-	(head)->tqh_first = TAILQ_END(head);				\
-	(head)->tqh_last = &(head)->tqh_first;				\
-} while (/*CONSTCOND*/0)
-
-#define	TAILQ_INSERT_HEAD(head, elm, field) do {			\
-	QUEUEDEBUG_TAILQ_INSERT_HEAD((head), (elm), field)		\
-	if (((elm)->field.tqe_next = (head)->tqh_first) != TAILQ_END(head))\
-		(head)->tqh_first->field.tqe_prev =			\
-		    &(elm)->field.tqe_next;				\
-	else								\
-		(head)->tqh_last = &(elm)->field.tqe_next;		\
-	(head)->tqh_first = (elm);					\
-	(elm)->field.tqe_prev = &(head)->tqh_first;			\
-} while (/*CONSTCOND*/0)
-
-#define	TAILQ_INSERT_TAIL(head, elm, field) do {			\
-	QUEUEDEBUG_TAILQ_INSERT_TAIL((head), (elm), field)		\
-	(elm)->field.tqe_next = TAILQ_END(head);			\
-	(elm)->field.tqe_prev = (head)->tqh_last;			\
-	*(head)->tqh_last = (elm);					\
-	(head)->tqh_last = &(elm)->field.tqe_next;			\
-} while (/*CONSTCOND*/0)
-
-#define	TAILQ_INSERT_AFTER(head, listelm, elm, field) do {		\
-	QUEUEDEBUG_TAILQ_OP((listelm), field)				\
-	if (((elm)->field.tqe_next = (listelm)->field.tqe_next) != 	\
-	    TAILQ_END(head))						\
-		(elm)->field.tqe_next->field.tqe_prev = 		\
-		    &(elm)->field.tqe_next;				\
-	else								\
-		(head)->tqh_last = &(elm)->field.tqe_next;		\
-	(listelm)->field.tqe_next = (elm);				\
-	(elm)->field.tqe_prev = &(listelm)->field.tqe_next;		\
-} while (/*CONSTCOND*/0)
-
-#define	TAILQ_INSERT_BEFORE(listelm, elm, field) do {			\
-	QUEUEDEBUG_TAILQ_OP((listelm), field)				\
-	(elm)->field.tqe_prev = (listelm)->field.tqe_prev;		\
-	(elm)->field.tqe_next = (listelm);				\
-	*(listelm)->field.tqe_prev = (elm);				\
-	(listelm)->field.tqe_prev = &(elm)->field.tqe_next;		\
-} while (/*CONSTCOND*/0)
-
-#define	TAILQ_REMOVE(head, elm, field) do {				\
-	QUEUEDEBUG_TAILQ_PREREMOVE((head), (elm), field)		\
-	QUEUEDEBUG_TAILQ_OP((elm), field)				\
-	if (((elm)->field.tqe_next) != TAILQ_END(head))			\
-		(elm)->field.tqe_next->field.tqe_prev = 		\
-		    (elm)->field.tqe_prev;				\
-	else								\
-		(head)->tqh_last = (elm)->field.tqe_prev;		\
-	*(elm)->field.tqe_prev = (elm)->field.tqe_next;			\
-	QUEUEDEBUG_TAILQ_POSTREMOVE((elm), field);			\
-} while (/*CONSTCOND*/0)
-
-#define TAILQ_REPLACE(head, elm, elm2, field) do {			\
-        if (((elm2)->field.tqe_next = (elm)->field.tqe_next) != 	\
-	    TAILQ_END(head))   						\
-                (elm2)->field.tqe_next->field.tqe_prev =		\
-                    &(elm2)->field.tqe_next;				\
-        else								\
-                (head)->tqh_last = &(elm2)->field.tqe_next;		\
-        (elm2)->field.tqe_prev = (elm)->field.tqe_prev;			\
-        *(elm2)->field.tqe_prev = (elm2);				\
-	QUEUEDEBUG_TAILQ_POSTREMOVE((elm), field);			\
-} while (/*CONSTCOND*/0)
-
-#define	TAILQ_CONCAT(head1, head2, field) do {				\
-	if (!TAILQ_EMPTY(head2)) {					\
-		*(head1)->tqh_last = (head2)->tqh_first;		\
-		(head2)->tqh_first->field.tqe_prev = (head1)->tqh_last;	\
-		(head1)->tqh_last = (head2)->tqh_last;			\
-		TAILQ_INIT((head2));					\
-	}								\
-} while (/*CONSTCOND*/0)
-
-/*
- * Singly-linked Tail queue declarations.
- */
-#define	STAILQ_HEAD(name, type)						\
-struct name {								\
-	struct type *stqh_first;	/* first element */		\
-	struct type **stqh_last;	/* addr of last next element */	\
-}
-
-#define	STAILQ_HEAD_INITIALIZER(head)					\
-	{ NULL, &(head).stqh_first }
-
-#define	STAILQ_ENTRY(type)						\
-struct {								\
-	struct type *stqe_next;	/* next element */			\
-}
-
-/*
- * Singly-linked Tail queue access methods.
- */
-#define	STAILQ_FIRST(head)	((head)->stqh_first)
-#define	STAILQ_END(head)	NULL
-#define	STAILQ_NEXT(elm, field)	((elm)->field.stqe_next)
-#define	STAILQ_EMPTY(head)	(STAILQ_FIRST(head) == STAILQ_END(head))
-
-/*
- * Singly-linked Tail queue functions.
- */
-#define	STAILQ_INIT(head) do {						\
-	(head)->stqh_first = NULL;					\
-	(head)->stqh_last = &(head)->stqh_first;				\
-} while (/*CONSTCOND*/0)
-
-#define	STAILQ_INSERT_HEAD(head, elm, field) do {			\
-	if (((elm)->field.stqe_next = (head)->stqh_first) == NULL)	\
-		(head)->stqh_last = &(elm)->field.stqe_next;		\
-	(head)->stqh_first = (elm);					\
-} while (/*CONSTCOND*/0)
-
-#define	STAILQ_INSERT_TAIL(head, elm, field) do {			\
-	(elm)->field.stqe_next = NULL;					\
-	*(head)->stqh_last = (elm);					\
-	(head)->stqh_last = &(elm)->field.stqe_next;			\
-} while (/*CONSTCOND*/0)
-
-#define	STAILQ_INSERT_AFTER(head, listelm, elm, field) do {		\
-	if (((elm)->field.stqe_next = (listelm)->field.stqe_next) == NULL)\
-		(head)->stqh_last = &(elm)->field.stqe_next;		\
-	(listelm)->field.stqe_next = (elm);				\
-} while (/*CONSTCOND*/0)
-
-#define	STAILQ_REMOVE_HEAD(head, field) do {				\
-	if (((head)->stqh_first = (head)->stqh_first->field.stqe_next) == NULL) \
-		(head)->stqh_last = &(head)->stqh_first;			\
-} while (/*CONSTCOND*/0)
-
-#define	STAILQ_REMOVE(head, elm, type, field) do {			\
-	if ((head)->stqh_first == (elm)) {				\
-		STAILQ_REMOVE_HEAD((head), field);			\
-	} else {							\
-		struct type *curelm = (head)->stqh_first;		\
-		while (curelm->field.stqe_next != (elm))			\
-			curelm = curelm->field.stqe_next;		\
-		if ((curelm->field.stqe_next =				\
-			curelm->field.stqe_next->field.stqe_next) == NULL) \
-			    (head)->stqh_last = &(curelm)->field.stqe_next; \
-	}								\
-} while (/*CONSTCOND*/0)
-
-#define	STAILQ_FOREACH(var, head, field)				\
-	for ((var) = ((head)->stqh_first);				\
-		(var);							\
-		(var) = ((var)->field.stqe_next))
-
-#define	STAILQ_FOREACH_SAFE(var, head, field, tvar)			\
-	for ((var) = STAILQ_FIRST((head));				\
-	    (var) && ((tvar) = STAILQ_NEXT((var), field), 1);		\
-	    (var) = (tvar))
-
-#define	STAILQ_CONCAT(head1, head2) do {				\
-	if (!STAILQ_EMPTY((head2))) {					\
-		*(head1)->stqh_last = (head2)->stqh_first;		\
-		(head1)->stqh_last = (head2)->stqh_last;		\
-		STAILQ_INIT((head2));					\
-	}								\
-} while (/*CONSTCOND*/0)
-
-#define	STAILQ_LAST(head, type, field)					\
-	(STAILQ_EMPTY((head)) ?						\
-		NULL :							\
-	        ((struct type *)(void *)				\
-		((char *)((head)->stqh_last) - offsetof(struct type, field))))
-
-
-#ifndef _KERNEL
-/*
- * Circular queue definitions. Do not use. We still keep the macros
- * for compatibility, but because of pointer-aliasing issues their use
- * is discouraged.
- */
-
-/*
- * __launder_type():  We use this ugly hack to work around the compiler
- * noticing that two types may not alias each other and eliding tests in code.
- * We hit this in the CIRCLEQ macros when comparing 'struct name *' and
- * 'struct type *' (see CIRCLEQ_HEAD()).  Modern compilers (such as GCC
- * 4.8) declare these comparisons as always false, causing the code to
- * not run as designed.
- *
- * This hack is only to be used for comparisons and thus can be fully const.
- * Do not use for assignment.
- *
- * If we ever choose to change the ABI of the CIRCLEQ macros, we could fix
- * this by changing the head/tail sentinel values, but see the note above
- * this one.
- */
-#ifdef _MSC_VER
-#define __launder_type(x)  ((const void *)(x))
-#else
-static inline const void * __launder_type(const void *);
-static inline const void *
-__launder_type(const void *__x)
-{
-	__asm __volatile("" : "+r" (__x));
-	return __x;
-}
-#endif
-
-#if defined(QUEUEDEBUG)
-#define QUEUEDEBUG_CIRCLEQ_HEAD(head, field)				\
-	if ((head)->cqh_first != CIRCLEQ_ENDC(head) &&			\
-	    (head)->cqh_first->field.cqe_prev != CIRCLEQ_ENDC(head))	\
-		QUEUEDEBUG_ABORT("CIRCLEQ head forw %p %s:%d", (head),	\
-		      __FILE__, __LINE__);				\
-	if ((head)->cqh_last != CIRCLEQ_ENDC(head) &&			\
-	    (head)->cqh_last->field.cqe_next != CIRCLEQ_ENDC(head))	\
-		QUEUEDEBUG_ABORT("CIRCLEQ head back %p %s:%d", (head),	\
-		      __FILE__, __LINE__);
-#define QUEUEDEBUG_CIRCLEQ_ELM(head, elm, field)			\
-	if ((elm)->field.cqe_next == CIRCLEQ_ENDC(head)) {		\
-		if ((head)->cqh_last != (elm))				\
-			QUEUEDEBUG_ABORT("CIRCLEQ elm last %p %s:%d",	\
-			    (elm), __FILE__, __LINE__);			\
-	} else {							\
-		if ((elm)->field.cqe_next->field.cqe_prev != (elm))	\
-			QUEUEDEBUG_ABORT("CIRCLEQ elm forw %p %s:%d",	\
-			    (elm), __FILE__, __LINE__);			\
-	}								\
-	if ((elm)->field.cqe_prev == CIRCLEQ_ENDC(head)) {		\
-		if ((head)->cqh_first != (elm))				\
-			QUEUEDEBUG_ABORT("CIRCLEQ elm first %p %s:%d",	\
-			    (elm), __FILE__, __LINE__);			\
-	} else {							\
-		if ((elm)->field.cqe_prev->field.cqe_next != (elm))	\
-			QUEUEDEBUG_ABORT("CIRCLEQ elm prev %p %s:%d",	\
-			    (elm), __FILE__, __LINE__);			\
-	}
-#define QUEUEDEBUG_CIRCLEQ_POSTREMOVE(elm, field)			\
-	(elm)->field.cqe_next = (void *)1L;				\
-	(elm)->field.cqe_prev = (void *)1L;
-#else
-#define QUEUEDEBUG_CIRCLEQ_HEAD(head, field)
-#define QUEUEDEBUG_CIRCLEQ_ELM(head, elm, field)
-#define QUEUEDEBUG_CIRCLEQ_POSTREMOVE(elm, field)
-#endif
-
-#define	CIRCLEQ_HEAD(name, type)					\
-struct name {								\
-	struct type *cqh_first;		/* first element */		\
-	struct type *cqh_last;		/* last element */		\
-}
-
-#define	CIRCLEQ_HEAD_INITIALIZER(head)					\
-	{ CIRCLEQ_END(&head), CIRCLEQ_END(&head) }
-
-#define	CIRCLEQ_ENTRY(type)						\
-struct {								\
-	struct type *cqe_next;		/* next element */		\
-	struct type *cqe_prev;		/* previous element */		\
-}
-
-/*
- * Circular queue functions.
- */
-#define	CIRCLEQ_INIT(head) do {						\
-	(head)->cqh_first = CIRCLEQ_END(head);				\
-	(head)->cqh_last = CIRCLEQ_END(head);				\
-} while (/*CONSTCOND*/0)
-
-#define	CIRCLEQ_INSERT_AFTER(head, listelm, elm, field) do {		\
-	QUEUEDEBUG_CIRCLEQ_HEAD((head), field)				\
-	QUEUEDEBUG_CIRCLEQ_ELM((head), (listelm), field)		\
-	(elm)->field.cqe_next = (listelm)->field.cqe_next;		\
-	(elm)->field.cqe_prev = (listelm);				\
-	if ((listelm)->field.cqe_next == CIRCLEQ_ENDC(head))		\
-		(head)->cqh_last = (elm);				\
-	else								\
-		(listelm)->field.cqe_next->field.cqe_prev = (elm);	\
-	(listelm)->field.cqe_next = (elm);				\
-} while (/*CONSTCOND*/0)
-
-#define	CIRCLEQ_INSERT_BEFORE(head, listelm, elm, field) do {		\
-	QUEUEDEBUG_CIRCLEQ_HEAD((head), field)				\
-	QUEUEDEBUG_CIRCLEQ_ELM((head), (listelm), field)		\
-	(elm)->field.cqe_next = (listelm);				\
-	(elm)->field.cqe_prev = (listelm)->field.cqe_prev;		\
-	if ((listelm)->field.cqe_prev == CIRCLEQ_ENDC(head))		\
-		(head)->cqh_first = (elm);				\
-	else								\
-		(listelm)->field.cqe_prev->field.cqe_next = (elm);	\
-	(listelm)->field.cqe_prev = (elm);				\
-} while (/*CONSTCOND*/0)
-
-#define	CIRCLEQ_INSERT_HEAD(head, elm, field) do {			\
-	QUEUEDEBUG_CIRCLEQ_HEAD((head), field)				\
-	(elm)->field.cqe_next = (head)->cqh_first;			\
-	(elm)->field.cqe_prev = CIRCLEQ_END(head);			\
-	if ((head)->cqh_last == CIRCLEQ_ENDC(head))			\
-		(head)->cqh_last = (elm);				\
-	else								\
-		(head)->cqh_first->field.cqe_prev = (elm);		\
-	(head)->cqh_first = (elm);					\
-} while (/*CONSTCOND*/0)
-
-#define	CIRCLEQ_INSERT_TAIL(head, elm, field) do {			\
-	QUEUEDEBUG_CIRCLEQ_HEAD((head), field)				\
-	(elm)->field.cqe_next = CIRCLEQ_END(head);			\
-	(elm)->field.cqe_prev = (head)->cqh_last;			\
-	if ((head)->cqh_first == CIRCLEQ_ENDC(head))			\
-		(head)->cqh_first = (elm);				\
-	else								\
-		(head)->cqh_last->field.cqe_next = (elm);		\
-	(head)->cqh_last = (elm);					\
-} while (/*CONSTCOND*/0)
-
-#define	CIRCLEQ_REMOVE(head, elm, field) do {				\
-	QUEUEDEBUG_CIRCLEQ_HEAD((head), field)				\
-	QUEUEDEBUG_CIRCLEQ_ELM((head), (elm), field)			\
-	if ((elm)->field.cqe_next == CIRCLEQ_ENDC(head))		\
-		(head)->cqh_last = (elm)->field.cqe_prev;		\
-	else								\
-		(elm)->field.cqe_next->field.cqe_prev =			\
-		    (elm)->field.cqe_prev;				\
-	if ((elm)->field.cqe_prev == CIRCLEQ_ENDC(head))		\
-		(head)->cqh_first = (elm)->field.cqe_next;		\
-	else								\
-		(elm)->field.cqe_prev->field.cqe_next =			\
-		    (elm)->field.cqe_next;				\
-	QUEUEDEBUG_CIRCLEQ_POSTREMOVE((elm), field)			\
-} while (/*CONSTCOND*/0)
-
-#define	CIRCLEQ_FOREACH(var, head, field)				\
-	for ((var) = ((head)->cqh_first);				\
-		(var) != CIRCLEQ_ENDC(head);				\
-		(var) = ((var)->field.cqe_next))
-
-#define	CIRCLEQ_FOREACH_REVERSE(var, head, field)			\
-	for ((var) = ((head)->cqh_last);				\
-		(var) != CIRCLEQ_ENDC(head);				\
-		(var) = ((var)->field.cqe_prev))
-
-/*
- * Circular queue access methods.
- */
-#define	CIRCLEQ_FIRST(head)		((head)->cqh_first)
-#define	CIRCLEQ_LAST(head)		((head)->cqh_last)
-/* For comparisons */
-#define	CIRCLEQ_ENDC(head)		(__launder_type(head))
-/* For assignments */
-#define	CIRCLEQ_END(head)		((void *)(head))
-#define	CIRCLEQ_NEXT(elm, field)	((elm)->field.cqe_next)
-#define	CIRCLEQ_PREV(elm, field)	((elm)->field.cqe_prev)
-#define	CIRCLEQ_EMPTY(head)						\
-    (CIRCLEQ_FIRST(head) == CIRCLEQ_ENDC(head))
-
-#define CIRCLEQ_LOOP_NEXT(head, elm, field)				\
-	(((elm)->field.cqe_next == CIRCLEQ_ENDC(head))			\
-	    ? ((head)->cqh_first)					\
-	    : (elm->field.cqe_next))
-#define CIRCLEQ_LOOP_PREV(head, elm, field)				\
-	(((elm)->field.cqe_prev == CIRCLEQ_ENDC(head))			\
-	    ? ((head)->cqh_last)					\
-	    : (elm->field.cqe_prev))
-#endif /* !_KERNEL */
-
-#endif	/* !_SYS_QUEUE_H_ */
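
For reference, a minimal usage sketch of the TAILQ_* macros removed above.
This is illustrative only: it assumes a platform <sys/queue.h> that provides
the same TAILQ_* macros, and the struct/field names (item, item_head, link)
are invented for the example.

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/queue.h>

    struct item {
            int value;
            TAILQ_ENTRY(item) link;      /* embedded next/prev links */
    };

    TAILQ_HEAD(item_head, item);         /* declares struct item_head */

    int main (void) {
            struct item_head head;
            struct item *it, *next;
            int i;

            TAILQ_INIT(&head);

            for (i = 0; i < 3; i++) {
                    it = calloc(1, sizeof(*it));
                    it->value = i;
                    TAILQ_INSERT_TAIL(&head, it, link);
            }

            TAILQ_FOREACH(it, &head, link)
                    printf("value %d\n", it->value);

            /* Remove and free every element; 'next' is saved first so the
             * removal does not invalidate the traversal. */
            for (it = TAILQ_FIRST(&head); it != NULL; it = next) {
                    next = TAILQ_NEXT(it, link);
                    TAILQ_REMOVE(&head, it, link);
                    free(it);
            }
            return 0;
    }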

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rd.h
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rd.h b/thirdparty/librdkafka-0.11.1/src/rd.h
deleted file mode 100644
index 9c2700a..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rd.h
+++ /dev/null
@@ -1,455 +0,0 @@
-/*
- * librd - Rapid Development C library
- *
- * Copyright (c) 2012, Magnus Edenhill
- * All rights reserved.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met: 
- * 
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer. 
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution. 
- * 
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-
-#pragma once
-
-#ifndef _MSC_VER
-#ifndef _GNU_SOURCE
-#define _GNU_SOURCE  /* for strndup() */
-#endif
-#define __need_IOV_MAX
-#ifndef _POSIX_C_SOURCE
-#define _POSIX_C_SOURCE 200809L  /* for timespec on solaris */
-#endif
-#endif
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <errno.h>
-#include <time.h>
-#include <assert.h>
-#include <limits.h>
-
-#include "tinycthread.h"
-#include "rdsysqueue.h"
-
-#ifdef _MSC_VER
-/* Visual Studio */
-#include "win32_config.h"
-#else
-/* POSIX / UNIX based systems */
-#include "../config.h" /* mklove output */
-#endif
-
-#ifdef _MSC_VER
-/* Win32/Visual Studio */
-#include "rdwin32.h"
-
-#else
-/* POSIX / UNIX based systems */
-#include "rdposix.h"
-#endif
-
-#include "rdtypes.h"
-
-
-/* Debug assert, only enabled with --enable-devel */
-#if ENABLE_DEVEL == 1
-#define rd_dassert(cond) rd_assert(cond)
-#else
-#define rd_dassert(cond)  do {} while (0)
-#endif
-
-
-/** Assert if reached */
-#define RD_NOTREACHED() rd_kafka_assert(NULL, !*"/* NOTREACHED */ violated")
-
-
-
-/**
-* Allocator wrappers.
-* We operate under the premise that if a (small) memory
-* allocation fails, all hope is lost and the application
-* will fail anyway, so there is no need to handle it gracefully.
-*/
-static RD_INLINE RD_UNUSED void *rd_calloc(size_t num, size_t sz) {
-	void *p = calloc(num, sz);
-	rd_assert(p);
-	return p;
-}
-
-static RD_INLINE RD_UNUSED void *rd_malloc(size_t sz) {
-	void *p = malloc(sz);
-	rd_assert(p);
-	return p;
-}
-
-static RD_INLINE RD_UNUSED void *rd_realloc(void *ptr, size_t sz) {
-	void *p = realloc(ptr, sz);
-	rd_assert(p);
-	return p;
-}
-
-static RD_INLINE RD_UNUSED void rd_free(void *ptr) {
-	free(ptr);
-}
-
-static RD_INLINE RD_UNUSED char *rd_strdup(const char *s) {
-#ifndef _MSC_VER
-	char *n = strdup(s);
-#else
-	char *n = _strdup(s);
-#endif
-	rd_assert(n);
-	return n;
-}
-
-static RD_INLINE RD_UNUSED char *rd_strndup(const char *s, size_t len) {
-#if HAVE_STRNDUP
-	char *n = strndup(s, len);
-	rd_assert(n);
-#else
-	char *n = malloc(len + 1);
-	rd_assert(n);
-	memcpy(n, s, len);
-	n[len] = '\0';
-#endif
-	return n;
-}
-
-
-
-/*
- * Portability
- */
-
-#ifdef strndupa
-#define rd_strndupa(DESTPTR,PTR,LEN)  (*(DESTPTR) = strndupa(PTR,LEN))
-#else
-#define rd_strndupa(DESTPTR,PTR,LEN) (*(DESTPTR) = rd_alloca(LEN+1), \
-      memcpy(*(DESTPTR), (PTR), LEN), *((*(DESTPTR))+(LEN)) = 0)
-#endif
-
-#ifdef strdupa
-#define rd_strdupa(DESTPTR,PTR)  (*(DESTPTR) = strdupa(PTR))
-#else
-#define rd_strdupa(DESTPTR,PTR)  rd_strndupa(DESTPTR,PTR,strlen(PTR))
-#endif
-
-#ifndef IOV_MAX
-#ifdef __APPLE__
-/* Some versions of Mac OS X don't define IOV_MAX */
-#define IOV_MAX 1024
-#elif defined(_MSC_VER)
-/* There is no IOV_MAX on MSVC but it is used internally in librdkafka */
-#define IOV_MAX 1024
-#else
-#error "IOV_MAX not defined"
-#endif
-#endif
-
-
-/* Round/align X upwards to STRIDE, which must be power of 2. */
-#define RD_ROUNDUP(X,STRIDE) (((X) + ((STRIDE) - 1)) & ~(STRIDE-1))
-
-#define RD_ARRAY_SIZE(A)          (sizeof((A)) / sizeof(*(A)))
-#define RD_ARRAYSIZE(A)           RD_ARRAY_SIZE(A)
-#define RD_SIZEOF(TYPE,MEMBER)    sizeof(((TYPE *)NULL)->MEMBER)
-#define RD_OFFSETOF(TYPE,MEMBER)  ((size_t) &(((TYPE *)NULL)->MEMBER))
-
-/**
- * Returns the 'I'th array element from static sized array 'A'
- * or NULL if 'I' is out of range.
- * var-args is an optional prefix to provide the correct return type.
- */
-#define RD_ARRAY_ELEM(A,I,...)				\
-	((unsigned int)(I) < RD_ARRAY_SIZE(A) ? __VA_ARGS__ (A)[(I)] : NULL)
-
-
-#define RD_STRINGIFY(X)  # X
-
-
-
-#define RD_MIN(a,b) ((a) < (b) ? (a) : (b))
-#define RD_MAX(a,b) ((a) > (b) ? (a) : (b))
-
-
-/**
- * Cap an integer (of any type) to reside within the defined limit.
- */
-#define RD_INT_CAP(val,low,hi) \
-	((val) < (low) ? low : ((val) > (hi) ? (hi) : (val)))
-
-
-
-/**
- * Allocate 'size' bytes, copy 'src', return pointer to new memory.
- *
- * Use rd_free() to free the returned pointer.
-*/
-static RD_INLINE RD_UNUSED void *rd_memdup (const void *src, size_t size) {
-	void *dst = rd_malloc(size);
-	memcpy(dst, src, size);
-	return dst;
-}
-
-/**
- * @brief Memset &OBJ to 0, does automatic sizeof(OBJ).
- */
-#define RD_MEMZERO(OBJ) memset(&(OBJ), 0, sizeof(OBJ))
-
-
-/**
- * Generic refcnt interface
- */
-#ifndef _MSC_VER
-/* Mutexes (critical sections) are slow, even when uncontended, on Windows */
-#define RD_REFCNT_USE_LOCKS 1
-#endif
-
-#ifdef RD_REFCNT_USE_LOCKS
-typedef struct rd_refcnt_t {
-        mtx_t lock;
-        int v;
-} rd_refcnt_t;
-#else
-typedef rd_atomic32_t rd_refcnt_t;
-#endif
-
-#ifdef RD_REFCNT_USE_LOCKS
-static RD_INLINE RD_UNUSED int rd_refcnt_init (rd_refcnt_t *R, int v) {
-        int r;
-        mtx_init(&R->lock, mtx_plain);
-        mtx_lock(&R->lock);
-        r = R->v = v;
-        mtx_unlock(&R->lock);
-        return r;
-}
-#else
-#define rd_refcnt_init(R,v)  rd_atomic32_init(R, v)
-#endif
-
-#ifdef RD_REFCNT_USE_LOCKS
-static RD_INLINE RD_UNUSED void rd_refcnt_destroy (rd_refcnt_t *R) {
-        mtx_lock(&R->lock);
-        rd_assert(R->v == 0);
-        mtx_unlock(&R->lock);
-
-        mtx_destroy(&R->lock);
-}
-#else
-#define rd_refcnt_destroy(R) do { } while (0)
-#endif
-
-
-#ifdef RD_REFCNT_USE_LOCKS
-static RD_INLINE RD_UNUSED int rd_refcnt_set (rd_refcnt_t *R, int v) {
-        int r;
-        mtx_lock(&R->lock);
-        r = R->v = v;
-        mtx_unlock(&R->lock);
-        return r;
-}
-#else
-#define rd_refcnt_set(R,v)  rd_atomic32_set(R, v)
-#endif
-
-
-#ifdef RD_REFCNT_USE_LOCKS
-static RD_INLINE RD_UNUSED int rd_refcnt_add0 (rd_refcnt_t *R) {
-        int r;
-        mtx_lock(&R->lock);
-        r = ++(R->v);
-        mtx_unlock(&R->lock);
-        return r;
-}
-#else
-#define rd_refcnt_add0(R)  rd_atomic32_add(R, 1)
-#endif
-
-static RD_INLINE RD_UNUSED int rd_refcnt_sub0 (rd_refcnt_t *R) {
-        int r;
-#ifdef RD_REFCNT_USE_LOCKS
-        mtx_lock(&R->lock);
-        r = --(R->v);
-        mtx_unlock(&R->lock);
-#else
-        r = rd_atomic32_sub(R, 1);
-#endif
-        if (r < 0)
-                rd_assert(!*"refcnt sub-zero");
-        return r;
-}
-
-#ifdef RD_REFCNT_USE_LOCKS
-static RD_INLINE RD_UNUSED int rd_refcnt_get (rd_refcnt_t *R) {
-        int r;
-        mtx_lock(&R->lock);
-        r = R->v;
-        mtx_unlock(&R->lock);
-        return r;
-}
-#else
-#define rd_refcnt_get(R)   rd_atomic32_get(R)
-#endif
-
-/**
- * A wrapper for decreasing refcount and calling a destroy function
- * when refcnt reaches 0.
- */
-#define rd_refcnt_destroywrapper(REFCNT,DESTROY_CALL) do {      \
-                if (rd_refcnt_sub(REFCNT) > 0)                  \
-                        break;                                  \
-                DESTROY_CALL;                                   \
-        } while (0)
-
-
-#define rd_refcnt_destroywrapper2(REFCNT,WHAT,DESTROY_CALL) do {        \
-                if (rd_refcnt_sub2(REFCNT,WHAT) > 0)                        \
-                        break;                                  \
-                DESTROY_CALL;                                   \
-        } while (0)
-
-#if ENABLE_REFCNT_DEBUG
-#define rd_refcnt_add(R)                                                \
-        (                                                               \
-                printf("REFCNT DEBUG: %-35s %d +1: %16p: %s:%d\n",      \
-                       #R, rd_refcnt_get(R), (R), __FUNCTION__,__LINE__), \
-                rd_refcnt_add0(R)                                       \
-                )
-
-#define rd_refcnt_add2(R,WHAT)  do {                                        \
-                printf("REFCNT DEBUG: %-35s %d +1: %16p: %16s: %s:%d\n",      \
-                       #R, rd_refcnt_get(R), (R), WHAT, __FUNCTION__,__LINE__), \
-                rd_refcnt_add0(R);                                      \
-        } while (0)
-
-
-#define rd_refcnt_sub2(R,WHAT) (                                            \
-                printf("REFCNT DEBUG: %-35s %d -1: %16p: %16s: %s:%d\n",      \
-                       #R, rd_refcnt_get(R), (R), WHAT, __FUNCTION__,__LINE__), \
-                rd_refcnt_sub0(R) )
-
-#define rd_refcnt_sub(R) (                                              \
-                printf("REFCNT DEBUG: %-35s %d -1: %16p: %s:%d\n",      \
-                       #R, rd_refcnt_get(R), (R), __FUNCTION__,__LINE__), \
-                rd_refcnt_sub0(R) )
-
-#else
-#define rd_refcnt_add(R)  rd_refcnt_add0(R)
-#define rd_refcnt_sub(R)  rd_refcnt_sub0(R)
-#endif
-
-
-
-#if !ENABLE_SHAREDPTR_DEBUG
-
-/**
- * The non-debug version of shared_ptr is simply a reference counting interface
- * without any additional cost or indirection.
- */
-
-#define RD_SHARED_PTR_TYPE(STRUCT_NAME,WRAPPED_TYPE) WRAPPED_TYPE
-
-
-#define rd_shared_ptr_get_src(FUNC,LINE,OBJ,REFCNT,SPTR_TYPE)	\
-        (rd_refcnt_add(REFCNT), (OBJ))
-#define rd_shared_ptr_get(OBJ,REFCNT,SPTR_TYPE)          \
-        (rd_refcnt_add(REFCNT), (OBJ))
-
-#define rd_shared_ptr_obj(SPTR) (SPTR)
-
-#define rd_shared_ptr_put(SPTR,REF,DESTRUCTOR)                  \
-                rd_refcnt_destroywrapper(REF,DESTRUCTOR)
-
-
-#else
-
-#define RD_SHARED_PTR_TYPE(STRUCT_NAME, WRAPPED_TYPE) \
-        struct STRUCT_NAME {                          \
-                LIST_ENTRY(rd_shptr0_s) link;         \
-                WRAPPED_TYPE *obj;                     \
-                rd_refcnt_t *ref;                     \
-                const char *typename;                 \
-                const char *func;                     \
-                int line;                             \
-        }
-
-
-
-/* Common backing struct compatible with RD_SHARED_PTR_TYPE() types */
-typedef RD_SHARED_PTR_TYPE(rd_shptr0_s, void) rd_shptr0_t;
-
-LIST_HEAD(rd_shptr0_head, rd_shptr0_s);
-extern struct rd_shptr0_head rd_shared_ptr_debug_list;
-extern mtx_t rd_shared_ptr_debug_mtx;
-
-static RD_INLINE RD_UNUSED RD_WARN_UNUSED_RESULT __attribute__((warn_unused_result))
-rd_shptr0_t *rd_shared_ptr_get0 (const char *func, int line,
-                                 const char *typename,
-                                 rd_refcnt_t *ref, void *obj) {
-        rd_shptr0_t *sptr = rd_calloc(1, sizeof(*sptr));
-        sptr->obj = obj;
-        sptr->ref = ref;
-        sptr->typename = typename;
-        sptr->func = func;
-        sptr->line = line;
-
-        mtx_lock(&rd_shared_ptr_debug_mtx);
-        LIST_INSERT_HEAD(&rd_shared_ptr_debug_list, sptr, link);
-        mtx_unlock(&rd_shared_ptr_debug_mtx);
-        return sptr;
-}
-
-#define rd_shared_ptr_get_src(FUNC,LINE,OBJ,REF,SPTR_TYPE)		\
-        (rd_refcnt_add(REF),                                            \
-         (SPTR_TYPE *)rd_shared_ptr_get0(FUNC,LINE, #SPTR_TYPE,REF,OBJ))
-#define rd_shared_ptr_get(OBJ,REF,SPTR_TYPE)	\
-	rd_shared_ptr_get_src(__FUNCTION__, __LINE__, OBJ, REF, SPTR_TYPE)
-
-
-
-#define rd_shared_ptr_obj(SPTR) (SPTR)->obj
-
-#define rd_shared_ptr_put(SPTR,REF,DESTRUCTOR) do {               \
-                if (rd_refcnt_sub(REF) == 0)                      \
-                        DESTRUCTOR;                               \
-                mtx_lock(&rd_shared_ptr_debug_mtx);               \
-                LIST_REMOVE(SPTR, link);                          \
-                mtx_unlock(&rd_shared_ptr_debug_mtx);             \
-                rd_free(SPTR);                                    \
-        } while (0)
-
-void rd_shared_ptrs_dump (void);
-#endif
-
-
-#define RD_IF_FREE(PTR,FUNC) do { if ((PTR)) FUNC(PTR); } while (0)
-
-
-/**
- * @brief Utility type to hold a (memory, size) tuple.
- */
-
-typedef struct rd_chariov_s {
-        char  *ptr;
-        size_t size;
-} rd_chariov_t;
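
As a rough illustration of the refcounting pattern behind rd_refcnt_t and
rd_refcnt_destroywrapper() above, here is a standalone sketch written against
C11 <stdatomic.h> rather than the librdkafka wrappers; the myobj_* names are
invented for the example.

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct myobj_s {
            atomic_int refcnt;
            int payload;
    } myobj_t;

    static myobj_t *myobj_new (int payload) {
            myobj_t *o = calloc(1, sizeof(*o));
            atomic_init(&o->refcnt, 1);
            o->payload = payload;
            return o;
    }

    static myobj_t *myobj_keep (myobj_t *o) {
            atomic_fetch_add(&o->refcnt, 1);
            return o;
    }

    static void myobj_destroy (myobj_t *o) {
            printf("destroying payload %d\n", o->payload);
            free(o);
    }

    /* Counterpart of rd_refcnt_destroywrapper(): only the release that
     * drops the count to zero destroys the object. */
    static void myobj_release (myobj_t *o) {
            if (atomic_fetch_sub(&o->refcnt, 1) == 1)
                    myobj_destroy(o);
    }

    int main (void) {
            myobj_t *o = myobj_new(42);
            myobj_t *extra = myobj_keep(o);

            myobj_release(o);      /* still referenced via 'extra' */
            myobj_release(extra);  /* last reference: destroys the object */
            return 0;
    }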

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdaddr.c
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdaddr.c b/thirdparty/librdkafka-0.11.1/src/rdaddr.c
deleted file mode 100644
index 69625e4..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdaddr.c
+++ /dev/null
@@ -1,220 +0,0 @@
-/*
- * librd - Rapid Development C library
- *
- * Copyright (c) 2012, Magnus Edenhill
- * All rights reserved.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met: 
- * 
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer. 
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution. 
- * 
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-#include "rd.h"
-#include "rdaddr.h"
-#include "rdrand.h"
-
-#ifdef _MSC_VER
-#include <WS2tcpip.h>
-#endif
-
-const char *rd_sockaddr2str (const void *addr, int flags) {
-	const rd_sockaddr_inx_t *a = (const rd_sockaddr_inx_t *)addr;
-	static RD_TLS char ret[32][INET6_ADDRSTRLEN + 16];
-	static RD_TLS int  reti = 0;
-	char portstr[64];
-	int of = 0;
-	int niflags = NI_NUMERICSERV;
-
-	reti = (reti + 1) % 32;
-	
-	switch (a->sinx_family)
-	{
-	case AF_INET:
-	case AF_INET6:
-		if (flags & RD_SOCKADDR2STR_F_FAMILY)
-			of += rd_snprintf(&ret[reti][of], sizeof(ret[reti])-of, "ipv%i#",
-				      a->sinx_family == AF_INET ? 4 : 6);
-
-		if ((flags & RD_SOCKADDR2STR_F_PORT) &&
-		    a->sinx_family == AF_INET6)
-			ret[reti][of++] = '[';
-
-		if (!(flags & RD_SOCKADDR2STR_F_RESOLVE))
-			niflags |= NI_NUMERICHOST;
-
-		if (getnameinfo((const struct sockaddr *)a,
-				RD_SOCKADDR_INX_LEN(a),
-				ret[reti]+of, sizeof(ret[reti])-of,
-				(flags & RD_SOCKADDR2STR_F_PORT) ?
-				portstr : NULL,
-				(flags & RD_SOCKADDR2STR_F_PORT) ?
-				sizeof(portstr) : 0,
-				niflags))
-			break;
-
-		
-		if (flags & RD_SOCKADDR2STR_F_PORT) {
-			size_t len = strlen(ret[reti]);
-			rd_snprintf(ret[reti]+len, sizeof(ret[reti])-len,
-				 "%s:%s",
-				 a->sinx_family == AF_INET6 ? "]" : "",
-				 portstr);
-		}
-	
-		return ret[reti];
-	}
-	
-
-	/* Error-case */
-	rd_snprintf(ret[reti], sizeof(ret[reti]), "<unsupported:%s>",
-		 rd_family2str(a->sinx_family));
-	
-	return ret[reti];
-}
-
-
-const char *rd_addrinfo_prepare (const char *nodesvc,
-				 char **node, char **svc) {
-	static RD_TLS char snode[256];
-	static RD_TLS char ssvc[64];
-	const char *t;
-	const char *svct = NULL;
-	size_t nodelen = 0;
-
-	*snode = '\0';
-	*ssvc = '\0';
-
-	if (*nodesvc == '[') {
-		/* "[host]".. (enveloped node name) */
-		if  (!(t = strchr(nodesvc, ']')))
-			return "Missing close-']'";
-		nodesvc++;
-		nodelen = t-nodesvc;
-		svct = t+1;
-
-	} else if (*nodesvc == ':' && *(nodesvc+1) != ':') {
-		/* ":"..  (port only) */
-		nodelen = 0;
-		svct = nodesvc;
-	}
-		
-	if ((svct = strrchr(svct ? svct : nodesvc, ':')) && (*(svct-1) != ':') &&
-	    *(++svct)) {
-		/* Optional ":service" definition. */
-		if (strlen(svct) >= sizeof(ssvc))
-			return "Service name too long";
-		strcpy(ssvc, svct);
-		if (!nodelen)
-			nodelen = svct - nodesvc - 1;
-
-	} else if (!nodelen)
-		nodelen = strlen(nodesvc);
-
-	if (nodelen) {
-		/* Truncate nodename if necessary. */
-		nodelen = RD_MIN(nodelen, sizeof(snode)-1);
-		strncpy(snode, nodesvc, nodelen);
-		snode[nodelen] = '\0';
-	}
-
-	*node = snode;
-	*svc = ssvc;
-
-	return NULL;
-}
-
-
-
-rd_sockaddr_list_t *rd_getaddrinfo (const char *nodesvc, const char *defsvc,
-				    int flags, int family,
-				    int socktype, int protocol,
-				    const char **errstr) {
-	struct addrinfo hints = { .ai_family = family,
-				  .ai_socktype = socktype,
-				  .ai_protocol = protocol,
-				  .ai_flags = flags };
-	struct addrinfo *ais, *ai;
-	char *node, *svc;
-	int r;
-	int cnt = 0;
-	rd_sockaddr_list_t *rsal;
-
-	if ((*errstr = rd_addrinfo_prepare(nodesvc, &node, &svc))) {
-		errno = EINVAL;
-		return NULL;
-	}
-
-	if (*svc)
-		defsvc = svc;
-		
-	if ((r = getaddrinfo(node, defsvc, &hints, &ais))) {
-#ifdef EAI_SYSTEM
-		if (r == EAI_SYSTEM)
-#else
-		if (0)
-#endif
-			*errstr = rd_strerror(errno);
-		else {
-#ifdef _MSC_VER
-			*errstr = gai_strerrorA(r);
-#else
-			*errstr = gai_strerror(r);
-#endif
-			errno = EFAULT;
-		}
-		return NULL;
-	}
-	
-	/* Count number of addresses */
-	for (ai = ais ; ai != NULL ; ai = ai->ai_next)
-		cnt++;
-
-	if (cnt == 0) {
-		/* unlikely? */
-		freeaddrinfo(ais);
-		errno = ENOENT;
-		*errstr = "No addresses";
-		return NULL;
-	}
-
-
-	rsal = rd_calloc(1, sizeof(*rsal) + (sizeof(*rsal->rsal_addr) * cnt));
-
-	for (ai = ais ; ai != NULL ; ai = ai->ai_next)
-		memcpy(&rsal->rsal_addr[rsal->rsal_cnt++],
-		       ai->ai_addr, ai->ai_addrlen);
-
-	freeaddrinfo(ais);
-
-	/* Shuffle address list for proper round-robin */
-	if (!(flags & RD_AI_NOSHUFFLE))
-		rd_array_shuffle(rsal->rsal_addr, rsal->rsal_cnt,
-				 sizeof(*rsal->rsal_addr));
-
-	return rsal;
-}
-
-
-
-void rd_sockaddr_list_destroy (rd_sockaddr_list_t *rsal) {
-	rd_free(rsal);
-}
-
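
rd_addrinfo_prepare() above splits a "node[:service]" string, including the
"[ipv6]:port" enveloped form, before the result is handed to getaddrinfo().
A simplified standalone sketch of that splitting step (it deliberately skips
the original's handling of bare, unbracketed IPv6 addresses; split_nodesvc is
an invented name, not a librdkafka function):

    #include <stdio.h>
    #include <string.h>

    /* Split "node[:service]" into separate node and service strings.
     * Returns 0 on success, -1 on a missing ']'. */
    static int split_nodesvc (const char *in, char *node, size_t nodesz,
                              char *svc, size_t svcsz) {
            const char *end, *colon;

            node[0] = svc[0] = '\0';

            if (*in == '[') {
                    /* "[host]:port" style enveloping */
                    if (!(end = strchr(in, ']')))
                            return -1;
                    snprintf(node, nodesz, "%.*s", (int)(end - in - 1), in + 1);
                    if (end[1] == ':')
                            snprintf(svc, svcsz, "%s", end + 2);
                    return 0;
            }

            /* Plain "host[:port]": treat the last ':' as the separator. */
            if ((colon = strrchr(in, ':'))) {
                    snprintf(node, nodesz, "%.*s", (int)(colon - in), in);
                    snprintf(svc, svcsz, "%s", colon + 1);
            } else {
                    snprintf(node, nodesz, "%s", in);
            }
            return 0;
    }

    int main (void) {
            const char *inputs[] = { "broker1:9092", "[::1]:9092", "broker2" };
            char node[256], svc[64];
            size_t i;

            for (i = 0; i < sizeof(inputs) / sizeof(*inputs); i++)
                    if (!split_nodesvc(inputs[i], node, sizeof(node),
                                       svc, sizeof(svc)))
                            printf("node='%s' svc='%s'\n", node, svc);
            return 0;
    }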

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdaddr.h
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdaddr.h b/thirdparty/librdkafka-0.11.1/src/rdaddr.h
deleted file mode 100644
index dd1c419..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdaddr.h
+++ /dev/null
@@ -1,184 +0,0 @@
-/*
- * librd - Rapid Development C library
- *
- * Copyright (c) 2012, Magnus Edenhill
- * All rights reserved.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met: 
- * 
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer. 
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution. 
- * 
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#pragma once
-
-#ifndef _MSC_VER
-#include <netinet/in.h>
-#include <arpa/inet.h>
-#include <netdb.h>
-#else
-#define WIN32_LEAN_AND_MEAN
-#include <WinSock2.h>
-#include <ws2ipdef.h>
-#endif
-
-#if defined(__FreeBSD__) || defined(_AIX)
-#include <sys/socket.h>
-#endif
-
-/**
- * rd_sockaddr_inx_t is a union for either IPv4 or IPv6 sockaddrs.
- * It provides a convenient abstraction for AF_INET*-agnostic operations.
- */
-typedef union {
-	struct sockaddr_in in;
-	struct sockaddr_in6 in6;
-} rd_sockaddr_inx_t;
-#define sinx_family in.sin_family
-#define sinx_addr   in.sin_addr
-#define RD_SOCKADDR_INX_LEN(sinx) \
-	((sinx)->sinx_family == AF_INET ? sizeof(struct sockaddr_in) :	\
-	 (sinx)->sinx_family == AF_INET6 ? sizeof(struct sockaddr_in6):	\
-	 sizeof(rd_sockaddr_inx_t))
-#define RD_SOCKADDR_INX_PORT(sinx)					\
-	((sinx)->sinx_family == AF_INET ? (sinx)->in.sin_port :		\
-	 (sinx)->sinx_family == AF_INET6 ? (sinx)->in6.sin6_port : 0)
-
-#define RD_SOCKADDR_INX_PORT_SET(sinx,port) do {		\
-	if ((sinx)->sinx_family == AF_INET)			\
-		(sinx)->in.sin_port = port;			\
-	else if ((sinx)->sinx_family == AF_INET6)		\
-		(sinx)->in6.sin6_port = port;			\
- } while (0)
-
-
-
-/**
- * Returns a thread-local temporary string (may be called up to 32 times
- * without buffer wrapping) containing the human-readable string representation
- * of the sockaddr (which should be AF_INET or AF_INET6 at this point).
- * If the RD_SOCKADDR2STR_F_PORT is provided the port number will be
- * appended to the string.
- * IPv6 address enveloping ("[addr]:port") will also be performed
- * if .._F_PORT is set.
- */
-#define RD_SOCKADDR2STR_F_PORT    0x1  /* Append the port. */
-#define RD_SOCKADDR2STR_F_RESOLVE 0x2  /* Try to resolve address to hostname. */
-#define RD_SOCKADDR2STR_F_FAMILY  0x4  /* Prepend address family. */
-#define RD_SOCKADDR2STR_F_NICE         /* Nice and friendly output */ \
-	(RD_SOCKADDR2STR_F_PORT | RD_SOCKADDR2STR_F_RESOLVE)
-const char *rd_sockaddr2str (const void *addr, int flags);
-
-
-/**
- * Splits a node:service definition into its node and svc counterparts
- * suitable for passing to getaddrinfo().
- * Returns NULL on success (and temporarily available pointers in '*node'
- * and '*svc') or error string on failure.
- *
- * Thread-safe but returned buffers in '*node' and '*svc' are only
- * usable until the next call to rd_addrinfo_prepare() in the same thread.
- */
-const char *rd_addrinfo_prepare (const char *nodesvc,
-				 char **node, char **svc);
-
-
-
-typedef struct rd_sockaddr_list_s {
-	int rsal_cnt;
-	int rsal_curr;
-	rd_sockaddr_inx_t rsal_addr[];
-} rd_sockaddr_list_t;
-
-
-/**
- * Returns the next address from a sockaddr list and updates
- * the current-index to point to it.
- *
- * Typical usage is for round-robin connection attempts or similar:
- *   while (1) {
- *       rd_sockaddr_inx_t *sinx = rd_sockaddr_list_next(my_server_list);
- *       if (do_connect((struct sockaddr *)sinx) == -1) {
- *          sleep(1);
- *          continue;
- *       }
- *       ...
- *   }
- * 
- */
- 
-static RD_INLINE rd_sockaddr_inx_t *
-rd_sockaddr_list_next (rd_sockaddr_list_t *rsal) RD_UNUSED;
-static RD_INLINE rd_sockaddr_inx_t *
-rd_sockaddr_list_next (rd_sockaddr_list_t *rsal) {
-	rsal->rsal_curr = (rsal->rsal_curr + 1) % rsal->rsal_cnt;
-	return &rsal->rsal_addr[rsal->rsal_curr];
-}
-
-
-#define RD_SOCKADDR_LIST_FOREACH(sinx, rsal)			\
-	for  ((sinx) = &(rsal)->rsal_addr[0] ;			\
-	      (sinx) < &(rsal)->rsal_addr[(rsal)->rsal_cnt] ;	\
-	      (sinx)++)
-
-/**
- * Wrapper for getaddrinfo(3) that performs these additional tasks:
- *  - Input is a combined "<node>[:<svc>]" string, with support for
- *    IPv6 enveloping ("[addr]:port").
- *  - Returns a rd_sockaddr_list_t which must be freed with
- *    rd_sockaddr_list_destroy() when done with it.
- *  - Automatically shuffles the returned address list to provide
- *    round-robin (unless RD_AI_NOSHUFFLE is provided in 'flags').
- *
- * Thread-safe.
- */
-#define RD_AI_NOSHUFFLE  0x10000000 /* Don't shuffle the returned address list.
-				     * FIXME: Guessing unused bits like this
-				     *        is a bad idea. */
-
-rd_sockaddr_list_t *rd_getaddrinfo (const char *nodesvc, const char *defsvc,
-				    int flags, int family,
-				    int socktype, int protocol,
-				    const char **errstr);
-
-
-
-/**
- * Frees a sockaddr list.
- *
- * Thread-safe.
- */
-void rd_sockaddr_list_destroy (rd_sockaddr_list_t *rsal);
-
-
-
-/**
- * Returns the human readable name of a socket family.
- */
-static const char *rd_family2str (int af) RD_UNUSED;
-static const char *rd_family2str (int af) {
-	switch(af){
-		case AF_INET:
-			return "inet";
-		case AF_INET6:
-			return "inet6";
-		default:
-			return "af?";
-	};
-}
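
The rd_getaddrinfo() wrapper documented above is built on top of
getaddrinfo(3), and rd_sockaddr2str() is essentially a getnameinfo(3)
formatter. A standalone POSIX sketch of the same resolve-and-format flow,
using only the standard libc calls (the host/port values are placeholders):

    #include <stdio.h>
    #include <string.h>
    #include <sys/types.h>
    #include <sys/socket.h>
    #include <netdb.h>

    int main (void) {
            struct addrinfo hints, *ais, *ai;
            int r;

            memset(&hints, 0, sizeof(hints));
            hints.ai_family   = AF_UNSPEC;      /* IPv4 or IPv6 */
            hints.ai_socktype = SOCK_STREAM;

            if ((r = getaddrinfo("localhost", "9092", &hints, &ais))) {
                    fprintf(stderr, "getaddrinfo: %s\n", gai_strerror(r));
                    return 1;
            }

            for (ai = ais; ai != NULL; ai = ai->ai_next) {
                    char host[256], svc[32];
                    if (!getnameinfo(ai->ai_addr, ai->ai_addrlen,
                                     host, sizeof(host), svc, sizeof(svc),
                                     NI_NUMERICHOST | NI_NUMERICSERV))
                            printf("%s:%s\n", host, svc);
            }

            freeaddrinfo(ais);
            return 0;
    }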

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdatomic.h
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdatomic.h b/thirdparty/librdkafka-0.11.1/src/rdatomic.h
deleted file mode 100644
index 99099f7..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdatomic.h
+++ /dev/null
@@ -1,188 +0,0 @@
-/*
- * librdkafka - The Apache Kafka C/C++ library
- *
- * Copyright (c) 2014-2016 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-#pragma once
-
-#include "tinycthread.h"
-
-typedef struct {
-	int32_t val;
-#ifndef HAVE_ATOMICS_32
-	mtx_t lock;
-#endif
-} rd_atomic32_t;
-
-typedef struct {
-	int64_t val;
-#ifndef HAVE_ATOMICS_64
-	mtx_t lock;
-#endif
-} rd_atomic64_t;
-
-
-static RD_INLINE RD_UNUSED void rd_atomic32_init (rd_atomic32_t *ra, int32_t v) {
-	ra->val = v;
-#if !defined(_MSC_VER) && !defined(HAVE_ATOMICS_32)
-	mtx_init(&ra->lock, mtx_plain);
-#endif
-}
-
-
-static RD_INLINE int32_t RD_UNUSED rd_atomic32_add (rd_atomic32_t *ra, int32_t v) {
-#ifdef __SUNPRO_C
-	return atomic_add_32_nv(&ra->val, v);
-#elif defined(_MSC_VER)
-	return InterlockedAdd(&ra->val, v);
-#elif !defined(HAVE_ATOMICS_32)
-	int32_t r;
-	mtx_lock(&ra->lock);
-	ra->val += v;
-	r = ra->val;
-	mtx_unlock(&ra->lock);
-	return r;
-#else
-	return ATOMIC_OP32(add, fetch, &ra->val, v);
-#endif
-}
-
-static RD_INLINE int32_t RD_UNUSED rd_atomic32_sub(rd_atomic32_t *ra, int32_t v) {
-#ifdef __SUNPRO_C
-	return atomic_add_32_nv(&ra->val, -v);
-#elif defined(_MSC_VER)
-	return InterlockedAdd(&ra->val, -v);
-#elif !defined(HAVE_ATOMICS_32)
-	int32_t r;
-	mtx_lock(&ra->lock);
-	ra->val -= v;
-	r = ra->val;
-	mtx_unlock(&ra->lock);
-	return r;
-#else
-	return ATOMIC_OP32(sub, fetch, &ra->val, v);
-#endif
-}
-
-static RD_INLINE int32_t RD_UNUSED rd_atomic32_get(rd_atomic32_t *ra) {
-#if defined(_MSC_VER) || defined(__SUNPRO_C)
-	return ra->val;
-#elif !defined(HAVE_ATOMICS_32)
-	int32_t r;
-	mtx_lock(&ra->lock);
-	r = ra->val;
-	mtx_unlock(&ra->lock);
-	return r;
-#else
-	return ATOMIC_OP32(fetch, add, &ra->val, 0);
-#endif
-}
-
-static RD_INLINE int32_t RD_UNUSED rd_atomic32_set(rd_atomic32_t *ra, int32_t v) {
-#ifdef _MSC_VER
-	return InterlockedExchange(&ra->val, v);
-#elif !defined(HAVE_ATOMICS_32)
-	int32_t r;
-	mtx_lock(&ra->lock);
-	r = ra->val = v;
-	mtx_unlock(&ra->lock);
-	return r;
-#else
-	return ra->val = v; // FIXME
-#endif
-}
-
-
-
-static RD_INLINE RD_UNUSED void rd_atomic64_init (rd_atomic64_t *ra, int64_t v) {
-	ra->val = v;
-#if !defined(_MSC_VER) && !defined(HAVE_ATOMICS_64)
-	mtx_init(&ra->lock, mtx_plain);
-#endif
-}
-
-static RD_INLINE int64_t RD_UNUSED rd_atomic64_add (rd_atomic64_t *ra, int64_t v) {
-#ifdef __SUNPRO_C
-	return atomic_add_64_nv(&ra->val, v);
-#elif defined(_MSC_VER)
-	return InterlockedAdd64(&ra->val, v);
-#elif !defined(HAVE_ATOMICS_64)
-	int64_t r;
-	mtx_lock(&ra->lock);
-	ra->val += v;
-	r = ra->val;
-	mtx_unlock(&ra->lock);
-	return r;
-#else
-	return ATOMIC_OP64(add, fetch, &ra->val, v);
-#endif
-}
-
-static RD_INLINE int64_t RD_UNUSED rd_atomic64_sub(rd_atomic64_t *ra, int64_t v) {
-#ifdef __SUNPRO_C
-	return atomic_add_64_nv(&ra->val, -v);
-#elif defined(_MSC_VER)
-	return InterlockedAdd64(&ra->val, -v);
-#elif !defined(HAVE_ATOMICS_64)
-	int64_t r;
-	mtx_lock(&ra->lock);
-	ra->val -= v;
-	r = ra->val;
-	mtx_unlock(&ra->lock);
-	return r;
-#else
-	return ATOMIC_OP64(sub, fetch, &ra->val, v);
-#endif
-}
-
-static RD_INLINE int64_t RD_UNUSED rd_atomic64_get(rd_atomic64_t *ra) {
-#if defined(_MSC_VER) || defined(__SUNPRO_C)
-	return ra->val;
-#elif !defined(HAVE_ATOMICS_64)
-	int64_t r;
-	mtx_lock(&ra->lock);
-	r = ra->val;
-	mtx_unlock(&ra->lock);
-	return r;
-#else
-	return ATOMIC_OP64(fetch, add, &ra->val, 0);
-#endif
-}
-
-
-static RD_INLINE int64_t RD_UNUSED rd_atomic64_set(rd_atomic64_t *ra, int64_t v) {
-#ifdef _MSC_VER
-	return InterlockedExchange64(&ra->val, v);
-#elif !defined(HAVE_ATOMICS_64)
-	int64_t r;
-	mtx_lock(&ra->lock);
-	ra->val = v;
-	r = ra->val;
-	mtx_unlock(&ra->lock);
-	return r;
-#else
-	return ra->val = v; // FIXME
-#endif
-}
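
When neither compiler atomics nor the Windows Interlocked* functions are
available, rd_atomic32_t above falls back to guarding a plain integer with a
mutex. A standalone sketch of that fallback pattern, using pthreads in place
of the tinycthread mtx_t used by the real code (the counter32_* names are
invented for the example; build with -lpthread):

    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef struct {
            int32_t val;
            pthread_mutex_t lock;
    } counter32_t;

    static void counter32_init (counter32_t *c, int32_t v) {
            c->val = v;
            pthread_mutex_init(&c->lock, NULL);
    }

    /* Add 'v' and return the resulting value, like rd_atomic32_add(). */
    static int32_t counter32_add (counter32_t *c, int32_t v) {
            int32_t r;
            pthread_mutex_lock(&c->lock);
            c->val += v;
            r = c->val;
            pthread_mutex_unlock(&c->lock);
            return r;
    }

    static int32_t counter32_get (counter32_t *c) {
            int32_t r;
            pthread_mutex_lock(&c->lock);
            r = c->val;
            pthread_mutex_unlock(&c->lock);
            return r;
    }

    int main (void) {
            counter32_t c;
            counter32_init(&c, 0);
            counter32_add(&c, 5);
            counter32_add(&c, -2);
            printf("value: %d\n", (int)counter32_get(&c));  /* prints 3 */
            pthread_mutex_destroy(&c.lock);
            return 0;
    }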

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdavg.h
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdavg.h b/thirdparty/librdkafka-0.11.1/src/rdavg.h
deleted file mode 100644
index 98661d8..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdavg.h
+++ /dev/null
@@ -1,95 +0,0 @@
-#pragma once
-
-
-typedef struct rd_avg_s {
-        struct {
-                int64_t maxv;
-                int64_t minv;
-                int64_t avg;
-                int64_t sum;
-                int     cnt;
-                rd_ts_t start;
-        } ra_v;
-        mtx_t ra_lock;
-        enum {
-                RD_AVG_GAUGE,
-                RD_AVG_COUNTER,
-        } ra_type;
-} rd_avg_t;
-
-
-/**
- * Add value 'v' to averager 'ra'.
- */
-static RD_UNUSED void rd_avg_add (rd_avg_t *ra, int64_t v) {
-        mtx_lock(&ra->ra_lock);
-	if (v > ra->ra_v.maxv)
-		ra->ra_v.maxv = v;
-	if (ra->ra_v.minv == 0 || v < ra->ra_v.minv)
-		ra->ra_v.minv = v;
-	ra->ra_v.sum += v;
-	ra->ra_v.cnt++;
-        mtx_unlock(&ra->ra_lock);
-}
-
-
-/**
- * @brief Calculate the average
- */
-static RD_UNUSED void rd_avg_calc (rd_avg_t *ra, rd_ts_t now) {
-        if (ra->ra_type == RD_AVG_GAUGE) {
-                if (ra->ra_v.cnt)
-                        ra->ra_v.avg = ra->ra_v.sum / ra->ra_v.cnt;
-                else
-                        ra->ra_v.avg = 0;
-        } else {
-                rd_ts_t elapsed = now - ra->ra_v.start;
-
-                if (elapsed)
-                        ra->ra_v.avg = (ra->ra_v.sum * 1000000llu) / elapsed;
-                else
-                        ra->ra_v.avg = 0;
-
-                ra->ra_v.start = elapsed;
-        }
-}
-
-
-/**
- * Rolls over statistics in 'src' and stores the average in 'dst'.
- * 'src' is cleared and ready to be reused.
- */
-static RD_UNUSED void rd_avg_rollover (rd_avg_t *dst,
-					     rd_avg_t *src) {
-        rd_ts_t now = rd_clock();
-
-        mtx_lock(&src->ra_lock);
-        dst->ra_type = src->ra_type;
-	dst->ra_v    = src->ra_v;
-	memset(&src->ra_v, 0, sizeof(src->ra_v));
-        src->ra_v.start = now;
-        mtx_unlock(&src->ra_lock);
-
-        rd_avg_calc(dst, now);
-}
-
-
-/**
- * Initialize an averager
- */
-static RD_UNUSED void rd_avg_init (rd_avg_t *ra, int type) {
-        rd_avg_t dummy;
-        memset(ra, 0, sizeof(*ra));
-        mtx_init(&ra->ra_lock, 0);
-        ra->ra_type = type;
-
-        rd_avg_rollover(&dummy, ra);
-}
-
-/**
- * Destroy averager
- */
-static RD_UNUSED void rd_avg_destroy (rd_avg_t *ra) {
-        mtx_destroy(&ra->ra_lock);
-}
-
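
The gauge branch of rd_avg_calc() above simply tracks min, max, sum and count
and divides at rollover time. A single-threaded standalone sketch of that
bookkeeping (locking and the counter/rollover machinery are omitted; the
gauge_* names are invented for the example):

    #include <stdio.h>
    #include <stdint.h>

    typedef struct {
            int64_t minv, maxv, sum;
            int     cnt;
    } gauge_avg_t;

    static void gauge_add (gauge_avg_t *g, int64_t v) {
            if (g->cnt == 0 || v < g->minv)
                    g->minv = v;
            if (g->cnt == 0 || v > g->maxv)
                    g->maxv = v;
            g->sum += v;
            g->cnt++;
    }

    static int64_t gauge_avg (const gauge_avg_t *g) {
            return g->cnt ? g->sum / g->cnt : 0;
    }

    int main (void) {
            gauge_avg_t g = { 0, 0, 0, 0 };
            int64_t samples[] = { 12, 7, 31, 20 };
            size_t i;

            for (i = 0; i < sizeof(samples) / sizeof(*samples); i++)
                    gauge_add(&g, samples[i]);

            printf("min=%lld max=%lld avg=%lld over %d samples\n",
                   (long long)g.minv, (long long)g.maxv,
                   (long long)gauge_avg(&g), g.cnt);
            return 0;
    }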

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdavl.c
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdavl.c b/thirdparty/librdkafka-0.11.1/src/rdavl.c
deleted file mode 100644
index 2f58dd4..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdavl.c
+++ /dev/null
@@ -1,214 +0,0 @@
-/*
- * librd - Rapid Development C library
- *
- * Copyright (c) 2012-2016, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "rdkafka_int.h"
-#include "rdavl.h"
-
-/*
- * AVL tree.
- * Inspired by Ian Piumarta's tree.h implementation.
- */
-
-#define RD_AVL_NODE_HEIGHT(ran) ((ran) ? (ran)->ran_height : 0)
-
-#define RD_AVL_NODE_DELTA(ran) \
-        (RD_AVL_NODE_HEIGHT((ran)->ran_p[RD_AVL_LEFT]) - \
-         RD_AVL_NODE_HEIGHT((ran)->ran_p[RD_AVL_RIGHT]))
-
-#define RD_DELTA_MAX 1
-
-
-static rd_avl_node_t *rd_avl_balance_node (rd_avl_node_t *ran);
-
-static rd_avl_node_t *rd_avl_rotate (rd_avl_node_t *ran, rd_avl_dir_t dir) {
-        rd_avl_node_t *n;
-        static const rd_avl_dir_t odirmap[] = { /* opposite direction map */
-                [RD_AVL_RIGHT] = RD_AVL_LEFT,
-                [RD_AVL_LEFT]  = RD_AVL_RIGHT
-        };
-        const int odir = odirmap[dir];
-
-        n = ran->ran_p[odir];
-        ran->ran_p[odir] = n->ran_p[dir];
-        n->ran_p[dir] = rd_avl_balance_node(ran);
-
-        return rd_avl_balance_node(n);
-}
-
-static rd_avl_node_t *rd_avl_balance_node (rd_avl_node_t *ran) {
-        const int d = RD_AVL_NODE_DELTA(ran);
-        int h;
-
-        if (d < -RD_DELTA_MAX) {
-                if (RD_AVL_NODE_DELTA(ran->ran_p[RD_AVL_RIGHT]) > 0)
-                        ran->ran_p[RD_AVL_RIGHT] =
-                                rd_avl_rotate(ran->ran_p[RD_AVL_RIGHT],
-                                              RD_AVL_RIGHT);
-                return rd_avl_rotate(ran, RD_AVL_LEFT);
-
-        } else if (d > RD_DELTA_MAX) {
-                if (RD_AVL_NODE_DELTA(ran->ran_p[RD_AVL_LEFT]) < 0)
-                        ran->ran_p[RD_AVL_LEFT] =
-                                rd_avl_rotate(ran->ran_p[RD_AVL_LEFT],
-                                              RD_AVL_LEFT);
-
-                return rd_avl_rotate(ran, RD_AVL_RIGHT);
-        }
-
-        ran->ran_height = 0;
-
-        if ((h = RD_AVL_NODE_HEIGHT(ran->ran_p[RD_AVL_LEFT])) > ran->ran_height)
-                ran->ran_height = h;
-
-        if ((h = RD_AVL_NODE_HEIGHT(ran->ran_p[RD_AVL_RIGHT])) >ran->ran_height)
-                ran->ran_height = h;
-
-        ran->ran_height++;
-
-        return ran;
-}
-
-rd_avl_node_t *rd_avl_insert_node (rd_avl_t *ravl,
-                                   rd_avl_node_t *parent,
-                                   rd_avl_node_t *ran,
-                                   rd_avl_node_t **existing) {
-        rd_avl_dir_t dir;
-        int r;
-
-        if (!parent)
-                return ran;
-
-        if ((r = ravl->ravl_cmp(ran->ran_elm, parent->ran_elm)) == 0) {
-                /* Replace existing node with new one. */
-                ran->ran_p[RD_AVL_LEFT] = parent->ran_p[RD_AVL_LEFT];
-                ran->ran_p[RD_AVL_RIGHT] = parent->ran_p[RD_AVL_RIGHT];
-                ran->ran_height = parent->ran_height;
-                *existing = parent;
-                return ran;
-        }
-
-        if (r < 0)
-                dir = RD_AVL_LEFT;
-        else
-                dir = RD_AVL_RIGHT;
-
-        parent->ran_p[dir] = rd_avl_insert_node(ravl, parent->ran_p[dir],
-                                                ran, existing);
-        return rd_avl_balance_node(parent);
-}
-
-
-static rd_avl_node_t *rd_avl_move (rd_avl_node_t *dst, rd_avl_node_t *src,
-                                   rd_avl_dir_t dir) {
-
-        if (!dst)
-                return src;
-
-        dst->ran_p[dir] = rd_avl_move(dst->ran_p[dir], src, dir);
-
-        return rd_avl_balance_node(dst);
-}
-
-static rd_avl_node_t *rd_avl_remove_node0 (rd_avl_node_t *ran) {
-        rd_avl_node_t *tmp;
-
-        tmp = rd_avl_move(ran->ran_p[RD_AVL_LEFT],
-                          ran->ran_p[RD_AVL_RIGHT],
-                          RD_AVL_RIGHT);
-
-        ran->ran_p[RD_AVL_LEFT] = ran->ran_p[RD_AVL_RIGHT] = NULL;
-        return tmp;
-}
-
-
-rd_avl_node_t *rd_avl_remove_elm0 (rd_avl_t *ravl, rd_avl_node_t *parent,
-                                   const void *elm) {
-        rd_avl_dir_t dir;
-        int r;
-
-        if (!parent)
-                return NULL;
-
-
-        if ((r = ravl->ravl_cmp(elm, parent->ran_elm)) == 0)
-                return rd_avl_remove_node0(parent);
-        else if  (r < 0)
-                dir = RD_AVL_LEFT;
-        else /* > 0 */
-                dir = RD_AVL_RIGHT;
-
-        parent->ran_p[dir] =
-                rd_avl_remove_elm0(ravl, parent->ran_p[dir], elm);
-
-        return rd_avl_balance_node(parent);
-}
-
-
-
-rd_avl_node_t *rd_avl_find_node (const rd_avl_t *ravl,
-                                 const rd_avl_node_t *begin,
-                                 const void *elm) {
-        int r;
-
-        if (!begin)
-                return NULL;
-        else if (!(r = ravl->ravl_cmp(elm, begin->ran_elm)))
-                return (rd_avl_node_t *)begin;
-        else if (r < 0)
-                return rd_avl_find_node(ravl, begin->ran_p[RD_AVL_LEFT], elm);
-        else /* r > 0 */
-                return rd_avl_find_node(ravl, begin->ran_p[RD_AVL_RIGHT], elm);
-}
-
-
-
-void rd_avl_destroy (rd_avl_t *ravl) {
-        if (ravl->ravl_flags & RD_AVL_F_LOCKS)
-                rwlock_destroy(&ravl->ravl_rwlock);
-
-        if (ravl->ravl_flags & RD_AVL_F_OWNER)
-                free(ravl);
-}
-
-rd_avl_t *rd_avl_init (rd_avl_t *ravl, rd_avl_cmp_t cmp, int flags) {
-
-        if (!ravl) {
-                ravl = calloc(1, sizeof(*ravl));
-                flags |= RD_AVL_F_OWNER;
-        } else {
-                memset(ravl, 0, sizeof(*ravl));
-        }
-
-        ravl->ravl_flags = flags;
-        ravl->ravl_cmp = cmp;
-
-        if (flags & RD_AVL_F_LOCKS)
-                rwlock_init(&ravl->ravl_rwlock);
-
-        return ravl;
-}

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdavl.h
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdavl.h b/thirdparty/librdkafka-0.11.1/src/rdavl.h
deleted file mode 100644
index ffd33dd..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdavl.h
+++ /dev/null
@@ -1,253 +0,0 @@
-/*
- * librd - Rapid Development C library
- *
- * Copyright (c) 2012-2016, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-/*
- * AVL tree.
- * Inspired by Ian Piumarta's tree.h implementation.
- */
-
-#pragma once
-
-#include "tinycthread.h"
-
-
-typedef enum {
-        RD_AVL_LEFT,
-        RD_AVL_RIGHT,
-} rd_avl_dir_t;
-
-/**
- * AVL tree node.
- * Add 'rd_avl_node_t ..' as field to your element's struct and
- * provide it as the 'field' argument in the API below.
- */
-typedef struct rd_avl_node_s {
-        struct rd_avl_node_s *ran_p[2];    /* RD_AVL_LEFT and RD_AVL_RIGHT */
-        int                   ran_height;  /* Sub-tree height */
-        void                 *ran_elm;     /* Backpointer to the containing
-                                            * element. This could be considered
-                                            * costly but is convenient for the
-                                            * caller: RAM is cheap,
-                                            * development time isn't*/
-} rd_avl_node_t;
-
-
-
-/**
- * Per-AVL application-provided element comparator.
- */
-typedef int (*rd_avl_cmp_t) (const void *, const void *);
-
-
-/**
- * AVL tree
- */
-typedef struct rd_avl_s {
-        rd_avl_node_t *ravl_root;   /* Root node */
-        rd_avl_cmp_t   ravl_cmp;    /* Comparator */
-        int            ravl_flags;  /* Flags */
-#define RD_AVL_F_LOCKS      0x1     /* Enable thread-safeness */
-#define RD_AVL_F_OWNER      0x2     /* internal: rd_avl_init() allocated ravl */
-        rwlock_t       ravl_rwlock; /* Mutex when .._F_LOCKS is set. */
-} rd_avl_t;
-
-
-
-
-/**
- *
- *
- * Public API
- *
- *
- */
-
-/**
- * Insert 'elm' into AVL tree.
- * In case of collision the previous entry is overwritten by the
- * new one and the previous element is returned, else NULL.
- */
-#define RD_AVL_INSERT(ravl,elm,field)           \
-        rd_avl_insert(ravl, elm, &(elm)->field)
-
-
-/**
- * Remove element by matching value 'elm' using compare function.
- */
-#define RD_AVL_REMOVE_ELM(ravl,elm)     \
-        rd_avl_remove_elm(ravl, elm)
-
-/**
- * Search for (by value using compare function) and return matching elm.
- */
-#define RD_AVL_FIND(ravl,elm)           \
-        rd_avl_find(ravl, elm, 1)
-
-
-/**
- * Search (by value using compare function) for and return matching elm.
- * Same as RD_AVL_FIND() but assumes 'ravl' is already locked
- * by 'rd_avl_*lock()'.
- *
- * NOTE: rd_avl_wrlock() must be held.
- */
-#define RD_AVL_FIND_NL(ravl,elm)                \
-        rd_avl_find_node(ravl, (ravl)->ravl_root, elm, 0)
-
-
-/**
- * Search (by value using compare function) for elm and return its AVL node.
- *
- * NOTE: rd_avl_wrlock() must be held.
- */
-#define RD_AVL_FIND_NODE_NL(ravl,elm)           \
-        rd_avl_find(ravl, elm, 0)
-
-
-/**
- * Changes the element pointer for an existing AVL node in the tree.
- * The new element must be identical (according to the comparator) 
- * to the previous element.
- *
- * NOTE: rd_avl_wrlock() must be held.
- */
-#define RD_AVL_ELM_SET_NL(ran,elm)  ((ran)->ran_elm = (elm))
-
-/**
- * Returns the current element pointer for an existing AVL node in the tree
- * 
- * NOTE: rd_avl_*lock() must be held.
- */
-#define RD_AVL_ELM_GET_NL(ran)      ((ran)->ran_elm)
-
-
-
-/**
- * Destroy previously initialized (by rd_avl_init()) AVL tree.
- */
-void      rd_avl_destroy (rd_avl_t *ravl);
-
-/**
- * Initialize (and optionally allocate if 'ravl' is NULL) AVL tree.
- * 'cmp' is the comparison function that takes two const pointers
- * pointing to the elements being compared (rather than the avl_nodes).
- * 'flags' is zero or more of the RD_AVL_F_.. flags.
- *
- * For thread-safe AVL trees supply RD_AVL_F_LOCKS in 'flags'.
- */
-rd_avl_t *rd_avl_init (rd_avl_t *ravl, rd_avl_cmp_t cmp, int flags);
-
-
-/**
- * 'ravl' locking functions.
- * Locking is performed automatically for all methods except for
- * those with the "_NL"/"_nl" suffix ("not locked") which expects
- * either read or write lock to be held.
- *
- * rdavl utilizes rwlocks to allow multiple concurrent read threads.
- */
-static RD_INLINE RD_UNUSED void rd_avl_rdlock (rd_avl_t *ravl) {
-        if (ravl->ravl_flags & RD_AVL_F_LOCKS)
-                rwlock_rdlock(&ravl->ravl_rwlock);
-}
-
-static RD_INLINE RD_UNUSED void rd_avl_wrlock (rd_avl_t *ravl) {
-        if (ravl->ravl_flags & RD_AVL_F_LOCKS)
-                rwlock_wrlock(&ravl->ravl_rwlock);
-}
-
-static RD_INLINE RD_UNUSED void rd_avl_rdunlock (rd_avl_t *ravl) {
-        if (ravl->ravl_flags & RD_AVL_F_LOCKS)
-                rwlock_rdunlock(&ravl->ravl_rwlock);
-}
-
-static RD_INLINE RD_UNUSED void rd_avl_wrunlock (rd_avl_t *ravl) {
-        if (ravl->ravl_flags & RD_AVL_F_LOCKS)
-                rwlock_wrunlock(&ravl->ravl_rwlock);
-}
-
-
-
-
-/**
- * Private API, don't use directly.
- */
-
-rd_avl_node_t *rd_avl_insert_node (rd_avl_t *ravl,
-                                   rd_avl_node_t *parent,
-                                   rd_avl_node_t *ran,
-                                   rd_avl_node_t **existing);
-
-static RD_UNUSED void *rd_avl_insert (rd_avl_t *ravl, void *elm,
-                            rd_avl_node_t *ran) {
-        rd_avl_node_t *existing = NULL;
-
-        memset(ran, 0, sizeof(*ran));
-        ran->ran_elm = elm;
-
-        rd_avl_wrlock(ravl);
-        ravl->ravl_root = rd_avl_insert_node(ravl, ravl->ravl_root,
-                                             ran, &existing);
-        rd_avl_wrunlock(ravl);
-
-        return existing ? existing->ran_elm : NULL;
-}
-
-rd_avl_node_t *rd_avl_remove_elm0 (rd_avl_t *ravl, rd_avl_node_t *parent,
-                                   const void *elm);
-
-static RD_INLINE RD_UNUSED
-void rd_avl_remove_elm (rd_avl_t *ravl, const void *elm) {
-        rd_avl_wrlock(ravl);
-        ravl->ravl_root = rd_avl_remove_elm0(ravl, ravl->ravl_root, elm);
-        rd_avl_wrunlock(ravl);
-}
-
-
-rd_avl_node_t *rd_avl_find_node (const rd_avl_t *ravl,
-                                 const rd_avl_node_t *begin,
-                                 const void *elm);
-
-
-static RD_INLINE RD_UNUSED void *rd_avl_find (rd_avl_t *ravl, const void *elm,
-                                              int dolock) {
-        const rd_avl_node_t *ran;
-        void *ret;
-
-        if (dolock)
-                rd_avl_rdlock(ravl);
-
-        ran = rd_avl_find_node(ravl, ravl->ravl_root, elm);
-        ret = ran ? ran->ran_elm : NULL;
-
-        if (dolock)
-                rd_avl_rdunlock(ravl);
-
-        return ret;
-}
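To make the removed AVL API concrete: an element type embeds an rd_avl_node_t and passes the field name to the macros, as the header comments above describe. This is a hedged sketch only; 'struct my_elem' and its comparator are hypothetical.

    struct my_elem {
            int            key;
            rd_avl_node_t  avlnode;   /* embedded node, used as 'field' below */
    };

    static int my_elem_cmp (const void *_a, const void *_b) {
            const struct my_elem *a = _a, *b = _b;
            return a->key - b->key;
    }

    static void avl_example (void) {
            rd_avl_t avl;
            struct my_elem e = { 1 }, lookup = { 1 };
            struct my_elem *prev, *found;

            rd_avl_init(&avl, my_elem_cmp, RD_AVL_F_LOCKS);

            /* On key collision the previous element is returned, else NULL. */
            prev = RD_AVL_INSERT(&avl, &e, avlnode);

            /* Search by value using the comparator. */
            found = RD_AVL_FIND(&avl, &lookup);

            (void)prev; (void)found;
            rd_avl_destroy(&avl);
    }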


http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/mklove/Makefile.base
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/mklove/Makefile.base b/thirdparty/librdkafka-0.11.4/mklove/Makefile.base
new file mode 100755
index 0000000..d8af4ec
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/mklove/Makefile.base
@@ -0,0 +1,215 @@
+# Base Makefile providing various standard targets
+# Part of mklove suite but may be used independently.
+
+MKL_RED?=	\033[031m
+MKL_GREEN?=	\033[032m
+MKL_YELLOW?=	\033[033m
+MKL_BLUE?=	\033[034m
+MKL_CLR_RESET?=	\033[0m
+
+DEPS=		$(OBJS:%.o=%.d)
+
+# TOPDIR is the parent of the mklove directory, i.e., the project top directory.
+# We do it with two dir calls instead of /.. to support mklove being symlinked.
+MKLOVE_DIR := $(dir $(lastword $(MAKEFILE_LIST)))
+TOPDIR = $(MKLOVE_DIR:mklove/=.)
+
+
+# Convert LIBNAME ("libxyz") to "xyz"
+LIBNAME0=$(LIBNAME:lib%=%)
+
+# Silence lousy default ARFLAGS (rv)
+ARFLAGS=
+
+ifndef MKL_MAKEFILE_CONFIG
+-include $(TOPDIR)/Makefile.config
+endif
+
+_UNAME_S := $(shell uname -s)
+ifeq ($(_UNAME_S),Darwin)
+	LIBFILENAME=$(LIBNAME).$(LIBVER)$(SOLIB_EXT)
+	LIBFILENAMELINK=$(LIBNAME)$(SOLIB_EXT)
+else
+	LIBFILENAME=$(LIBNAME)$(SOLIB_EXT).$(LIBVER)
+	LIBFILENAMELINK=$(LIBNAME)$(SOLIB_EXT)
+endif
+
+INSTALL?=		install
+INSTALL_PROGRAM?=	$(INSTALL)
+INSTALL_DATA?=		$(INSTALL) -m 644
+
+prefix?=	/usr/local
+exec_prefix?=	$(prefix)
+bindir?=	$(exec_prefix)/bin
+sbindir?=	$(exec_prefix)/sbin
+libexecdir?=	$(exec_prefix)/libexec/  # append PKGNAME on install
+datarootdir?=	$(prefix)/share
+datadir?=	$(datarootdir)		 # append PKGNAME on install
+sysconfdir?=	$(prefix)/etc
+sharedstatedir?=$(prefix)/com
+localestatedir?=$(prefix)/var
+runstatedir?=	$(localestatedir)/run
+includedir?=	$(prefix)/include
+docdir?=	$(datarootdir)/doc/$(PKGNAME)
+infodir?=	$(datarootdir)/info
+libdir?=	$(prefix)/lib
+localedir?=	$(datarootdir)/locale
+pkgconfigdir?=	$(libdir)/pkgconfig
+mandir?=	$(datarootdir)/man
+man1dir?=	$(mandir)/man1
+man2dir?=	$(mandir)/man2
+man3dir?=	$(mandir)/man3
+man4dir?=	$(mandir)/man4
+man5dir?=	$(mandir)/man5
+man6dir?=	$(mandir)/man6
+man7dir?=	$(mandir)/man7
+man8dir?=	$(mandir)/man8
+
+
+# Checks that mklove is set up and ready for building
+mklove-check:
+	@if [ ! -f "$(TOPDIR)/Makefile.config" ]; then \
+		printf "$(MKL_RED)$(TOPDIR)/Makefile.config missing: please run ./configure$(MKL_CLR_RESET)\n" ; \
+		exit 1 ; \
+	fi
+
+%.o: %.c
+	$(CC) -MD -MP $(CPPFLAGS) $(CFLAGS) -c $< -o $@
+
+%.o: %.cpp
+	$(CXX) -MD -MP $(CPPFLAGS) $(CXXFLAGS) -c $< -o $@
+
+
+lib: $(LIBFILENAME) $(LIBNAME).a $(LIBFILENAMELINK) lib-gen-pkg-config
+
+$(LIBNAME).lds: #overridable
+
+$(LIBFILENAME): $(OBJS) $(LIBNAME).lds
+	@printf "$(MKL_YELLOW)Creating shared library $@$(MKL_CLR_RESET)\n"
+	$(CC) $(LDFLAGS) $(LIB_LDFLAGS) $(OBJS) -o $@ $(LIBS)
+
+$(LIBNAME).a:	$(OBJS)
+	@printf "$(MKL_YELLOW)Creating static library $@$(MKL_CLR_RESET)\n"
+	$(AR) rcs$(ARFLAGS) $@ $(OBJS)
+
+$(LIBFILENAMELINK): $(LIBFILENAME)
+	@printf "$(MKL_YELLOW)Creating $@ symlink$(MKL_CLR_RESET)\n"
+	rm -f "$@" && ln -s "$^" "$@"
+
+
+# pkg-config .pc file definition
+ifeq ($(GEN_PKG_CONFIG),y)
+define _PKG_CONFIG_DEF
+prefix=$(prefix)
+libdir=$(libdir)
+includedir=$(includedir)
+
+Name: $(LIBNAME)
+Description: $(MKL_APP_DESC_ONELINE)
+Version: $(MKL_APP_VERSION)
+Cflags: -I$${includedir}
+Libs: -L$${libdir} -l$(LIBNAME0)
+Libs.private: $(LIBS)
+endef
+
+export _PKG_CONFIG_DEF
+
+define _PKG_CONFIG_STATIC_DEF
+prefix=$(prefix)
+libdir=$(libdir)
+includedir=$(includedir)
+
+Name: $(LIBNAME)-static
+Description: $(MKL_APP_DESC_ONELINE) (static)
+Version: $(MKL_APP_VERSION)
+Cflags: -I$${includedir}
+Libs: -L$${libdir} $${libdir}/$(LIBNAME).a $(LIBS)
+endef
+
+export _PKG_CONFIG_STATIC_DEF
+
+$(LIBNAME0).pc: $(TOPDIR)/Makefile.config
+	@printf "$(MKL_YELLOW)Generating pkg-config file $@$(MKL_CLR_RESET)\n"
+	@echo "$$_PKG_CONFIG_DEF" > $@
+
+$(LIBNAME0)-static.pc: $(TOPDIR)/Makefile.config
+	@printf "$(MKL_YELLOW)Generating pkg-config file $@$(MKL_CLR_RESET)\n"
+	@echo "$$_PKG_CONFIG_STATIC_DEF" > $@
+
+lib-gen-pkg-config: $(LIBNAME0).pc $(LIBNAME0)-static.pc
+
+lib-clean-pkg-config:
+	rm -f $(LIBNAME0).pc $(LIBNAME0)-static.pc
+else
+lib-gen-pkg-config:
+lib-clean-pkg-config:
+endif
+
+
+$(BIN): $(OBJS)
+	@printf "$(MKL_YELLOW)Creating program $@$(MKL_CLR_RESET)\n"
+	$(CC) $(CPPFLAGS) $(LDFLAGS) $(OBJS) -o $@ $(LIBS)
+
+
+file-check:
+	@printf "$(MKL_YELLOW)Checking $(LIBNAME) integrity$(MKL_CLR_RESET)\n"
+	@RET=true ; \
+	for f in $(CHECK_FILES) ; do \
+		printf "%-30s " $$f ; \
+		if [ -f "$$f" ]; then \
+			printf "$(MKL_GREEN)OK$(MKL_CLR_RESET)\n" ; \
+		else \
+			printf "$(MKL_RED)MISSING$(MKL_CLR_RESET)\n" ; \
+			RET=false ; \
+		fi ; \
+	done ; \
+	$$RET
+
+
+lib-install:
+	@printf "$(MKL_YELLOW)Install $(LIBNAME) to $$DESTDIR$(prefix)$(MKL_CLR_RESET)\n"
+	$(INSTALL) -d $$DESTDIR$(includedir)/$(PKGNAME) ; \
+	$(INSTALL) -d $$DESTDIR$(libdir) ; \
+	$(INSTALL) $(HDRS) $$DESTDIR$(includedir)/$(PKGNAME) ; \
+	$(INSTALL) $(LIBNAME).a $$DESTDIR$(libdir) ; \
+	$(INSTALL) $(LIBFILENAME) $$DESTDIR$(libdir) ; \
+	[ -f "$(LIBNAME0).pc" ] && ( \
+		$(INSTALL) -d $$DESTDIR$(pkgconfigdir) ; \
+		$(INSTALL) -m 0644 $(LIBNAME0).pc $$DESTDIR$(pkgconfigdir) \
+	) ; \
+	[ -f "$(LIBNAME0)-static.pc" ] && ( \
+		$(INSTALL) -d $$DESTDIR$(pkgconfigdir) ; \
+		$(INSTALL) -m 0644 $(LIBNAME0)-static.pc $$DESTDIR$(pkgconfigdir) \
+	) ; \
+	(cd $$DESTDIR$(libdir) && ln -sf $(LIBFILENAME) $(LIBFILENAMELINK))
+
+lib-uninstall:
+	@printf "$(MKL_YELLOW)Uninstall $(LIBNAME) from $$DESTDIR$(prefix)$(MKL_CLR_RESET)\n"
+	for hdr in $(HDRS) ; do \
+		rm -f $$DESTDIR$(includedir)/$(PKGNAME)/$$hdr ; done
+	rm -f $$DESTDIR$(libdir)/$(LIBNAME).a
+	rm -f $$DESTDIR$(libdir)/$(LIBFILENAME)
+	rm -f $$DESTDIR$(libdir)/$(LIBFILENAMELINK)
+	rmdir $$DESTDIR$(includedir)/$(PKGNAME) || true
+
+
+
+bin-install:
+	@printf "$(MKL_YELLOW)Install $(BIN) to $$DESTDIR$(prefix)$(MKL_CLR_RESET)\n"
+	$(INSTALL) -d $$DESTDIR$(bindir) && \
+	$(INSTALL) $(BIN) $$DESTDIR$(bindir) 
+
+bin-uninstall:
+	@printf "$(MKL_YELLOW)Uninstall $(BIN) from $$DESTDIR$(prefix)$(MKL_CLR_RESET)\n"
+	rm -f $$DESTDIR$(bindir)/$(BIN)
+
+
+generic-clean:
+	rm -f $(OBJS) $(DEPS)
+
+lib-clean: generic-clean lib-clean-pkg-config
+	rm -f $(LIBNAME)*.a $(LIBFILENAME) $(LIBFILENAMELINK) \
+		$(LIBNAME).lds
+
+bin-clean: generic-clean
+	rm -f $(BIN)

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/mklove/modules/configure.atomics
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/mklove/modules/configure.atomics b/thirdparty/librdkafka-0.11.4/mklove/modules/configure.atomics
new file mode 100644
index 0000000..31639a7
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/mklove/modules/configure.atomics
@@ -0,0 +1,144 @@
+#!/bin/bash
+#
+# Checks for atomic ops:
+#  compiler builtins (__sync_..) and portable libatomic (__atomic_..)
+# Will also provide abstraction by defining the prefix to use.
+#
+# Sets:
+#  HAVE_ATOMICS
+#  HAVE_ATOMICS_32
+#  HAVE_ATOMICS_64
+#  HAVE_ATOMICS_32_ATOMIC   __atomic interface
+#  HAVE_ATOMICS_32_SYNC     __sync interface
+#  HAVE_ATOMICS_64_ATOMIC   __atomic interface
+#  HAVE_ATOMICS_64_SYNC     __sync interface
+#  WITH_LIBATOMIC
+#  LIBS
+#
+#  ATOMIC_OP(OP1,OP2,PTR,VAL)
+#  ATOMIC_OP32(OP1,OP2,PTR,VAL)
+#  ATOMIC_OP64(OP1,OP2,PTR,VAL)
+#   where op* is 'add,sub,fetch'
+#   e.g:  ATOMIC_OP32(add, fetch, &i, 10)
+#         becomes __atomic_add_fetch(&i, 10, ..) or
+#                 __sync_add_and_fetch(&i, 10)
+#
+
+function checks {
+
+
+    # We prefer the newer __atomic stuff, but 64-bit atomics might
+    # require linking with -latomic, so we need to perform these tests
+    # in the proper order:
+    #   __atomic 32
+    #   __atomic 32 -latomic
+    #   __sync 32
+    #
+    #   __atomic 64
+    #   __atomic 64 -latomic
+    #   __sync 64
+
+    local _libs=
+    local _a32="__atomic_ ## OP1 ## _ ## OP2(PTR, VAL, __ATOMIC_SEQ_CST)"
+    local _a64="__atomic_ ## OP1 ## _ ## OP2(PTR, VAL, __ATOMIC_SEQ_CST)"
+
+    # 32-bit:
+    # Try fully builtin __atomic
+    if ! mkl_compile_check __atomic_32 HAVE_ATOMICS_32 cont CC "" \
+        "
+#include <inttypes.h>
+int32_t foo (int32_t i) {
+  return __atomic_add_fetch(&i, 1, __ATOMIC_SEQ_CST);
+}"
+        then
+        # Try __atomic with -latomic
+        if mkl_compile_check --ldflags="-latomic" __atomic_32_lib HAVE_ATOMICS_32 \
+            cont CC "" \
+            "
+#include <inttypes.h>
+int32_t foo (int32_t i) {
+  return __atomic_add_fetch(&i, 1, __ATOMIC_SEQ_CST);
+}"
+        then
+            _libs="-latomic"
+            mkl_allvar_set "__atomic_32_lib" "HAVE_ATOMICS_32_ATOMIC" "y"
+        else
+            # Try __sync interface
+            if mkl_compile_check __sync_32 HAVE_ATOMICS_32 disable CC "" \
+                "
+#include <inttypes.h>
+int32_t foo (int32_t i) {
+  return __sync_add_and_fetch(&i, 1);
+}"
+                then
+                _a32="__sync_ ## OP1 ## _and_ ## OP2(PTR, VAL)"
+                mkl_allvar_set "__sync_32" "HAVE_ATOMICS_32_SYNC" "y"
+            else
+                _a32=""
+            fi
+        fi
+    else
+        mkl_allvar_set "__atomic_32" "HAVE_ATOMICS_32_ATOMIC" "y"
+    fi
+
+
+    if [[ ! -z $_a32 ]]; then
+        mkl_define_set "atomic_32" "ATOMIC_OP32(OP1,OP2,PTR,VAL)" "code:$_a32"
+    fi
+
+
+
+    # 64-bit:
+    # Try fully builtin __atomic
+    if ! mkl_compile_check __atomic_64 HAVE_ATOMICS_64 cont CC "" \
+        "
+#include <inttypes.h>
+int64_t foo (int64_t i) {
+  return __atomic_add_fetch(&i, 1, __ATOMIC_SEQ_CST);
+}"
+        then
+        # Try __atomic with -latomic
+        if mkl_compile_check --ldflags="-latomic" __atomic_64_lib HAVE_ATOMICS_64 \
+            cont CC "" \
+            "
+#include <inttypes.h>
+int64_t foo (int64_t i) {
+  return __atomic_add_fetch(&i, 1, __ATOMIC_SEQ_CST);
+}"
+        then
+            _libs="-latomic"
+            mkl_allvar_set "__atomic_64_lib" "HAVE_ATOMICS_64_ATOMIC" "y"
+        else
+            # Try __sync interface
+            if mkl_compile_check __sync_64 HAVE_ATOMICS_64 disable CC "" \
+                "
+#include <inttypes.h>
+int64_t foo (int64_t i) {
+  return __sync_add_and_fetch(&i, 1);
+}"
+                then
+                _a64="__sync_ ## OP1 ## _and_ ## OP2 (PTR, VAL)"
+                mkl_allvar_set "__sync_64" "HAVE_ATOMICS_64_SYNC" "y"
+            else
+                _a64=""
+            fi
+        fi
+    else
+        mkl_allvar_set "__atomic_64" "HAVE_ATOMICS_64_ATOMIC" "y"
+    fi
+
+
+    if [[ ! -z $_a64 ]]; then
+        mkl_define_set "atomic_64" "ATOMIC_OP64(OP1,OP2,PTR,VAL)" "code:$_a64"
+
+        # Define generic ATOMIC() macro identical to 64-bit atomics
+        mkl_define_set "atomic_64" "ATOMIC_OP(OP1,OP2,PTR,VAL)" "code:$_a64"
+    fi
+
+
+    if [[ ! -z $_libs ]]; then
+        mkl_mkvar_append LDFLAGS LDFLAGS "-Wl,--as-needed"
+        mkl_mkvar_append LIBS LIBS "$_libs"
+    fi
+
+}
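From C, the ATOMIC_OP32()/ATOMIC_OP64() macros generated by this module are used as in the sketch below. Illustrative only: it assumes one of the checks above succeeded, so that the generated config.h carries the definitions documented in the header comment.

    #include "config.h"       /* generated by configure */
    #include <inttypes.h>

    static int32_t cnt32;
    static int64_t cnt64;

    static int64_t bump (void) {
            /* Expands to __atomic_add_fetch(&cnt32, 1, __ATOMIC_SEQ_CST)
             * or __sync_add_and_fetch(&cnt32, 1), depending on the result
             * of the checks above. */
            ATOMIC_OP32(add, fetch, &cnt32, 1);
            return ATOMIC_OP64(add, fetch, &cnt64, 10);
    }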

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/mklove/modules/configure.base
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/mklove/modules/configure.base b/thirdparty/librdkafka-0.11.4/mklove/modules/configure.base
new file mode 100644
index 0000000..a776e43
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/mklove/modules/configure.base
@@ -0,0 +1,1771 @@
+#!/bin/bash
+#
+#
+# mklove base configure module, implements the mklove configure framework
+#
+
+MKL_MODULES="base"
+MKL_CACHEVARS=""
+MKL_MKVARS=""
+MKL_DEFINES=""
+MKL_CHECKS=""
+MKL_LOAD_STACK=""
+
+MKL_IDNEXT=1
+
+MKL_OUTMK=_mklout.mk
+MKL_OUTH=_mklout.h
+MKL_OUTDBG=config.log
+
+MKL_GENERATORS="base:mkl_generate_late_vars"
+MKL_CLEANERS=""
+
+MKL_FAILS=""
+MKL_LATE_VARS=""
+
+MKL_OPTS_SET=""
+
+MKL_RED=""
+MKL_GREEN=""
+MKL_YELLOW=""
+MKL_BLUE=""
+MKL_CLR_RESET=""
+
+
+MKL_NO_DOWNLOAD=0
+
+if [[ -z "$MKL_REPO_URL" ]]; then
+    MKL_REPO_URL="http://github.com/edenhill/mklove/raw/master"
+fi
+
+
+
+# Default mklove directory to PWD/mklove
+[[ -z "$MKLOVE_DIR" ]] && MKLOVE_DIR=mklove
+
+
+###########################################################################
+#
+# Variable types:
+#   env      - Standard environment variables.
+#   var      - mklove runtime variable, cached or not.
+#   mkvar    - Makefile variables, also sets runvar
+#   define   - config.h variables/defines
+#
+###########################################################################
+
+# Low level variable assignment
+# Arguments:
+#  variable name
+#  variable value
+function mkl_var0_set {
+    export "$1"="$2"
+}
+
+# Sets a runtime variable (only used during configure)
+# If cache=1 these variables are cached to config.cache.
+# Arguments:
+#  variable name
+#  variable value
+#  [ "cache" ]
+function mkl_var_set {
+    mkl_var0_set "$1" "$2"
+    if [[ $3 == "cache" ]]; then
+        if ! mkl_in_list "$MKL_CACHEVARS" "$1" ; then
+            MKL_CACHEVARS="$MKL_CACHEVARS $1"
+        fi
+    fi
+}
+
+# Unsets a mkl variable
+# Arguments:
+#  variable name
+function mkl_var_unset {
+    unset $1
+}
+
+# Appends to a mkl variable (space delimited)
+# Arguments:
+#  variable name
+#  variable value
+function mkl_var_append {
+    if [[ -z ${!1} ]]; then
+        mkl_var_set "$1" "$2"
+    else
+        mkl_var0_set "$1" "${!1} $2"
+    fi
+}
+
+
+# Prepends to a mkl variable (space delimited)
+# Arguments:
+#  variable name
+#  variable value
+function mkl_var_prepend {
+    if [[ -z ${!1} ]]; then
+        mkl_var_set "$1" "$2"
+    else
+        mkl_var0_set "$1" "$2 ${!1}"
+    fi
+}
+
+# Shift the first word off a variable.
+# Arguments:
+#  variable name
+function mkl_var_shift {
+    local n="${!1}"
+    mkl_var0_set "$1" "${n#* }"
+    return 0
+}
+
+
+# Returns the contents of mkl variable
+# Arguments:
+#  variable name
+function mkl_var_get {
+    echo "${!1}"
+}
+
+
+
+
+# Set environment variable (runtime)
+# These variables are neither cached nor written to any of the output files;
+# it's simply a helper wrapper for standard environment variables.
+# Arguments:
+#  varname
+#  varvalue
+function mkl_env_set {
+    mkl_var0_set "$1" "$2"
+}
+
+# Append to environment variable
+# Arguments:
+#  varname
+#  varvalue
+#  [ separator (" ") ]
+function mkl_env_append {
+    local sep=" "
+    if [[ -z ${!1} ]]; then
+        mkl_env_set "$1" "$2"
+    else
+        [ ! -z ${3} ] && sep="$3"
+        mkl_var0_set "$1" "${!1}${sep}$2"
+    fi
+
+}
+
+# Prepend to environment variable
+# Arguments:
+#  varname
+#  varvalue
+#  [ separator (" ") ]
+function mkl_env_prepend {
+    local sep=" "
+    if [[ -z ${!1} ]]; then
+        mkl_env_set "$1" "$2"
+    else
+        [ ! -z ${3} ] && sep="$3"
+        mkl_var0_set "$1" "$2${sep}${!1}"
+    fi
+
+}
+
+
+
+
+# Set a make variable (Makefile.config)
+# Arguments:
+#  config name
+#  variable name
+#  value
+function mkl_mkvar_set {
+    if [[ ! -z $2 ]]; then
+        mkl_env_set "$2" "$3"
+        mkl_in_list "$MKL_MKVARS" "$2"|| mkl_env_append MKL_MKVARS $2
+    fi
+}
+
+
+# Prepends to a make variable (Makefile.config)
+# Arguments:
+#  config name
+#  variable name
+#  value
+function mkl_mkvar_prepend {
+    if [[ ! -z $2 ]]; then
+        mkl_env_prepend "$2" "$3"
+        mkl_in_list "$MKL_MKVARS" "$2"|| mkl_env_append MKL_MKVARS $2
+    fi
+}
+
+
+# Appends to a make variable (Makefile.config)
+# Arguments:
+#  config name
+#  variable name
+#  value
+function mkl_mkvar_append {
+    if [[ ! -z $2 ]]; then
+        mkl_env_append "$2" "$3"
+        mkl_in_list "$MKL_MKVARS" "$2"|| mkl_env_append MKL_MKVARS $2
+    fi
+}
+
+
+# Prepends to a make variable (Makefile.config)
+# Arguments:
+#  config name
+#  variable name
+#  value
+function mkl_mkvar_prepend {
+    if [[ ! -z $2 ]]; then
+        mkl_env_prepend "$2" "$3"
+        mkl_in_list "$MKL_MKVARS" "$2"|| mkl_env_append MKL_MKVARS $2
+    fi
+}
+
+# Return mkvar variable value
+# Arguments:
+#  variable name
+function mkl_mkvar_get {
+    [[ -z ${!1} ]] && return 1
+    echo ${!1}
+    return 0
+}
+
+
+
+# Defines a config header define (config.h)
+# Arguments:
+#  config name
+#  define name
+#  define value (optional, default: 1)
+#   if value starts with code: then no "" are added
+function mkl_define_set {
+
+    if [[ -z $2 ]]; then
+        return 0
+    fi
+
+    local stmt=""
+    local defid=
+    if [[ $2 = *\(* ]]; then
+        # macro
+        defid="def_${2%%(*}"
+    else
+        # define
+        defid="def_$2"
+    fi
+
+    [[ -z $1 ]] || stmt="// $1\n"
+
+    local val="$3"
+    if [[ -z "$val" ]]; then
+        val="$(mkl_def $2 1)"
+    fi
+
+    # Define as code, string or integer?
+    if [[ $val == code:* ]]; then
+        # Code block, copy verbatim without quotes, strip code: prefix
+        val=${val#code:}
+    elif [[ ! ( "$val" =~ ^[0-9]+([lL]?[lL][dDuU]?)?$ || \
+        "$val" =~ ^0x[0-9a-fA-F]+([lL]?[lL][dDuU]?)?$ ) ]]; then
+        # String: quote
+        val="\"$val\""
+    fi
+    # else: unquoted integer/hex
+
+    stmt="${stmt}#define $2 $val"
+    mkl_env_set "$defid" "$stmt"
+    mkl_env_append MKL_DEFINES "$defid"
+}
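As a hedged illustration of the three value classes handled by mkl_define_set above, the entries written to the generated config.h end up looking roughly like this (the names are made up for illustration):

    // example_string         (plain value: quoted as a C string)
    #define EXAMPLE_NAME "librdkafka"
    // example_int            (integer/hex value: emitted unquoted)
    #define EXAMPLE_DEPTH 0x10
    // atomic_32              ("code:" value: copied verbatim, without quotes)
    #define ATOMIC_OP32(OP1,OP2,PTR,VAL) __atomic_ ## OP1 ## _ ## OP2(PTR, VAL, __ATOMIC_SEQ_CST)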
+
+
+
+
+
+# Sets "all" configuration variables, that is:
+# for name set: Makefile variable, config.h define
+# Will convert value "y"|"n" to 1|0 for config.h
+# Arguments:
+#  config name
+#  variable name
+#  value
+function mkl_allvar_set {
+    mkl_mkvar_set "$1" "$2" "$3"
+    local val=$3
+    if [[ $3 = "y" ]]; then
+        val=1
+    elif [[ $3 = "n" ]]; then
+        val=0
+    fi
+    mkl_define_set "$1" "$2" "$val"
+}
+
+
+
+
+###########################################################################
+#
+#
+# Check failure functionality
+#
+#
+###########################################################################
+
+
+# Summarize all fatal failures and then exits.
+function mkl_fail_summary {
+    echo "
+
+"
+
+    local pkg_cmd=""
+    local install_pkgs=""
+    mkl_err "###########################################################"
+    mkl_err "###                  Configure failed                   ###"
+    mkl_err "###########################################################"
+    mkl_err "### Accumulated failures:                               ###"
+    mkl_err "###########################################################"
+    local n
+    for n in $MKL_FAILS ; do
+        local conf=$(mkl_var_get MKL_FAIL__${n}__conf)
+        mkl_err  " $conf ($(mkl_var_get MKL_FAIL__${n}__define)) $(mkl_meta_get $conf name)"
+        if mkl_meta_exists $conf desc; then
+            mkl_err0 "      desc: $MKL_YELLOW$(mkl_meta_get $conf desc)$MKL_CLR_RESET"
+        fi
+        mkl_err0 "    module: $(mkl_var_get MKL_FAIL__${n}__module)"
+        mkl_err0 "    action: $(mkl_var_get MKL_FAIL__${n}__action)"
+        mkl_err0 "    reason:
+$(mkl_var_get MKL_FAIL__${n}__reason)
+"
+        # Dig up some metadata to assist the user
+        case $MKL_DISTRO in
+            Debian|Ubuntu|*)
+                local debs=$(mkl_meta_get $conf "deb")
+                pkg_cmd="sudo apt-get install"
+                if [[ ${#debs} > 0 ]]; then
+                    install_pkgs="$install_pkgs $debs"
+                fi
+                ;;
+        esac
+    done
+
+    if [[ ! -z $install_pkgs ]]; then
+        mkl_err "###########################################################"
+        mkl_err "### Installing the following packages might help:       ###"
+        mkl_err "###########################################################"
+        mkl_err0 "$pkg_cmd $install_pkgs"
+        mkl_err0 ""
+    fi
+    exit 1
+}
+
+
+# Checks if there were failures.
+# Returns 0 if there were no failures, else calls failure summary and exits.
+function mkl_check_fails {
+    if [[ ${#MKL_FAILS} = 0 ]]; then
+        return 0
+    fi
+    mkl_fail_summary
+}
+
+# A check has failed but we want to carry on (and we should!).
+# We fail it all later.
+# Arguments:
+#  config name
+#  define name
+#  action
+#  reason
+function mkl_fail {
+    local n="$(mkl_env_esc "$1")"
+    mkl_var_set "MKL_FAIL__${n}__conf" "$1"
+    mkl_var_set "MKL_FAIL__${n}__module" $MKL_MODULE
+    mkl_var_set "MKL_FAIL__${n}__define" $2
+    mkl_var_set "MKL_FAIL__${n}__action" "$3"
+    if [[ -z $(mkl_var_get "MKL_FAIL__${n}__reason") ]]; then
+        mkl_var_set "MKL_FAIL__${n}__reason" "$4"
+    else
+        mkl_var_append "MKL_FAIL__${n}__reason" "
+And also:
+$4"
+    fi
+    mkl_in_list "$MKL_FAILS" "$n" || mkl_var_append MKL_FAILS "$n"
+}
+
+
+# A check failed, handle it
+# Arguments:
+#  config name
+#  define name
+#  action (fail|disable|ignore|cont)
+#  reason
+function mkl_check_failed {
+    # Override action based on require directives, unless the action is
+    # set to cont (for fallthrough to subsequent tests).
+    local action="$3"
+    if [[ $3 != "cont" ]]; then
+        action=$(mkl_meta_get "MOD__$MKL_MODULE" "override_action" $3)
+    fi
+
+    # --fail-fatal option
+    [[ $MKL_FAILFATAL ]] && action="fail"
+
+    mkl_check_done "$1" "$2" "$action" "failed"
+
+    mkl_dbg "Check $1 ($2, action $action (originally $3)) failed: $4"
+
+
+    case $action in
+        fail)
+            # Check failed fatally, fail everything eventually
+            mkl_fail "$1" "$2" "$3" "$4$extra"
+            return 1
+            ;;
+
+        disable)
+            # Check failed, disable
+            [[ ! -z $2 ]] && mkl_mkvar_set "$1" "$2" "n"
+            return 1
+            ;;
+        ignore)
+            # Check failed but we ignore the results and set it anyway.
+            [[ ! -z $2 ]] && mkl_define_set "$1" "$2" "1"
+            [[ ! -z $2 ]] && mkl_mkvar_set "$1" "$2" "y"
+            return 1
+            ;;
+        cont)
+            # Check failed but we ignore the results and do nothing.
+            return 0
+            ;;
+    esac
+}
+
+
+
+
+###########################################################################
+#
+#
+# Output generators
+#
+#
+###########################################################################
+
+# Generate late variables.
+# Late variables are those referenced in command line option defaults
+# but then never set by --option.
+function mkl_generate_late_vars {
+    local n
+    for n in $MKL_LATE_VARS ; do
+        local func=${n%:*}
+        local safeopt=${func#opt_}
+        local val=${n#*:}
+        if mkl_in_list "$MKL_OPTS_SET" "$safeopt" ; then
+            # Skip options set explicitly with --option
+            continue
+        fi
+        # Expand variable references "\$foo" by calling eval
+        # and pass it to the opt_... function.
+        $func "$(eval echo $val)"
+    done
+}
+
+# Generate output files.
+# Must be called following a successful configure run.
+function mkl_generate {
+    local mf=
+    for mf in $MKL_GENERATORS ; do
+        MKL_MODULE=${mf%:*}
+        local func=${mf#*:}
+        $func || exit 1
+    done
+
+    mkl_write_mk "# Automatically generated by $0 $*"
+    mkl_write_mk "# Config variables"
+    mkl_write_mk "#"
+    mkl_write_mk "# Generated by:"
+    mkl_write_mk "# $MKL_CONFIGURE_ARGS"
+    mkl_write_mk ""
+
+    # This variable is used by Makefile.base to avoid multiple inclusions.
+    mkl_write_mk "MKL_MAKEFILE_CONFIG=y"
+
+    # Export colors to Makefile.config
+    mkl_write_mk "MKL_RED=\t${MKL_RED}"
+    mkl_write_mk "MKL_GREEN=\t${MKL_GREEN}"
+    mkl_write_mk "MKL_YELLOW=\t${MKL_YELLOW}"
+    mkl_write_mk "MKL_BLUE=\t${MKL_BLUE}"
+    mkl_write_mk "MKL_CLR_RESET=\t${MKL_CLR_RESET}"
+
+    local n=
+    for n in $MKL_MKVARS ; do
+	# Some special variables should be prefixable by the caller, so
+	# define them in the makefile as appends.
+	local op="="
+	case $n in
+	    CFLAGS|CPPFLAGS|CXXFLAGS|LDFLAGS|LIBS)
+		op="+="
+		;;
+	esac
+        mkl_write_mk "$n$op\t${!n}"
+    done
+    mkl_write_mk "# End of config variables"
+
+    MKL_OUTMK_FINAL=Makefile.config
+    mv $MKL_OUTMK $MKL_OUTMK_FINAL
+
+    echo "Generated $MKL_OUTMK_FINAL"
+
+    # Generate config.h
+    mkl_write_h "// Automatically generated by $0 $*"
+    mkl_write_h "#ifndef _CONFIG_H_"
+    mkl_write_h "#define _CONFIG_H_"
+    for n in $MKL_DEFINES ; do
+        mkl_write_h "${!n}"
+    done
+    mkl_write_h "#endif /* _CONFIG_H_ */"
+
+    MKL_OUTH_FINAL=config.h
+    mv $MKL_OUTH $MKL_OUTH_FINAL
+
+    echo "Generated $MKL_OUTH_FINAL"
+}
+
+# Remove file noisily, if it exists
+function mkl_rm {
+    local fname="$1"
+    if [[ -f $fname ]]; then
+        echo "Removing $fname"
+        rm -f "$fname"
+    fi
+}
+
+# Remove files generated by configure
+function mkl_clean {
+    for fname in Makefile.config config.h config.cache config.log ; do
+        mkl_rm "$fname"
+    done
+
+    local mf=
+    for mf in $MKL_CLEANERS ; do
+        MKL_MODULE=${mf%:*}
+        local func=${mf#*:}
+        $func || exit 1
+    done
+
+}
+
+
+# Print summary of successful configure run
+function mkl_summary {
+
+    echo "
+Configuration summary:"
+    local n=
+    for n in $MKL_MKVARS ; do
+        # Skip the boring booleans
+        if [[ $n == WITH_* || $n == WITHOUT_* || $n == HAVE_* || $n == def_* ]]; then
+            continue
+        fi
+        printf "  %-24s %s\n" "$n" "${!n}"
+    done
+}
+
+
+
+# Write to mk file
+# Argument:
+#  string ..
+function mkl_write_mk {
+    echo -e "$*" >> $MKL_OUTMK
+}
+
+# Write to header file
+# Argument:
+#  string ..
+function mkl_write_h {
+    echo -e "$*" >> $MKL_OUTH
+}
+
+
+
+###########################################################################
+#
+#
+# Logging and debugging
+#
+#
+###########################################################################
+
+# Debug print
+# Only visible on terminal if MKL_DEBUG is set.
+# Always written to config.log
+# Argument:
+#  string ..
+function mkl_dbg {
+    if [[ ! -z $MKL_DEBUG ]]; then
+        echo -e "${MKL_BLUE}DBG:$$: $*${MKL_CLR_RESET}" 1>&2
+    fi
+    echo "DBG: $*" >> $MKL_OUTDBG
+}
+
+# Error print (with color)
+# Always printed to terminal and config.log
+# Argument:
+#  string ..
+function mkl_err {
+    echo -e "${MKL_RED}$*${MKL_CLR_RESET}" 1>&2
+    echo "$*" >> $MKL_OUTDBG
+}
+
+# Same as mkl_err but without coloring
+# Argument:
+#  string ..
+function mkl_err0 {
+    echo -e "$*" 1>&2
+    echo "$*" >> $MKL_OUTDBG
+}
+
+# Standard print
+# Always printed to terminal and config.log
+# Argument:
+#  string ..
+function mkl_info {
+    echo -e "$*" 1>&2
+    echo -e "$*" >> $MKL_OUTDBG
+}
+
+
+
+
+
+
+
+###########################################################################
+#
+#
+# Misc helpers
+#
+#
+###########################################################################
+
+# Returns the absolute path (but not necessarily canonical) of the first argument
+function mkl_abspath {
+    echo $1 | sed -e "s|^\([^/]\)|$PWD/\1|"
+}
+
+# Returns true (0) if function $1 exists, else false (1)
+function mkl_func_exists {
+    declare -f "$1" > /dev/null
+    return $?
+}
+
+# Rename function.
+# Returns 0 on success or 1 if old function (origname) was not defined.
+# Arguments:
+#   origname
+#   newname
+function mkl_func_rename {
+    if ! mkl_func_exists $1 ; then
+        return 1
+    fi
+    local orig=$(declare -f $1)
+    local new="$2${orig#$1}"
+    eval "$new"
+    unset -f "$1"
+    return 0
+}
+
+
+# Push module function for later call by mklove.
+# The function is renamed to an internal name.
+# Arguments:
+#  list variable name
+#  module name
+#  function name
+function mkl_func_push {
+    local newfunc="__mkl__f_${2}_$(( MKL_IDNEXT++ ))"
+    if mkl_func_rename "$3" "$newfunc" ; then
+        mkl_var_append "$1" "$2:$newfunc"
+    fi
+}
+
+
+
+# Returns value, or the default string if value is empty.
+# Arguments:
+#  value
+#  default
+function mkl_def {
+    if [[ ! -z $1 ]]; then
+        echo $1
+    else
+        echo $2
+    fi
+}
+
+
+# Render a string (e.g., evaluate its $varrefs)
+# Arguments:
+#  string
+function mkl_render {
+    if [[ $* == *\$* ]]; then
+        eval "echo $*"
+    else
+        echo "$*"
+    fi
+}
+
+# Escape a string so that it becomes suitable for being an env variable.
+# This is a destructive operation and the original string cannot be restored.
+function mkl_env_esc {
+    echo $* | LC_ALL=C sed -e 's/[^a-zA-Z0-9_]/_/g'
+}
+
+# Convert arguments to upper case
+function mkl_upper {
+    echo "$*" | tr '[:lower:]' '[:upper:]'
+}
+
+# Convert arguments to lower case
+function mkl_lower {
+    echo "$*" | tr '[:upper:]' '[:lower:]'
+}
+
+
+# Checks if element is in list
+# Arguments:
+#   list
+#   element
+function mkl_in_list {
+    local n
+    for n in $1 ; do
+        [[ $n == $2 ]] && return 0
+    done
+    return 1
+}
+
+
+
+
+###########################################################################
+#
+#
+# Cache functionality
+#
+#
+###########################################################################
+
+
+# Write cache file
+function mkl_cache_write {
+    [[ ! -z "$MKL_NOCACHE" ]] && return 0
+    echo "# mklove configure cache file generated at $(date)" > config.cache
+    for n in $MKL_CACHEVARS ; do
+        echo "$n=${!n}" >> config.cache
+    done
+    echo "Generated config.cache"
+}
+
+
+# Read cache file
+function mkl_cache_read {
+    [[ ! -z "$MKL_NOCACHE" ]] && return 0
+    [ -f config.cache ] || return 1
+
+    echo "using cache file config.cache"
+
+    local ORIG_IFS=$IFS
+    IFS="$IFS="
+    while read -r n v ; do
+        [[ -z $n || $n = \#* || -z $v ]] && continue
+        mkl_var_set $n $v cache
+    done < config.cache
+    IFS=$ORIG_IFS
+}
+
+
+###########################################################################
+#
+#
+# Config name meta data
+#
+#
+###########################################################################
+
+# Set metadata for config name
+# This metadata is used by mkl in various situations
+# Arguments:
+#   config name
+#   metadata key
+#   metadata value (appended)
+function mkl_meta_set {
+    local metaname="mkl__$1__$2"
+    eval "$metaname=\"\$$metaname $3\""
+}
+
+# Returns metadata for config name
+# Arguments:
+#   config name
+#   metadata key
+#   default (optional)
+function mkl_meta_get {
+    local metaname="mkl__$1__$2"
+    if [[ ! -z ${!metaname} ]]; then
+        echo ${!metaname}
+    else
+        echo "$3"
+    fi
+}
+
+# Checks if metadata exists
+# Arguments:
+#   config name
+#   metadata key
+function mkl_meta_exists {
+    local metaname="mkl__$1__$2"
+    if [[ ! -z ${!metaname} ]]; then
+        return 0
+    else
+        return 1
+    fi
+}
+
+
+
+
+
+###########################################################################
+#
+#
+# Check framework
+#
+#
+###########################################################################
+
+
+# Print that a check is beginning to run
+# Returns 0 if a cached result was used (do not continue with your tests),
+# else 1.
+#
+# If the check should not be cachable then specify argument 3 as "no-cache",
+# this is useful when a check not only checks but actually sets config
+# variables itself (which is not recommended, but desired sometimes).
+#
+# Arguments:
+#  [ --verb "verb.." ]  (replace "checking for")
+#  config name
+#  define name
+#  action  (fail,cont,disable or no-cache)
+#  [ display name ]
+function mkl_check_begin {
+    local verb="checking for"
+    if [[ $1 == "--verb" ]]; then
+        verb="$2"
+        shift
+        shift
+    fi
+
+    local name=$(mkl_meta_get $1 name "$4")
+    [[ -z $name ]] && name="x:$1"
+
+    echo -n "$verb $name..."
+    if [[ $3 != "no-cache" ]]; then
+        local status=$(mkl_var_get "MKL_STATUS_$1")
+        # Check cache (from previous run or this one).
+        # Only use the cached value if the cached check succeeded:
+        # it is more likely that a failed check has been fixed than the other
+        # way around.
+        if [[ ! -z $status && ( $status = "ok" ) ]]; then
+            mkl_check_done "$1" "$2" "$3" $status "cached"
+            return 0
+        fi
+    fi
+    return 1
+}
+
+# Print that a check is done
+# Arguments:
+#  config name
+#  define name
+#  action
+#  status (ok|failed)
+#  extra-info (optional)
+function mkl_check_done {
+    # Clean up configname to be a safe varname
+    local cname=${1//-/_}
+    mkl_var_set "MKL_STATUS_$cname" "$4" cache
+
+    local extra=""
+    if [[ $4 = "failed" ]]; then
+        local clr=$MKL_YELLOW
+        extra=" ($3)"
+        case "$3" in
+            fail)
+                clr=$MKL_RED
+                ;;
+            cont)
+                extra=""
+                ;;
+        esac
+        echo -e " $clr$4$MKL_CLR_RESET${extra}"
+    else
+        [[ ! -z $2 ]] && mkl_define_set "$cname" "$2" "1"
+        [[ ! -z $2 ]] && mkl_mkvar_set  "$cname" "$2" "y"
+        [ ! -z "$5" ] && extra=" ($5)"
+        echo -e " $MKL_GREEN$4${MKL_CLR_RESET}$extra"
+    fi
+}
+
+
+# Perform configure check by compiling source snippet
+# Arguments:
+#  [--ldflags="..." ]  (appended after "compiler arguments" below)
+#  config name
+#  define name
+#  action (fail|disable)
+#  compiler (CC|CXX)
+#  compiler arguments (optional "", example: "-lzookeeper")
+#  source snippet
+function mkl_compile_check {
+    local ldf=
+    if [[ $1 == --ldflags=* ]]; then
+	ldf=${1#*=}
+	shift
+    fi
+    mkl_check_begin "$1" "$2" "$3" "$1 (by compile)" && return $?
+
+    local cflags=
+
+    if [[ $4 = "CXX" ]]; then
+        local ext=cpp
+        cflags="$(mkl_mkvar_get CXXFLAGS)"
+    else
+        local ext=c
+        cflags="$(mkl_mkvar_get CFLAGS)"
+    fi
+
+    local srcfile=$(mktemp _mkltmpXXXXXX)
+    mv "$srcfile" "${srcfile}.$ext"
+    srcfile="$srcfile.$ext"
+    echo "$6" > $srcfile
+    echo "
+int main () { return 0; }
+" >> $srcfile
+
+    local cmd="${!4} $cflags $(mkl_mkvar_get CPPFLAGS) -Wall -Werror $srcfile -o ${srcfile}.o $ldf $(mkl_mkvar_get LDFLAGS) $5";
+    mkl_dbg "Compile check $1 ($2): $cmd"
+
+    local output
+    output=$($cmd 2>&1)
+
+    if [[ $? != 0 ]] ; then
+        mkl_dbg "compile check for $1 ($2) failed: $cmd: $output"
+        mkl_check_failed "$1" "$2" "$3" "compile check failed:
+CC: $4
+flags: $5
+$cmd:
+$output
+source: $6"
+        local ret=1
+    else
+        mkl_check_done "$1" "$2" "$3" "ok"
+        local ret=0
+    fi
+
+    # OSX XCode toolchain creates dSYM directories when -g is set,
+    # delete them specifically.
+    rm -rf "$srcfile" "${srcfile}.o" "$srcfile*dSYM"
+
+    return $ret
+}
+
+
+# Try to link with a library.
+# Arguments:
+#  config name
+#  define name
+#  action (fail|disable)
+#  linker flags (e.g. "-lpthreads")
+function mkl_link_check {
+    mkl_check_begin "$1" "$2" "$3" "$1 (by linking)" && return $?
+
+    local srcfile=$(mktemp _mktmpXXXXXX)
+    echo "int main () { return 0; }" > $srcfile
+
+    local cmd="${CC} $(mkl_mkvar_get LDFLAGS) -c $srcfile -o ${srcfile}.o $4";
+    mkl_dbg "Link check $1 ($2): $cmd"
+
+    local output
+    output=$($cmd 2>&1)
+
+    if [[ $? != 0 ]] ; then
+        mkl_dbg "link check for $1 ($2) failed: $output"
+        mkl_check_failed "$1" "$2" "$3" "compile check failed:
+$output"
+        local ret=1
+    else
+        mkl_check_done "$1" "$2" "$3" "ok" "$4"
+        local ret=0
+    fi
+
+    rm -f $srcfile*
+    return $ret
+}
+
+
+
+# Tries to figure out if we can use a static library or not.
+# Arguments:
+#  library name   (e.g. -lrdkafka)
+#  compiler flags (optional "", e.g: "-lyajl")
+# Returns/outputs:
+#  New list of compiler flags
+function mkl_lib_check_static {
+    local libname=$1
+    local libs=$2
+    local arfile_var=STATIC_LIB_${libname#-l}
+
+    mkl_dbg "Check $libname for static library (libs $libs, arfile variable $arfile_var=${!arfile_var})"
+
+    # If STATIC_LIB_<libname_without_-l> specifies an existing .a file we
+    # use that instead.
+    if [[ -f ${!arfile_var} ]]; then
+	libs=$(echo $libs | sed -e "s|$libname|${!arfile_var}|g")
+    elif [[ $HAS_LDFLAGS_STATIC == y ]]; then
+        libs=$(echo $libs | sed -e "s|$libname|${LDFLAGS_STATIC} $libname ${LDFLAGS_DYNAMIC}|g")
+    else
+        mkl_dbg "$libname: Neither $arfile_var specified or static linker flags supported: static linking probably won't work"
+    fi
+
+    echo $libs
+}
+
+
+# Checks that the specified lib is available through a number of methods.
+# compiler flags are automatically appended to "LIBS" mkvar on success.
+#
+# If STATIC_LIB_<libname_without_-l> is set to the path of an <libname>.a file
+# it will be used instead of -l<libname>.
+#
+# Arguments:
+#  [--static=<lib>]  (allows static linking (--enable-static) for the
+#                     library provided, e.g.: --static=-lrdkafka "librdkafka"..)
+#  [--libname=<lib>] (library name if different from config name, such as
+#                     when the libname includes a dash)
+#  config name (library name (for pkg-config))
+#  define name
+#  action (fail|disable|cont)
+#  compiler (CC|CXX)
+#  compiler flags (optional "", e.g: "-lyajl")
+#  source snippet
+function mkl_lib_check {
+
+    local is_static=0
+    local staticopt=
+    if [[ $1 == --static* ]]; then
+        staticopt=$1
+        shift
+    fi
+
+    local libnameopt=
+    local libname=$1
+    if [[ $1 == --libname* ]]; then
+        libnameopt=$1
+        libname="${libnameopt#*=}"
+        shift
+    fi
+
+    # pkg-config result (0=ok)
+    local pkg_conf_failed=1
+    if [[ $WITH_PKGCONFIG == "y" ]]; then
+        # Let pkg-config populate CFLAGS, LIBS, etc.
+        mkl_pkg_config_check $staticopt $libnameopt "$1" "" cont
+        pkg_conf_failed=$?
+    fi
+
+    local libs=""
+    if [[ $pkg_conf_failed ]]; then
+        libs="$5"
+        if [[ $WITH_STATIC_LINKING == y && ! -z $staticopt ]]; then
+            libs=$(mkl_lib_check_static "${staticopt#*=}" "$libs")
+            is_static=1
+        fi
+    fi
+
+    if ! mkl_compile_check "$1" "$2" "$3" "$4" "$libs" "$6"; then
+        return 1
+    fi
+
+    if [[ $pkg_conf_failed == 1 ]]; then
+        # Add libraries in reverse order to make sure inter-dependencies
+        # are resolved in the correct order.
+        # E.g., check for crypto and then ssl should result in -lssl -lcrypto
+        mkl_mkvar_prepend "$1" LIBS "$libs"
+    fi
+
+    return 0
+}
+
+
+# Check for library with pkg-config
+# Automatically sets CFLAGS and LIBS from pkg-config information.
+# Arguments:
+#  [--static=<lib>]  (allows static linking (--enable-static) for the
+#                     library provided, e.g.: --static=-lrdkafka "librdkafka"..)
+#  [--libname=<lib>] (library name if different from config name, such as
+#                     when the libname includes a dash)
+#  config name
+#  define name
+#  action (fail|disable|ignore)
+function mkl_pkg_config_check {
+
+    local staticopt=
+    if [[ $1 == --static* ]]; then
+        staticopt=$1
+        shift
+    fi
+
+    local libname=$1
+    if [[ $1 == --libname* ]]; then
+        libname="${libnameopt#*=}"
+        shift
+    fi
+
+    local cname="${1}_PKGCONFIG"
+    mkl_check_begin "$cname" "$2" "no-cache" "$1 (by pkg-config)" && return $?
+
+    local cflags=
+    local cmd="${PKG_CONFIG} --short-errors --cflags $libname"
+    mkl_dbg "pkg-config check $libname ($2): $cmd"
+
+    cflags=$($cmd 2>&1)
+    if [[ $? != 0 ]]; then
+        mkl_dbg "'$cmd' failed: $cflags"
+        mkl_check_failed "$cname" "$2" "$3" "'$cmd' failed:
+$cflags"
+        return 1
+    fi
+
+    local libs=
+    libs=$(${PKG_CONFIG} --short-errors --libs $libname 2>&1)
+    if [[ $? != 0 ]]; then
+        mkl_dbg "${PKG_CONFIG} --libs $libname failed: $libs"
+        mkl_check_failed "$cname" "$2" "$3" "pkg-config --libs failed"
+        return 1
+    fi
+
+    mkl_mkvar_append $1 "CFLAGS" "$cflags"
+
+    if [[ $WITH_STATIC_LINKING == y && ! -z $staticopt ]]; then
+        libs=$(mkl_lib_check_static "${staticopt#*=}" "$libs")
+    fi
+    mkl_mkvar_prepend "$1" LIBS "$libs"
+
+    mkl_check_done "$1" "$2" "$3" "ok"
+
+    return 0
+}
+
+
+# Check that a command runs and exits successfully.
+# Arguments:
+#  config name
+#  define name (optional, can be empty)
+#  action
+#  command
+function mkl_command_check {
+    mkl_check_begin "$1" "$2" "$3" "$1 (by command)" && return $?
+
+    local out=
+    out=$($4 2>&1)
+    if [[ $? != 0 ]]; then
+        mkl_dbg "$1: $2: $4 failed: $out"
+        mkl_check_failed "$1" "$2" "$3" "command '$4' failed:
+$out"
+        return 1
+    fi
+
+    mkl_check_done "$1" "$2" "$3" "ok"
+
+    return 0
+}
+
+
+# Check that a program is executable, but will not execute it.
+# Arguments:
+#  config name
+#  define name (optional, can be empty)
+#  action
+#  program name  (e.g, objdump)
+function mkl_prog_check {
+    mkl_check_begin --verb "checking executable" "$1" "$2" "$3" "$1" && return $?
+
+    local out=
+    out=$(command -v "$4" 2>&1)
+    if [[ $? != 0 ]]; then
+        mkl_dbg "$1: $2: $4 is not executable: $out"
+        mkl_check_failed "$1" "$2" "$3" "$4 is not executable"
+        return 1
+    fi
+
+    mkl_check_done "$1" "$2" "$3" "ok"
+
+    return 0
+}
+
+
+
+
+# Checks that the check for the given config name passed.
+# This does not behave like the other checks: if the given config name passed
+# its test then nothing is printed; otherwise configure will fail.
+# Arguments:
+#  checked config name
+function mkl_config_check {
+    local status=$(mkl_var_get "MKL_STATUS_$1")
+    [[ $status = "ok" ]] && return 0
+    mkl_fail $1 "" "fail" "$MKL_MODULE requires $1"
+    return 1
+}
+
+
+# Checks that all provided config names are set.
+# Arguments:
+#  config name
+#  define name
+#  action
+#  check_config_name1
+#  check_config_name2..
+function mkl_config_check_all {
+    local cname=
+    local res="ok"
+    echo start this now for $1
+    for cname in ${@:4}; do
+        local st=$(mkl_var_get "MKL_STATUS_$cname")
+        [[ $status = "ok" ]] && continue
+        mkl_fail $1 $2 $3 "depends on $cname"
+        res="failed"
+    done
+
+    echo "try res $res"
+    mkl_check_done "$1" "$2" "$3" $res
+}
+
+
+# Check environment variable
+# Arguments:
+#  config name
+#  define name
+#  action
+#  environment variable
+function mkl_env_check {
+    mkl_check_begin "$1" "$2" "$3" "$1 (by env $4)" && return $?
+
+    if [[ -z ${!4} ]]; then
+        mkl_check_failed "$1" "$2" "$3" "environment variable $4 not set"
+        return 1
+    fi
+
+    mkl_check_done "$1" "$2" "$3" "ok" "${!4}"
+
+    return 0
+}
+
+
+# Run all checks
+function mkl_checks_run {
+    # Set up common variables
+    mkl_allvar_set "" MKL_APP_NAME $(mkl_meta_get description name)
+    mkl_allvar_set "" MKL_APP_DESC_ONELINE "$(mkl_meta_get description oneline)"
+
+    # Call checks functions in dependency order
+    local mf
+    for mf in $MKL_CHECKS ; do
+        MKL_MODULE=${mf%:*}
+        local func=${mf#*:}
+
+        if mkl_func_exists $func ; then
+            $func
+        else
+            mkl_err "Check function $func from $MKL_MODULE disappeared ($mf)"
+        fi
+        unset MKL_MODULE
+    done
+}
+
+
+# Check for color support in terminal.
+# If the terminal supports colors, the function will alter
+#  MKL_RED
+#  MKL_GREEN
+#  MKL_YELLOW
+#  MKL_BLUE
+#  MKL_CLR_RESET
+function mkl_check_terminal_color_support {
+    local use_color=false
+    local has_tput=false
+
+    if [[ -z ${TERM} ]]; then
+        # tput and dircolors require $TERM
+        mkl_dbg "\$TERM is not set! Cannot check for color support in terminal."
+        return 1
+    elif hash tput 2>/dev/null; then
+        has_tput=true
+        [[ $(tput colors 2>/dev/null) -ge 8 ]] && use_color=true
+        mkl_dbg "tput reports color support: ${use_color}"
+    elif hash dircolors 2>/dev/null; then
+        # Enable color support only on colorful terminals.
+        # dircolors --print-database uses its own built-in database
+        # instead of using /etc/DIR_COLORS. Try to use the external file
+        # first to take advantage of user additions.
+        local safe_term=${TERM//[^[:alnum:]]/?}
+        local match_lhs=""
+        [[ -f ~/.dir_colors   ]] && match_lhs="${match_lhs}$(<~/.dir_colors)"
+        [[ -f /etc/DIR_COLORS ]] && match_lhs="${match_lhs}$(</etc/DIR_COLORS)"
+        [[ -z ${match_lhs}    ]] && match_lhs=$(dircolors --print-database)
+        [[ $'\n'${match_lhs} == *$'\n'"TERM "${safe_term}* ]] && use_color=true
+        mkl_dbg "dircolors reports color support: ${use_color}"
+    fi
+
+    if ${use_color}; then
+        if ${has_tput}; then
+            # In theory, the user could have set different escape sequences.
+            # Because tput is available we can use it to query the right values.
+            mkl_dbg "Using color escape sequences from tput"
+            MKL_RED=$(tput setaf 1)
+            MKL_GREEN=$(tput setaf 2)
+            MKL_YELLOW=$(tput setaf 3)
+            MKL_BLUE=$(tput setaf 4)
+            MKL_CLR_RESET=$(tput sgr0)
+        else
+            mkl_dbg "Using hard-code ANSI color escape sequences"
+            MKL_RED="\033[031m"
+            MKL_GREEN="\033[032m"
+            MKL_YELLOW="\033[033m"
+            MKL_BLUE="\033[034m"
+            MKL_CLR_RESET="\033[0m"
+        fi
+    else
+        mkl_dbg "Did not detect color support in \"$TERM\" terminal!"
+    fi
+
+    return 0
+}
+
+
+
+
+###########################################################################
+#
+#
+# Module functionality
+#
+#
+###########################################################################
+
+# Downloads module from repository.
+# Arguments:
+#  module name
+# Returns:
+#  module file name
+function mkl_module_download {
+    local modname="$1"
+    local url="$MKL_REPO_URL/modules/configure.$modname"
+    local tmpfile=""
+
+    fname="${MKLOVE_DIR}/modules/configure.$modname"
+
+    if [[ $url != http*://* ]]; then
+        # Local path, just copy file.
+        if [[ ! -f $url ]]; then
+            mkl_err "Module $modname not found at $url"
+            return 1
+        fi
+
+        if ! cp "$url" "$fname" ; then
+            mkl_err "Failed to copy $url to $fname"
+            return 1
+        fi
+
+        echo "$fname"
+        return 0
+    fi
+
+    # Download
+    mkl_info "${MKL_BLUE}downloading missing module $modname from $url${MKL_CLR_RESET}"
+
+    tmpfile=$(mktemp _mkltmpXXXXXX)
+    local out=
+    out=$(wget -nv -O "$tmpfile" "$url" 2>&1)
+
+    if [[ $? -ne 0 ]]; then
+        rm -f "$tmpfile"
+        mkl_err "Failed to download $modname:"
+        mkl_err0 $out
+        return 1
+    fi
+
+    # Move downloaded file into place replacing the old file.
+    mv "$tmpfile" "$fname" || return 1
+
+    # "Return" filename
+    echo "$fname"
+
+    return 0
+}
+
+
+# Load module by name or filename
+# Arguments:
+#   "require"|"try"
+#   filename
+# [ module arguments ]
+function mkl_module_load {
+    local try=$1
+    shift
+    local fname=$1
+    shift
+    local modname=${fname#*configure.}
+    local bypath=1
+
+    # Check if already loaded
+    if mkl_in_list "$MKL_MODULES" "$modname"; then
+        return 0
+    fi
+
+    if [[ $fname = $modname ]]; then
+        # Module specified by name, find the file.
+        bypath=0
+        for fname in configure.$modname \
+            ${MKLOVE_DIR}/modules/configure.$modname ; do
+            [[ -s $fname ]] && break
+        done
+    fi
+
+    # Calling module
+    local cmod=$MKL_MODULE
+    [[ -z $cmod ]] && cmod="base"
+
+    if [[ ! -s $fname ]]; then
+        # Attempt to download module, if permitted
+        if [[ $MKL_NO_DOWNLOAD != 0 || $bypath == 1 ]]; then
+            mkl_err "Module $modname not found at $fname (required by $cmod) and downloads disabled"
+            if [[ $try = "require" ]]; then
+                mkl_fail "$modname" "none" "fail" \
+                    "Module $modname not found (required by $cmod) and downloads disabled"
+            fi
+            return 1
+        fi
+
+        fname=$(mkl_module_download "$modname")
+        if [[ $? -ne 0 ]]; then
+            mkl_err "Module $modname not found (required by $cmod)"
+            if [[ $try = "require" ]]; then
+                mkl_fail "$modname" "none" "fail" \
+                    "Module $modname not found (required by $cmod)"
+                return 1
+            fi
+        fi
+
+        # Now downloaded, try loading the module again.
+        mkl_module_load $try "$fname" "$@"
+        return $?
+    fi
+
+    # Set current module
+    local save_MKL_MODULE=$MKL_MODULE
+    MKL_MODULE=$modname
+
+    mkl_dbg "Loading module $modname (required by $cmod) from $fname"
+
+    # Source module file (positional arguments are available to module)
+    source $fname
+
+    # Restore current module (might be recursive)
+    MKL_MODULE=$save_MKL_MODULE
+
+    # Add module to list of modules
+    mkl_var_append MKL_MODULES $modname
+
+    # Rename module's special functions so we can call them separately later.
+    mkl_func_rename "options" "${modname}_options"
+    mkl_func_push MKL_CHECKS "$modname" "checks"
+    mkl_func_push MKL_GENERATORS "$modname" "generate"
+    mkl_func_push MKL_CLEANERS "$modname" "clean"
+}
+
+
+# Require and load module
+# Must only be called from module file outside any function.
+# Arguments:
+#  [ --try ]    Don't fail if module doesn't exist
+#  module1
+#  [ "must" "pass" ]
+#  [ module arguments ... ]
+function mkl_require {
+    local try="require"
+    if [[ $1 = "--try" ]]; then
+        local try="try"
+        shift
+    fi
+
+    local mod=$1
+    shift
+    local override_action=
+
+    # Check for cyclic dependencies
+    if mkl_in_list "$MKL_LOAD_STACK" "$mod"; then
+        mkl_err "Cyclic dependency detected while loading $mod module:"
+        local cmod=
+        local lmod=$mod
+        for cmod in $MKL_LOAD_STACK ; do
+            mkl_err "  $lmod required by $cmod"
+            lmod=$cmod
+        done
+        mkl_fail base "" fail "Cyclic dependency detected while loading module $mod"
+        return 1
+    fi
+
+    mkl_var_prepend MKL_LOAD_STACK "$mod"
+
+
+    if [[ "$1 $2" == "must pass" ]]; then
+        shift
+        shift
+        override_action="fail"
+    fi
+
+    if [[ ! -z $override_action ]]; then
+        mkl_meta_set "MOD__$mod" "override_action" "$override_action"
+    fi
+
+
+    mkl_module_load $try $mod "$@"
+    local ret=$?
+
+    mkl_var_shift MKL_LOAD_STACK
+
+    return $ret
+}
+
+
+
+###########################################################################
+#
+#
+# Usage options
+#
+#
+###########################################################################
+
+
+MKL_USAGE="Usage: ./configure [OPTIONS...]
+
+ mklove configure script - mklove, not autoconf
+ Copyright (c) 2014-2015 Magnus Edenhill - https://github.com/edenhill/mklove
+"
+
+function mkl_usage {
+    echo "$MKL_USAGE"
+    local name=$(mkl_meta_get description name)
+
+    if [[ ! -z ${name} ]]; then
+	echo " $name - $(mkl_meta_get description oneline)
+ $(mkl_meta_get description copyright)
+"
+    fi
+
+    local og
+    for og in $MKL_USAGE_GROUPS ; do
+        og="MKL_USAGE_GROUP__$og"
+        echo "${!og}"
+    done
+
+    echo "Honoured environment variables:
+  CC, CPP, CXX, CFLAGS, CPPFLAGS, CXXFLAGS, LDFLAGS, LIBS,
+  LD, NM, OBJDUMP, STRIP, PKG_CONFIG, PKG_CONFIG_PATH,
+  STATIC_LIB_<libname>=.../libname.a
+
+"
+
+}
+
+
+
+# Add usage option informative text
+# Arguments:
+#  text
+function mkl_usage_info {
+    MKL_USAGE="$MKL_USAGE
+$1"
+}
+
+
+# Add option to usage output
+# Arguments:
+#  option group ("Standard", "Cross-Compilation", etc..)
+#  variable name
+#  option ("--foo=feh")
+#  help
+#  default (optional)
+#  assignvalue (optional, default:"y")
+#  function block (optional)
+function mkl_option {
+    local optgroup=$1
+    local varname=$2
+
+    # Fixed width between option name and help in usage output
+    local pad="                                   "
+    if [[ ${#3} -lt ${#pad} ]]; then
+        pad=${pad:0:$(expr ${#pad} - ${#3})}
+    else
+        pad=""
+    fi
+
+    # Add to usage output
+    local optgroup_safe=$(mkl_env_esc $optgroup)
+    if ! mkl_in_list "$MKL_USAGE_GROUPS" "$optgroup_safe" ; then
+        mkl_env_append MKL_USAGE_GROUPS "$optgroup_safe"
+        mkl_env_set "MKL_USAGE_GROUP__$optgroup_safe" "$optgroup options:
+"
+    fi
+
+    local defstr=""
+    [[ ! -z $5 ]] && defstr=" [$5]"
+    mkl_env_append "MKL_USAGE_GROUP__$optgroup_safe" "  $3 $pad $4$defstr
+"
+
+    local optname="${3#--}"
+    local safeopt=
+    local optval=""
+    if [[ $3 == *=* ]]; then
+        optname="${optname%=*}"
+        optval="${3#*=}"
+    fi
+
+    safeopt=$(mkl_env_esc $optname)
+
+    mkl_meta_set "MKL_OPT_ARGS" "$safeopt" "$optval"
+
+    #
+    # Optional variable scoping by prefix: "env:", "mk:", "def:"
+    #
+    local setallvar="mkl_allvar_set ''"
+    local setmkvar="mkl_mkvar_set ''"
+
+    if [[ $varname = env:* ]]; then
+        # Set environment variable (during configure runtime only)
+        varname=${varname#*:}
+        setallvar=mkl_env_set
+        setmkvar=mkl_env_set
+    elif [[ $varname = mk:* ]]; then
+        # Set Makefile.config variable
+        varname=${varname#*:}
+        setallvar="mkl_mkvar_append ''"
+        setmkvar="mkl_mkvar_append ''"
+    elif [[ $varname = def:* ]]; then
+        # Set config.h define
+        varname=${varname#*:}
+        setallvar="mkl_define_set ''"
+        setmkvar="mkl_define_set ''"
+    fi
+
+
+    if [[ ! -z $7 ]]; then
+        # Function block specified.
+        eval "function opt_$safeopt { $7 }"
+    else
+    # Add default implementation of function simply setting the value.
+    # Application may override this by redefining the function after calling
+    # mkl_option.
+        if [[ $optval = "PATH" ]]; then
+        # PATH argument: make it an absolute path.
+        # Only set the make variable (not config.h)
+            eval "function opt_$safeopt { $setmkvar $varname \"\$(mkl_abspath \$(mkl_render \$1))\"; }"
+        else
+        # Standard argument: simply set the value
+            if [[ -z "$6" ]]; then
+                eval "function opt_$safeopt { $setallvar $varname \"\$1\"; }"
+            else
+                eval "function opt_$safeopt { $setallvar $varname \"$6\"; }"
+            fi
+        fi
+    fi
+
+    # If default value is provided and does not start with "$" (variable ref)
+    # then set it right away.
+    # $ variable refs are set after all checks have run during the
+    # generating step.
+    if [[ ${#5} != 0 ]] ; then
+        if [[ $5 = *\$* ]]; then
+            mkl_var_append "MKL_LATE_VARS" "opt_$safeopt:$5"
+        else
+            opt_$safeopt $5
+        fi
+    fi
+
+    if [[ ! -z $varname ]]; then
+        # Add variable to list
+        MKL_CONFVARS="$MKL_CONFVARS $varname"
+    fi
+
+}
+
+
+
+# Adds a toggle (--enable-X, --disable-X) option.
+# Arguments:
+#  option group   ("Standard", ..)
+#  variable name  (WITH_FOO)
+#  option         (--enable-foo)
+#  help           ("foo.." ("Enable" and "Disable" will be prepended))
+#  default        (y or n)
+
+function mkl_toggle_option {
+
+    # Add option argument
+    mkl_option "$1" "$2" "$3" "$4" "$5"
+
+    # Add corresponding "--disable-foo" option for "--enable-foo".
+    local disname="${3/--enable/--disable}"
+    local dishelp="${4/Enable/Disable}"
+    mkl_option "$1" "$2" "$disname" "$dishelp" "" "n"
+}
+
+# Adds a toggle (--enable-X, --disable-X) option with builtin checker.
+# This is the library version.
+# Arguments:
+#  option group   ("Standard", ..)
+#  config name    (foo, must be same as pkg-config name)
+#  variable name  (WITH_FOO)
+#  action         (fail or disable)
+#  option         (--enable-foo)
+#  help           (defaults to "Enable <config name>")
+#  linker flags   (-lfoo)
+#  default        (y or n)
+
+function mkl_toggle_option_lib {
+
+    local help="$6"
+    [[ -z "$help" ]] && help="Enable $2"
+
+    # Add option argument
+    mkl_option "$1" "$3" "$5" "$help" "$8"
+
+    # Add corresponding "--disable-foo" option for "--enable-foo".
+    local disname="${5/--enable/--disable}"
+    local dishelp="${help/Enable/Disable}"
+    mkl_option "$1" "$3" "$disname" "$dishelp" "" "n"
+
+    # Create checks
+    eval "function _tmp_func { mkl_lib_check \"$2\" \"$3\" \"$4\" CC \"$7\"; }"
+    mkl_func_push MKL_CHECKS "$MKL_MODULE" _tmp_func
+}
+
+
+
+
+
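
For context, the option helpers above (mkl_option, mkl_toggle_option,
mkl_toggle_option_lib) are what individual modules call to register their
configure flags. A minimal sketch of a hypothetical module using them; the
module name "foo", its variables and the -lfoo linker flag are illustrative
only and not part of this commit:

    #!/bin/bash
    # Hypothetical mklove module: mklove/modules/configure.foo

    # Plain option: --foo-path=PATH is made absolute and stored in the
    # make variable FOO_PATH (default /opt/foo).
    mkl_option "Foo" "FOO_PATH" "--foo-path=PATH" "Where foo is installed" "/opt/foo"

    # Toggle: registers both --enable-foo and --disable-foo and stores y/n
    # in WITH_FOO ("Enable"/"Disable" are prepended to the help text).
    mkl_toggle_option "Foo" "WITH_FOO" "--enable-foo" "foo support" "n"

    function checks {
        # Probe for the library only when the user asked for it.
        if [[ $WITH_FOO == y ]]; then
            mkl_lib_check "foo" "WITH_FOO" fail CC "-lfoo"
        fi
    }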

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/mklove/modules/configure.builtin
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/mklove/modules/configure.builtin b/thirdparty/librdkafka-0.11.4/mklove/modules/configure.builtin
new file mode 100644
index 0000000..546cbb2
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/mklove/modules/configure.builtin
@@ -0,0 +1,62 @@
+#!/bin/bash
+#
+# mklove builtin checks and options
+# Sets:
+#  prefix, etc..
+
+
+mkl_option "Standard" prefix "--prefix=PATH" \
+    "Install arch-independent files in PATH" "/usr/local"
+mkl_option "Standard" exec_prefix "--exec-prefix=PATH" \
+    "Install arch-dependent files in PATH" "\$prefix"
+mkl_option "Standard" bindir "--bindir=PATH" "User executables" "\$exec_prefix/bin"
+mkl_option "Standard" sbindir "--sbindir=PATH" "System admin executables" \
+    "\$exec_prefix/sbin"
+mkl_option "Standard" libexecdir "--libexecdir=PATH" "Program executables" \
+    "\$exec_prefix/libexec"
+mkl_option "Standard" datadir "--datadir=PATH" "Read-only arch-independent data" \
+    "\$prefix/share"
+mkl_option "Standard" sysconfdir "--sysconfdir=PATH" "Configuration data" \
+    "\$prefix/etc"
+mkl_option "Standard" sharedstatedir "--sharedstatedir=PATH" \
+    "Modifiable arch-independent data" "\$prefix/com"
+mkl_option "Standard" localstatedir "--localstatedir=PATH" \
+    "Modifiable local state data" "\$prefix/var"
+mkl_option "Standard" libdir "--libdir=PATH" "Libraries" "\$exec_prefix/lib"
+mkl_option "Standard" includedir "--includedir=PATH" "C/C++ header files" \
+    "\$prefix/include"
+mkl_option "Standard" infodir "--infodir=PATH" "Info documentation" "\$prefix/info"
+mkl_option "Standard" mandir "--mandir=PATH" "Manual pages" "\$prefix/man"
+
+mkl_option "Configure tool" "" "--list-modules" "List loaded mklove modules"
+mkl_option "Configure tool" "" "--list-checks" "List checks"
+mkl_option "Configure tool" env:MKL_FAILFATAL "--fail-fatal" "All failures are fatal"
+mkl_option "Configure tool" env:MKL_NOCACHE "--no-cache" "Dont use or generate config.cache"
+mkl_option "Configure tool" env:MKL_DEBUG "--debug" "Enable configure debugging"
+mkl_option "Configure tool" env:MKL_CLEAN "--clean" "Remove generated configure files"
+mkl_option "Configure tool" "" "--reconfigure" "Rerun configure with same arguments as last run"
+mkl_option "Configure tool" env:MKL_NO_DOWNLOAD "--no-download" "Disable downloads of required mklove modules"
+mkl_option "Configure tool" env:MKL_UPDATE_MODS "--update-modules" "Update modules from global repository"
+mkl_option "Configure tool" env:MKL_REPO_URL "--repo-url=URL_OR_PATH" "Override mklove modules repo URL" "$MKL_REPO_URL"
+mkl_option "Configure tool" "" "--help" "Show configure usage"
+
+
+mkl_toggle_option "Compatibility" "mk:MKL_MAINT_MODE" "--enable-maintainer-mode" "Maintainer mode (no-op)"
+
+mkl_option "Configure tool" "mk:PROGRAM_PREFIX" "--program-prefix=PFX" "Program prefix"
+
+mkl_option "Compatibility" "mk:DISABL_DEP_TRACK" "--disable-dependency-tracking" "Disable dependency tracking (no-op)"
+mkl_option "Compatibility" "mk:DISABL_SILENT_RULES" "--disable-silent-rules" "Verbose build output (no-op)"
+
+
+function checks {
+
+    if [[ ! -z $libdir ]]; then
+	mkl_mkvar_append "libdir" LDFLAGS "-L${libdir}"
+    fi
+
+    if [[ ! -z $includedir ]]; then
+	mkl_mkvar_append "includedir" CPPFLAGS "-I${includedir}"
+    fi
+
+}
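
Since the defaults above that reference other variables (e.g. "\$exec_prefix/lib")
are resolved late, after the checks have run, overriding just --prefix cascades
into every directory that builds on it. Illustrative invocations:

    # List all registered options, including the ones declared above.
    $ ./configure --help

    # Root everything under /opt/foo: exec_prefix, libdir, includedir, ...
    # follow, since their defaults reference $prefix / $exec_prefix.
    $ ./configure --prefix=/opt/foo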

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/mklove/modules/configure.cc
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/mklove/modules/configure.cc b/thirdparty/librdkafka-0.11.4/mklove/modules/configure.cc
new file mode 100644
index 0000000..30bddb1
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/mklove/modules/configure.cc
@@ -0,0 +1,178 @@
+#!/bin/bash
+#
+# Compiler detection
+# Sets:
+#  CC, CXX, CFLAGS, CPPFLAGS, LDFLAGS, ARFLAGS, PKG_CONFIG, INSTALL, MBITS
+
+
+mkl_require host
+
+function checks {
+
+    # C compiler
+    mkl_meta_set "ccenv" "name" "C compiler from CC env"
+    if ! mkl_command_check "ccenv" "WITH_CC" cont "$CC --version"; then
+        if mkl_command_check "gcc" "WITH_GCC" cont "gcc --version"; then
+            CC=gcc
+        elif mkl_command_check "clang" "WITH_CLANG" cont "clang --version"; then
+            CC=clang
+        elif mkl_command_check "cc" "WITH_CC" fail "cc --version"; then
+            CC=cc
+        fi
+    fi
+    export CC="${CC}"
+    mkl_mkvar_set CC CC "$CC"
+
+    if [[ $MKL_CC_WANT_CXX == 1 ]]; then
+    # C++ compiler
+        mkl_meta_set "cxxenv" "name" "C++ compiler from CXX env"
+        if ! mkl_command_check "cxxenv" "WITH_CXX" cont "$CXX --version" ; then
+            mkl_meta_set "gxx" "name" "C++ compiler (g++)"
+            mkl_meta_set "clangxx" "name" "C++ compiler (clang++)"
+            mkl_meta_set "cxx" "name" "C++ compiler (c++)"
+            if mkl_command_check "gxx" "WITH_GXX" cont "g++ --version"; then
+                CXX=g++
+            elif mkl_command_check "clangxx" "WITH_CLANGXX" cont "clang++ --version"; then
+                CXX=clang++
+            elif mkl_command_check "cxx" "WITH_CXX" fail "c++ --version"; then
+                CXX=c++
+            fi
+        fi
+        export CXX="${CXX}"
+        mkl_mkvar_set "CXX" CXX "$CXX"
+    fi
+
+    # Handle machine bits, if specified.
+    if [[ ! -z "$MBITS" ]]; then
+	mkl_meta_set mbits_m name "mbits compiler flag (-m$MBITS)"
+	if mkl_compile_check mbits_m "" fail CC "-m$MBITS"; then
+	    mkl_mkvar_append CPPFLAGS CPPFLAGS "-m$MBITS"
+	    mkl_mkvar_append LDFLAGS LDFLAGS "-m$MBITS"
+	fi
+	if [[ -z "$ARFLAGS" && $MBITS == 64 && $MKL_DISTRO == "SunOS" ]]; then
+	    # Turn on 64-bit archives on SunOS
+	    mkl_mkvar_append ARFLAGS ARFLAGS "S"
+	fi
+    fi
+
+    # Provide prefix and checks for various other build tools.
+    local t=
+    for t in LD:ld NM:nm OBJDUMP:objdump STRIP:strip ; do
+        local tenv=${t%:*}
+        t=${t#*:}
+	local tval="${!tenv}"
+
+        [[ -z $tval ]] && tval="$t"
+
+        if mkl_prog_check "$t" "" disable "$tval" ; then
+            if [[ $tval != ${!tenv} ]]; then
+		export "$tenv"="$tval"
+	    fi
+            mkl_mkvar_set $tenv $tenv "$tval"
+        fi
+    done
+
+    # Compiler and linker flags
+    [[ ! -z $CFLAGS ]]   && mkl_mkvar_set "CFLAGS" "CFLAGS" "$CFLAGS"
+    [[ ! -z $CPPFLAGS ]] && mkl_mkvar_set "CPPFLAGS" "CPPFLAGS" "$CPPFLAGS"
+    [[ ! -z $CXXFLAGS ]] && mkl_mkvar_set "CXXFLAGS" "CXXFLAGS" "$CXXFLAGS"
+    [[ ! -z $LDFLAGS ]]  && mkl_mkvar_set "LDFLAGS" "LDFLAGS" "$LDFLAGS"
+    [[ ! -z $ARFLAGS ]]  && mkl_mkvar_set "ARFLAGS" "ARFLAGS" "$ARFLAGS"
+
+    if [[ $MKL_NO_DEBUG_SYMBOLS != "y" ]]; then
+        # Add debug symbol flag (-g)
+        # OSX 10.9 requires -gstrict-dwarf for some reason.
+        mkl_meta_set cc_g_dwarf name "debug symbols compiler flag (-g...)"
+        if [[ $MKL_DISTRO == "osx" ]]; then
+            if mkl_compile_check cc_g_dwarf "" cont CC "-gstrict-dwarf"; then
+                mkl_mkvar_append CPPFLAGS CPPFLAGS "-gstrict-dwarf"
+            else
+                mkl_mkvar_append CPPFLAGS CPPFLAGS "-g"
+            fi
+        else
+            mkl_mkvar_append CPPFLAGS CPPFLAGS "-g"
+        fi
+    fi
+
+
+    # pkg-config
+    if [ -z "$PKG_CONFIG" ]; then
+        PKG_CONFIG=pkg-config
+    fi
+
+    if mkl_command_check "pkgconfig" "WITH_PKGCONFIG" cont "$PKG_CONFIG --version"; then
+        export PKG_CONFIG
+    fi
+    mkl_mkvar_set "pkgconfig" PKG_CONFIG $PKG_CONFIG
+
+    [[ ! -z "$PKG_CONFIG_PATH" ]] && mkl_env_append PKG_CONFIG_PATH "$PKG_CONFIG_PATH"
+
+    # install
+    if [ -z "$INSTALL" ]; then
+	if [[ $MKL_DISTRO == "SunOS" ]]; then
+	    mkl_meta_set ginstall name "GNU install"
+	    if mkl_command_check ginstall "" ignore "ginstall --version"; then
+		INSTALL=ginstall
+	    else
+		INSTALL=install
+	    fi
+        else
+            INSTALL=install
+	fi
+    fi
+
+    if mkl_command_check "install" "WITH_INSTALL" cont "$INSTALL --version"; then
+        export INSTALL
+    fi
+    mkl_mkvar_set "install" INSTALL $INSTALL
+
+
+    # Enable profiling if desired
+    if [[ $WITH_PROFILING == y ]]; then
+        mkl_allvar_set "" "WITH_PROFILING" "y"
+        mkl_mkvar_append CPPFLAGS CPPFLAGS "-pg"
+        mkl_mkvar_append LDFLAGS LDFLAGS   "-pg"
+    fi
+
+    # Optimization
+    if [[ $WITHOUT_OPTIMIZATION == n ]]; then
+        mkl_mkvar_append CPPFLAGS CPPFLAGS "-O2"
+    else
+        mkl_mkvar_append CPPFLAGS CPPFLAGS "-O0"
+    fi
+
+    # Static linking
+    if [[ $WITH_STATIC_LINKING == y ]]; then
+        # LDFLAGS_STATIC is the LDFLAGS needed to enable static linking
+        # of subsequent libraries, while
+        # LDFLAGS_DYNAMIC is the LDFLAGS needed to enable dynamic linking.
+        if [[ $MKL_DISTRO != "osx" ]]; then
+            mkl_mkvar_set staticlinking LDFLAGS_STATIC  "-Wl,-Bstatic"
+            mkl_mkvar_set staticlinking LDFLAGS_DYNAMIC "-Wl,-Bdynamic"
+            mkl_mkvar_set staticlinking HAS_LDFLAGS_STATIC y
+        else
+            # OSX linker can't enable/disable static linking so we'll
+            # need to find the .a through STATIC_LIB_libname env var
+            mkl_mkvar_set staticlinking HAS_LDFLAGS_STATIC n
+        fi
+    fi
+}
+
+
+mkl_option "Compiler" "env:CC" "--cc=CC" "Build using C compiler CC" "\$CC"
+mkl_option "Compiler" "env:CXX" "--cxx=CXX" "Build using C++ compiler CXX" "\$CXX"
+mkl_option "Compiler" "ARCH" "--arch=ARCH" "Build for architecture" "$(uname -m)"
+mkl_option "Compiler" "CPU" "--cpu=CPU" "Build and optimize for specific CPU" "generic"
+mkl_option "Compiler" "MBITS" "--mbits=BITS" "Machine bits (32 or 64)" ""
+
+for n in CFLAGS CPPFLAGS CXXFLAGS LDFLAGS ARFLAGS; do
+    mkl_option "Compiler" "mk:$n" "--$n=$n" "Add $n flags"
+done
+
+mkl_option "Compiler" "env:PKG_CONFIG_PATH" "--pkg-config-path" "Extra paths for pkg-config"
+
+mkl_option "Compiler" "WITH_PROFILING" "--enable-profiling" "Enable profiling"
+mkl_option "Compiler" "WITH_STATIC_LINKING" "--enable-static" "Enable static linking"
+mkl_option "Compiler" "WITHOUT_OPTIMIZATION" "--disable-optimization" "Disable optimization flag to compiler" "n"
+mkl_option "Compiler" "env:MKL_NO_DEBUG_SYMBOLS" "--disable-debug-symbols" "Disable debugging symbols" "n"
+mkl_option "Compiler" "env:MKL_WANT_WERROR" "--enable-werror" "Enable compiler warnings as errors" "n"

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/mklove/modules/configure.cxx
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/mklove/modules/configure.cxx b/thirdparty/librdkafka-0.11.4/mklove/modules/configure.cxx
new file mode 100644
index 0000000..a38ac73
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/mklove/modules/configure.cxx
@@ -0,0 +1,8 @@
+#!/bin/bash
+#
+# C++ detection
+#
+# This module enables the C++ compiler checks in configure.cc
+
+
+MKL_CC_WANT_CXX=1

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/mklove/modules/configure.fileversion
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/mklove/modules/configure.fileversion b/thirdparty/librdkafka-0.11.4/mklove/modules/configure.fileversion
new file mode 100644
index 0000000..9bea117
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/mklove/modules/configure.fileversion
@@ -0,0 +1,65 @@
+#!/bin/bash
+#
+# Reads version from file and sets variables accordingly
+# The first non-commented line in the file is expected to be the version string.
+# Arguments:
+#    filename
+#    STR_VERSION_VARIABLE_NAME
+#  [ HEX_VERSION_VARIABLE_NAME ]
+#
+# Example: Set string version in variable named "MYVERSION_STR" and
+#          the hex representation in "MYVERSION"
+#   mkl_require fileversion VERSION.txt MYVERSION_STR MYVERSION
+
+if [[ -z "$2" ]]; then
+    mkl_fail "fileversion" "none" "fail" "Missing argument(s), expected: FILENAME STR_VER HEX_VER"
+    return 0
+fi
+
+fileversion_file="$1"
+fileversion_strvar="$2"
+fileversion_hexvar="$3"
+
+function checks {
+    mkl_check_begin "fileversion" "" "no-cache" "version from file $fileversion_file"
+
+    if [[ ! -s $fileversion_file ]]; then
+        mkl_check_failed "fileversion" "" "fail" \
+            "Version file $fileversion_file is not readable"
+        return 1
+    fi
+
+    local orig=$(grep -v ^\# "$fileversion_file" | grep -v '^$' | head -1)
+    # Strip v prefix if any
+    orig=${orig#v}
+
+    # Try to decode version string into hex
+    # Supported format is "[v]NN.NN.NN[.NN]"
+    if [[ ! -z $fileversion_hexvar ]]; then
+        local hex=""
+        local s=${orig#v} # Strip v prefix, if any.
+        local ncnt=0
+        local n=
+        for n in ${s//./ } ; do
+            if [[ ! ( "$n" =~ ^[0-9][0-9]?$ ) ]]; then
+                mkl_check_failed "fileversion" "" "fail" \
+                    "$fileversion_file: Could not decode '$orig' into hex version, expecting format 'NN.NN.NN[.NN]'"
+                return 1
+            fi
+            hex="$hex$(printf %02x $n)"
+            ncnt=$(expr $ncnt + 1)
+        done
+
+        if [[ ! -z $hex ]]; then
+            # Pad out to all four bytes
+            while [[ ${#hex} -lt 8 ]]; do
+                hex="$hex$(printf %02x 0)"
+            done
+            mkl_allvar_set "fileversion" "$fileversion_hexvar" "0x$hex"
+        fi
+    fi
+
+    mkl_allvar_set "fileversion" "$fileversion_strvar" "$orig"
+
+    mkl_check_done "fileversion" "" "cont" "ok" "${!fileversion_strvar}"
+}
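
To make the encoding concrete, a sketch with an illustrative VERSION.txt whose
first non-comment line is "0.11.4":

    # In the application's configure script:
    #   mkl_require fileversion VERSION.txt MYVERSION_STR MYVERSION
    #
    # Resulting variables (one octet per version component, zero-padded
    # to four bytes):
    #   MYVERSION_STR = "0.11.4"
    #   MYVERSION     = 0x000b0400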

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/mklove/modules/configure.gitversion
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/mklove/modules/configure.gitversion b/thirdparty/librdkafka-0.11.4/mklove/modules/configure.gitversion
new file mode 100644
index 0000000..b6ac486
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/mklove/modules/configure.gitversion
@@ -0,0 +1,19 @@
+#!/bin/bash
+#
+# Sets version variable from git information.
+# Optional arguments:
+#   "as"
+#   VARIABLE_NAME
+#
+# Example: Set version in variable named "MYVERSION":
+#   mkl_require gitversion as MYVERSION
+
+if [[ $1 == "as" ]]; then
+    __MKL_GITVERSION_VARNAME="$2"
+else
+    __MKL_GITVERSION_VARNAME="VERSION"
+fi
+
+function checks {
+    mkl_allvar_set "gitversion" "$__MKL_GITVERSION_VARNAME" "$(git describe --abbrev=6 --tags HEAD --always)"
+}
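
The value assigned depends on the state of the checkout; roughly (tags and
hashes below are made up):

    # Exactly on a tag:          VERSION=v0.11.4
    # Two commits past a tag:    VERSION=v0.11.4-2-g1a2b3c
    # No tags at all (--always): VERSION=1a2b3c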

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/mklove/modules/configure.good_cflags
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/mklove/modules/configure.good_cflags b/thirdparty/librdkafka-0.11.4/mklove/modules/configure.good_cflags
new file mode 100644
index 0000000..c8587f2
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/mklove/modules/configure.good_cflags
@@ -0,0 +1,18 @@
+#!/bin/bash
+#
+# Provides some known-good CFLAGS
+# Sets:
+#  CFLAGS
+#  CXXFLAGS
+#  CPPFLAGS
+
+
+function checks {
+    mkl_mkvar_append CPPFLAGS CPPFLAGS \
+        "-Wall -Wsign-compare -Wfloat-equal -Wpointer-arith -Wcast-align"
+
+    if [[ $MKL_WANT_WERROR = "y" ]]; then
+        mkl_mkvar_append CPPFLAGS CPPFLAGS \
+            "-Werror"
+    fi
+}

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/mklove/modules/configure.host
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/mklove/modules/configure.host b/thirdparty/librdkafka-0.11.4/mklove/modules/configure.host
new file mode 100644
index 0000000..4dfdce8
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/mklove/modules/configure.host
@@ -0,0 +1,110 @@
+#!/bin/bash
+#
+# Host OS support
+# Sets:
+#  HOST
+#  BUILD
+#  TARGET
+
+# FIXME: No need for this right now
+#mkl_require host_linux
+#mkl_require host_osx
+#mkl_require host_cygwin
+
+#mkl_option "Cross-compilation" "mk:HOST_OS" "--host-os=osname" "Host OS (linux,osx,cygwin,..)" "auto"
+
+
+# autoconf compatibility - does nothing at this point
+mkl_option "Cross-compilation" "mk:HOST" "--host=HOST" "Configure to build programs to run on HOST (no-op)"
+mkl_option "Cross-compilation" "mk:BUILD" "--build=BUILD" "Configure for building on BUILD (no-op)"
+mkl_option "Cross-compilation" "mk:TARGET" "--target=TARGET" "Configure for building cross-toolkits for platform TARGET (no-op)"
+
+function checks {
+    # Try to figure out what OS/distro we are running on.
+    mkl_check_begin "distro" "" "no-cache" "OS or distribution"
+
+    solib_ext=.so
+
+    # Try lsb_release
+    local sys
+    sys=$(lsb_release -is 2>/dev/null)
+    if [[ $? -gt 0 ]]; then
+        # That didn't work, try uname.
+        local kn=$(uname -s)
+        case $kn in
+            Linux)
+                sys=Linux
+                solib_ext=.so
+                ;;
+            Darwin)
+                sys=osx
+                solib_ext=.dylib
+                ;;
+            CYGWIN*)
+                sys=Cygwin
+                solib_ext=.dll
+                ;;
+            *)
+                sys="$kn"
+                solib_ext=.so
+                ;;
+        esac
+    fi
+
+    if [[ -z $sys ]]; then
+        mkl_check_failed "distro" "" "ignore" ""
+    else
+        mkl_check_done "distro" "" "ignore" "ok" "$sys"
+        mkl_mkvar_set "distro" "MKL_DISTRO" "$sys"
+        mkl_allvar_set "distro" "SOLIB_EXT" "$solib_ext"
+    fi
+}
+
+#function checks {
+#    mkl_check_begin "host" "HOST_OS" "no-cache" "host OS"
+#
+#    #
+#    # If --host-os=.. was not specified then this is most likely not a
+#    # a cross-compilation and we can base the host-os on the native OS.
+#    #
+#    if [[ $HOST_OS != "auto" ]]; then
+#        mkl_check_done "host" "HOST_OS" "cont" "ok" "$HOST_OS"
+#        return 0
+#    fi
+#
+#    kn=$(uname -s)
+#    case $kn in
+#        Linux)
+#            hostos=linux
+#            ;;
+#        Darwin)
+#            hostos=osx
+#            ;;
+#        CYGWIN*)
+#            hostos=cygwin
+#            ;;
+#        *)
+#            hostos="$(mkl_lower $kn)"
+#            mkl_err  "Unknown host OS kernel name: $kn"
+#            mkl_err0 "  Will attempt to load module host_$hostos anyway."
+#            mkl_err0 "  Please consider writing a configure.host_$hostos"
+#            ;;
+#    esac
+#
+#    if ! mkl_require --try "host_$hostos"; then
+#        # Module not found
+#        mkl_check_done "host" "HOST_OS" "cont" "failed" "$kn?"
+#    else
+#        # Module loaded
+#
+#        if mkl_func_exists "host_${hostos}_setup" ; then
+#            "host_${hostos}_setup"
+#        fi
+#
+#        mkl_check_done "host" "HOST_OS" "cont" "ok" "$hostos"
+#    fi
+#
+#    # Set HOST_OS var even if probing failed.
+#    mkl_mkvar_set "host" "HOST_OS" "$hostos"
+#}
+
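
As a rough sketch of what the distro probe yields (hosts are examples; anything
unrecognised falls through to the raw uname -s value):

    # lsb_release available (e.g. Ubuntu):  MKL_DISTRO=Ubuntu  SOLIB_EXT=.so
    # no lsb_release, uname -s = Darwin:    MKL_DISTRO=osx     SOLIB_EXT=.dylib
    # no lsb_release, uname -s = CYGWIN_*:  MKL_DISTRO=Cygwin  SOLIB_EXT=.dll
    # no lsb_release, uname -s = SunOS:     MKL_DISTRO=SunOS   SOLIB_EXT=.so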

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/mklove/modules/configure.lib
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/mklove/modules/configure.lib b/thirdparty/librdkafka-0.11.4/mklove/modules/configure.lib
new file mode 100644
index 0000000..49ed293
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/mklove/modules/configure.lib
@@ -0,0 +1,49 @@
+#!/bin/bash
+#
+# Module for building shared libraries
+# Sets:
+#  WITH_GNULD | WITH_OSXLD
+#  WITH_LDS  - linker script support
+mkl_require pic
+
+function checks {
+
+    mkl_mkvar_append LIB_LDFLAGS LIB_LDFLAGS '-shared'
+
+    # Check what arguments to pass to CC or LD for shared libraries
+    mkl_meta_set gnulib name "GNU-compatible linker options"
+    mkl_meta_set osxlib name "OSX linker options"
+
+    if mkl_compile_check gnulib WITH_GNULD cont CC \
+	"-shared -Wl,-soname,mkltest.0" "" ; then
+	# GNU linker
+	mkl_mkvar_append LIB_LDFLAGS LIB_LDFLAGS '-Wl,-soname,$(LIBFILENAME)'
+
+    elif mkl_compile_check osxlib WITH_OSXLD cont CC \
+	"-dynamiclib -Wl,-install_name,/tmp/mkltest.so.0" ; then
+	# OSX linker
+        mkl_mkvar_append LIB_LDFLAGS LIB_LDFLAGS '-dynamiclib -Wl,-install_name,$(DESTDIR)$(libdir)/$(LIBFILENAME)'
+    fi
+
+    # Check what argument is needed for passing linker script.
+    local ldsfile=$(mktemp _mkltmpXXXXXX)
+    echo "{
+ global:
+  *;
+};
+" > $ldsfile
+
+    mkl_meta_set ldsflagvs name "GNU linker-script ld flag"
+    mkl_meta_set ldsflagm name "Solaris linker-script ld flag"
+    if mkl_compile_check ldsflagvs "" cont CC \
+	"-shared -Wl,--version-script=$ldsfile"; then
+	mkl_mkvar_set ldsflagvs LDFLAG_LINKERSCRIPT "-Wl,--version-script="
+	mkl_mkvar_set lib_lds WITH_LDS y
+    elif mkl_compile_check ldsflagm ""  ignore CC \
+	"-shared -Wl,-M$ldsfile"; then
+	mkl_mkvar_set ldsflagm LDFLAG_LINKERSCRIPT "-Wl,-M"
+	mkl_mkvar_set lib_lds WITH_LDS y
+    fi
+
+    rm -f "$ldsfile"
+}
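
The net effect on the generated make variables is roughly the following (GNU
versus OSX toolchains; the exact values come from the compile checks above):

    # GNU ld:
    #   LIB_LDFLAGS         = -shared -Wl,-soname,$(LIBFILENAME)
    #   LDFLAG_LINKERSCRIPT = -Wl,--version-script=
    #   WITH_LDS            = y
    # OSX ld:
    #   LIB_LDFLAGS = -shared -dynamiclib -Wl,-install_name,$(DESTDIR)$(libdir)/$(LIBFILENAME)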

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/mklove/modules/configure.parseversion
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/mklove/modules/configure.parseversion b/thirdparty/librdkafka-0.11.4/mklove/modules/configure.parseversion
new file mode 100644
index 0000000..0ee0f57
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/mklove/modules/configure.parseversion
@@ -0,0 +1,95 @@
+#!/bin/bash
+#
+# Parses the provided version string and creates variables accordingly.
+#  [ "hex2str" <fmt> ]  -- version-string is in hex (e.g., 0x00080300)
+#    version-string
+#    STR_VERSION_VARIABLE_NAME
+#  [ HEX_VERSION_VARIABLE_NAME ]
+#
+# Note: The version will also be set in MKL_APP_VERSION
+#
+# Example: Set string version in variable named "MYVERSION_STR" and
+#          the hex representation in "MYVERSION"
+#   mkl_require parseversion "$(head -1 VERSION.txt)" MYVERSION_STR MYVERSION
+
+if [[ $1 == "hex2str" ]]; then
+    parseversion_type="hex"
+    parseversion_fmt="${2}:END:%d%d%d%d"
+    shift
+    shift
+else
+    parseversion_type=""
+    parseversion_fmt="%d.%d.%d.%d"
+fi
+
+if [[ -z "$2" ]]; then
+    mkl_fail "parseversion" "none" "fail" "Missing argument(s)"
+    return 0
+fi
+
+parseversion_orig="$1"
+parseversion_strvar="$2"
+parseversion_hexvar="$3"
+
+function checks {
+    mkl_check_begin --verb "parsing" "parseversion" "" "no-cache" \
+        "version '$parseversion_orig'"
+
+    # Strip v prefix if any
+    orig=${parseversion_orig#v}
+
+    if [[ $orig == 0x* ]]; then
+        parseversion_type="hex"
+        orig=${orig#0x}
+    fi
+
+    if [[ -z $orig ]]; then
+        mkl_check_failed "parseversion" "" "fail" "Version string is empty"
+        return 1
+    fi
+
+    # If orig is in hex we construct a string format instead.
+    if [[ $parseversion_type == "hex" ]]; then
+        local s=$orig
+        local str=""
+        local vals=""
+        while [[ ! -z $s ]]; do
+            local n=${s:0:2}
+            s=${s:${#n}}
+            vals="${vals}$(printf %d 0x$n) "
+        done
+        str=$(printf "$parseversion_fmt" $vals)
+        orig=${str%:END:*}
+    fi
+
+
+    # Try to decode version string into hex
+    # Supported format is "[v]NN.NN.NN[.NN]"
+    if [[ ! -z $parseversion_hexvar ]]; then
+        local hex=""
+        local s=$orig
+        local ncnt=0
+        local n=
+        for n in ${s//./ } ; do
+            if [[ ! ( "$n" =~ ^[0-9][0-9]?$ ) ]]; then
+                mkl_check_failed "parseversion" "" "fail" \
+                    "Could not decode '$parseversion_orig' into hex version, expecting format 'NN.NN.NN[.NN]'"
+                return 1
+            fi
+            hex="$hex$(printf %02x $n)"
+            ncnt=$(expr $ncnt + 1)
+        done
+
+        if [[ ! -z $hex ]]; then
+            # Pad out to all four bytes
+            while [[ ${#hex} -lt 8 ]]; do
+                hex="$hex$(printf %02x 0)"
+            done
+            mkl_allvar_set "parseversion" "$parseversion_hexvar" "0x$hex"
+        fi
+    fi
+
+    mkl_allvar_set "parseversion" "$parseversion_strvar" "$orig"
+    mkl_allvar_set "parseversion" MKL_APP_VERSION "$orig"
+    mkl_check_done "parseversion" "" "cont" "ok" "${!parseversion_strvar}"
+}
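
A worked example of the hex2str direction (version value and variable name are
illustrative): 0x000b04ff is split into octets 00 0b 04 ff, rendered through
"%d.%d.%d:END:%d%d%d%d", and everything from ":END:" onward is stripped.

    # In the application's configure script:
    #   mkl_require parseversion hex2str "%d.%d.%d" 0x000b04ff MYVER_STR
    #
    # Result:
    #   MYVER_STR       = "0.11.4"
    #   MKL_APP_VERSION = "0.11.4"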

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/mklove/modules/configure.pic
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/mklove/modules/configure.pic b/thirdparty/librdkafka-0.11.4/mklove/modules/configure.pic
new file mode 100644
index 0000000..8f138f8
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/mklove/modules/configure.pic
@@ -0,0 +1,16 @@
+#!/bin/bash
+#
+# Checks if -fPIC is supported, and if so turns it on.
+#
+# Sets:
+#  HAVE_PIC
+#  CPPFLAGS
+#
+
+function checks {
+
+    if mkl_compile_check PIC HAVE_PIC disable CC "-fPIC" "" ; then
+        mkl_mkvar_append CPPFLAGS CPPFLAGS "-fPIC"
+    fi
+}
+

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/mklove/modules/configure.socket
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/mklove/modules/configure.socket b/thirdparty/librdkafka-0.11.4/mklove/modules/configure.socket
new file mode 100644
index 0000000..f0777ab
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/mklove/modules/configure.socket
@@ -0,0 +1,20 @@
+#!/bin/bash
+#
+# Provides proper compiler flags for socket support, e.g. socket(3).
+
+function checks {
+
+    local src="
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <unistd.h>
+void foo (void) {
+   int s = socket(0, 0, 0);
+   close(s);
+}"
+    if ! mkl_compile_check socket "" cont CC "" "$src"; then
+	if mkl_compile_check --ldflags="-lsocket -lnsl" socket_nsl "" fail CC "" "$src"; then
+	    mkl_mkvar_append socket_nsl LIBS "-lsocket -lnsl"
+	fi
+    fi
+}
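
On most platforms the first compile check succeeds with no extra flags; the
fallback covers SunOS-style systems where socket() lives outside libc. A sketch
of the two outcomes:

    # socket() found directly:     LIBS unchanged
    # fallback needed (Solaris):   LIBS has -lsocket -lnsl appended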

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/packaging/RELEASE.md
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/packaging/RELEASE.md b/thirdparty/librdkafka-0.11.4/packaging/RELEASE.md
new file mode 100644
index 0000000..70d4519
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/packaging/RELEASE.md
@@ -0,0 +1,137 @@
+# librdkafka release process
+
+This guide outlines the steps needed to release a new version of librdkafka
+and publish packages to channels (NuGet, Homebrew, etc.).
+
+Releases are done in two phases:
+ * release-candidate(s) - RC1 will be the first release candidate, and any
+   changes to the repository will require a new RC.
+ * final release - the final release is based directly on the last RC tag
+   followed by a single version-bump commit (see below).
+
+Release tag and version format:
+ * release-candidate: vA.B.C-RCn
+ * final release: vA.B.C
+
+
+
+## Run regression tests
+
+**Build tests:**
+
+    $ cd tests
+    $ make -j build
+
+**Run the full regression test suite:** (requires Linux and the trivup python package)
+
+    $ make full
+
+
+If all tests pass, carry on; otherwise identify and fix the bug and start over.
+
+
+## Pre-release code tasks
+
+**Switch to the release branch which is of the format `A.B.C.x` or `A.B.x`.**
+
+    $ git checkout -b 0.11.1.x
+
+
+**Update in-code versions.**
+
+The last octet in the version hex number is the pre-build/release-candidate
+number, where 0xAABBCCff is the final release for version 0xAABBCC.
+Release candidates start at 200, thus 0xAABBCCc9 is RC1, 0xAABBCCca is RC2, etc.
+
+Change the `RD_KAFKA_VERSION` defines in both `src/rdkafka.h` and
+`src-cpp/rdkafkacpp.h` to the version to build, such as 0x000b01c9
+for v0.11.1-RC1, or 0x000b01ff for the final v0.11.1 release.
+
+    # Update defines
+    $ $EDITOR src/rdkafka.h src-cpp/rdkafkacpp.h
+
+    # Reconfigure and build
+    $ ./configure
+    $ make
+
+    # Check git diff for correctness
+    $ git diff
+
+    # Commit
+    $ git commit -m "Version v0.11.1-RC1" src/rdkafka.h src-cpp/rdkafkacpp.h
+
+
+**Create tag.**
+
+    $ git tag v0.11.1-RC1 # for an RC
+    # or for the final release:
+    $ git tag v0.11.1     # for the final release
+
+
+**Push branch and commit to github**
+
+    # Dry-run first to make sure things look correct
+    $ git push --dry-run origin 0.11.1.x
+
+    # Live
+    $ git push origin 0.11.1.x
+
+**Push tags and commit to github**
+
+    # Dry-run first to make sure things look correct.
+    $ git push --dry-run --tags origin v0.11.1-RC1
+
+    # Live
+    $ git push --tags origin v0.11.1-RC1
+
+
+
+## Creating packages
+
+As soon as a tag is pushed the CI systems (Travis and AppVeyor) will
+start their builds and eventually upload the packaging artifacts to S3.
+Wait until this process is finished by monitoring the two CIs:
+
+ * https://travis-ci.org/edenhill/librdkafka
+ * https://ci.appveyor.com/project/edenhill/librdkafka
+
+
+### Create NuGet package
+
+This is done on a Linux host with docker installed, and it also requires S3
+credentials to be set up.
+
+    $ cd packaging/nuget
+    $ pip install -r requirements.txt  # if necessary
+    $ ./release.py v0.11.1-RC1
+
+Test the generated librdkafka.redist.0.11.1-RC1.nupkg and
+then upload it to NuGet manually:
+
+ * https://www.nuget.org/packages/manage/upload
+
+
+### Homebrew recipe update
+
+The brew-update-pr.sh script automatically pushes a PR to homebrew-core
+with a patch to update the librdkafka version of the formula.
+This should only be done for final releases and not release candidates.
+
+On a MacOSX host with homebrew installed:
+
+    $ cd packaging/homebrew
+    # Dry-run first to see that things are okay.
+    $ ./brew-update-pr.sh v0.11.1
+    # If everything looks good, do the live push:
+    $ ./brew-update-pr.sh --upload v0.11.1
+
+
+### Deb and RPM packaging
+
+Debian and RPM packages are generated by Confluent packaging in a separate
+process and the resulting packages are made available on Confluent's
+APT and YUM repositories.
+
+That process is outside the scope of this document.
+
+See the Confluent docs for instructions on how to access these packages:
+https://docs.confluent.io/current/installation.html

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/packaging/archlinux/PKGBUILD
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/packaging/archlinux/PKGBUILD b/thirdparty/librdkafka-0.11.4/packaging/archlinux/PKGBUILD
new file mode 100644
index 0000000..9321698
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/packaging/archlinux/PKGBUILD
@@ -0,0 +1,5 @@
+pkgname=librdkafka
+pkgver=master
+pkgrel=1
+pkgdesc='The Apache Kafka C/C++ client library'
+arch=('i686' 'x86_64')

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/packaging/cmake/Config.cmake.in
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/packaging/cmake/Config.cmake.in b/thirdparty/librdkafka-0.11.4/packaging/cmake/Config.cmake.in
new file mode 100644
index 0000000..ef9e067
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/packaging/cmake/Config.cmake.in
@@ -0,0 +1,20 @@
+@PACKAGE_INIT@
+
+include(CMakeFindDependencyMacro)
+
+if(@WITH_ZLIB@)
+  find_dependency(ZLIB)
+endif()
+
+if(@WITH_SSL@)
+  if(@WITH_BUNDLED_SSL@)
+    # TODO: custom SSL library should be installed
+  else()
+    find_dependency(OpenSSL)
+  endif()
+endif()
+
+find_dependency(Threads)
+
+include("${CMAKE_CURRENT_LIST_DIR}/@targets_export_name@.cmake")
+check_required_components("@PROJECT_NAME@")


[39/51] [partial] nifi-minifi-cpp git commit: MINIFICPP-512 - upgrade to librdkafka 0.11.4

Posted by ph...@apache.org.
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/lz4hc.c
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/lz4hc.c b/thirdparty/librdkafka-0.11.1/src/lz4hc.c
deleted file mode 100644
index ac15d20..0000000
--- a/thirdparty/librdkafka-0.11.1/src/lz4hc.c
+++ /dev/null
@@ -1,786 +0,0 @@
-/*
-    LZ4 HC - High Compression Mode of LZ4
-    Copyright (C) 2011-2017, Yann Collet.
-
-    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-    Redistribution and use in source and binary forms, with or without
-    modification, are permitted provided that the following conditions are
-    met:
-
-    * Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above
-    copyright notice, this list of conditions and the following disclaimer
-    in the documentation and/or other materials provided with the
-    distribution.
-
-    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-    "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-    You can contact the author at :
-       - LZ4 source repository : https://github.com/lz4/lz4
-       - LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c
-*/
-/* note : lz4hc is not an independent module, it requires lz4.h/lz4.c for proper compilation */
-
-
-/* *************************************
-*  Tuning Parameter
-***************************************/
-
-/*! HEAPMODE :
- *  Select how default compression function will allocate workplace memory,
- *  in stack (0:fastest), or in heap (1:requires malloc()).
- *  Since workplace is rather large, heap mode is recommended.
- */
-#ifndef LZ4HC_HEAPMODE
-#  define LZ4HC_HEAPMODE 1
-#endif
-
-
-/*===    Dependency    ===*/
-#include "lz4hc.h"
-
-
-/*===   Common LZ4 definitions   ===*/
-#if defined(__GNUC__)
-#  pragma GCC diagnostic ignored "-Wunused-function"
-#endif
-#if defined (__clang__)
-#  pragma clang diagnostic ignored "-Wunused-function"
-#endif
-
-#define LZ4_COMMONDEFS_ONLY
-#include "lz4.c"   /* LZ4_count, constants, mem */
-
-
-/*===   Constants   ===*/
-#define OPTIMAL_ML (int)((ML_MASK-1)+MINMATCH)
-
-
-/*===   Macros   ===*/
-#define HASH_FUNCTION(i)       (((i) * 2654435761U) >> ((MINMATCH*8)-LZ4HC_HASH_LOG))
-#define DELTANEXTMAXD(p)       chainTable[(p) & LZ4HC_MAXD_MASK]    /* flexible, LZ4HC_MAXD dependent */
-#define DELTANEXTU16(p)        chainTable[(U16)(p)]   /* faster */
-
-static U32 LZ4HC_hashPtr(const void* ptr) { return HASH_FUNCTION(LZ4_read32(ptr)); }
-
-
-
-/**************************************
-*  HC Compression
-**************************************/
-static void LZ4HC_init (LZ4HC_CCtx_internal* hc4, const BYTE* start)
-{
-    MEM_INIT((void*)hc4->hashTable, 0, sizeof(hc4->hashTable));
-    MEM_INIT(hc4->chainTable, 0xFF, sizeof(hc4->chainTable));
-    hc4->nextToUpdate = 64 KB;
-    hc4->base = start - 64 KB;
-    hc4->end = start;
-    hc4->dictBase = start - 64 KB;
-    hc4->dictLimit = 64 KB;
-    hc4->lowLimit = 64 KB;
-}
-
-
-/* Update chains up to ip (excluded) */
-FORCE_INLINE void LZ4HC_Insert (LZ4HC_CCtx_internal* hc4, const BYTE* ip)
-{
-    U16* const chainTable = hc4->chainTable;
-    U32* const hashTable  = hc4->hashTable;
-    const BYTE* const base = hc4->base;
-    U32 const target = (U32)(ip - base);
-    U32 idx = hc4->nextToUpdate;
-
-    while (idx < target) {
-        U32 const h = LZ4HC_hashPtr(base+idx);
-        size_t delta = idx - hashTable[h];
-        if (delta>MAX_DISTANCE) delta = MAX_DISTANCE;
-        DELTANEXTU16(idx) = (U16)delta;
-        hashTable[h] = idx;
-        idx++;
-    }
-
-    hc4->nextToUpdate = target;
-}
-
-
-FORCE_INLINE int LZ4HC_InsertAndFindBestMatch (LZ4HC_CCtx_internal* hc4,   /* Index table will be updated */
-                                               const BYTE* ip, const BYTE* const iLimit,
-                                               const BYTE** matchpos,
-                                               const int maxNbAttempts)
-{
-    U16* const chainTable = hc4->chainTable;
-    U32* const HashTable = hc4->hashTable;
-    const BYTE* const base = hc4->base;
-    const BYTE* const dictBase = hc4->dictBase;
-    const U32 dictLimit = hc4->dictLimit;
-    const U32 lowLimit = (hc4->lowLimit + 64 KB > (U32)(ip-base)) ? hc4->lowLimit : (U32)(ip - base) - (64 KB - 1);
-    U32 matchIndex;
-    int nbAttempts = maxNbAttempts;
-    size_t ml = 0;
-
-    /* HC4 match finder */
-    LZ4HC_Insert(hc4, ip);
-    matchIndex = HashTable[LZ4HC_hashPtr(ip)];
-
-    while ((matchIndex>=lowLimit) && (nbAttempts)) {
-        nbAttempts--;
-        if (matchIndex >= dictLimit) {
-            const BYTE* const match = base + matchIndex;
-            if (*(match+ml) == *(ip+ml)
-                && (LZ4_read32(match) == LZ4_read32(ip)))
-            {
-                size_t const mlt = LZ4_count(ip+MINMATCH, match+MINMATCH, iLimit) + MINMATCH;
-                if (mlt > ml) { ml = mlt; *matchpos = match; }
-            }
-        } else {
-            const BYTE* const match = dictBase + matchIndex;
-            if (LZ4_read32(match) == LZ4_read32(ip)) {
-                size_t mlt;
-                const BYTE* vLimit = ip + (dictLimit - matchIndex);
-                if (vLimit > iLimit) vLimit = iLimit;
-                mlt = LZ4_count(ip+MINMATCH, match+MINMATCH, vLimit) + MINMATCH;
-                if ((ip+mlt == vLimit) && (vLimit < iLimit))
-                    mlt += LZ4_count(ip+mlt, base+dictLimit, iLimit);
-                if (mlt > ml) { ml = mlt; *matchpos = base + matchIndex; }   /* virtual matchpos */
-            }
-        }
-        matchIndex -= DELTANEXTU16(matchIndex);
-    }
-
-    return (int)ml;
-}
-
-
-FORCE_INLINE int LZ4HC_InsertAndGetWiderMatch (
-    LZ4HC_CCtx_internal* hc4,
-    const BYTE* const ip,
-    const BYTE* const iLowLimit,
-    const BYTE* const iHighLimit,
-    int longest,
-    const BYTE** matchpos,
-    const BYTE** startpos,
-    const int maxNbAttempts)
-{
-    U16* const chainTable = hc4->chainTable;
-    U32* const HashTable = hc4->hashTable;
-    const BYTE* const base = hc4->base;
-    const U32 dictLimit = hc4->dictLimit;
-    const BYTE* const lowPrefixPtr = base + dictLimit;
-    const U32 lowLimit = (hc4->lowLimit + 64 KB > (U32)(ip-base)) ? hc4->lowLimit : (U32)(ip - base) - (64 KB - 1);
-    const BYTE* const dictBase = hc4->dictBase;
-    U32   matchIndex;
-    int nbAttempts = maxNbAttempts;
-    int delta = (int)(ip-iLowLimit);
-
-
-    /* First Match */
-    LZ4HC_Insert(hc4, ip);
-    matchIndex = HashTable[LZ4HC_hashPtr(ip)];
-
-    while ((matchIndex>=lowLimit) && (nbAttempts)) {
-        nbAttempts--;
-        if (matchIndex >= dictLimit) {
-            const BYTE* matchPtr = base + matchIndex;
-            if (*(iLowLimit + longest) == *(matchPtr - delta + longest)) {
-                if (LZ4_read32(matchPtr) == LZ4_read32(ip)) {
-                    int mlt = MINMATCH + LZ4_count(ip+MINMATCH, matchPtr+MINMATCH, iHighLimit);
-                    int back = 0;
-
-                    while ((ip+back > iLowLimit)
-                           && (matchPtr+back > lowPrefixPtr)
-                           && (ip[back-1] == matchPtr[back-1]))
-                            back--;
-
-                    mlt -= back;
-
-                    if (mlt > longest) {
-                        longest = (int)mlt;
-                        *matchpos = matchPtr+back;
-                        *startpos = ip+back;
-            }   }   }
-        } else {
-            const BYTE* const matchPtr = dictBase + matchIndex;
-            if (LZ4_read32(matchPtr) == LZ4_read32(ip)) {
-                size_t mlt;
-                int back=0;
-                const BYTE* vLimit = ip + (dictLimit - matchIndex);
-                if (vLimit > iHighLimit) vLimit = iHighLimit;
-                mlt = LZ4_count(ip+MINMATCH, matchPtr+MINMATCH, vLimit) + MINMATCH;
-                if ((ip+mlt == vLimit) && (vLimit < iHighLimit))
-                    mlt += LZ4_count(ip+mlt, base+dictLimit, iHighLimit);
-                while ((ip+back > iLowLimit) && (matchIndex+back > lowLimit) && (ip[back-1] == matchPtr[back-1])) back--;
-                mlt -= back;
-                if ((int)mlt > longest) { longest = (int)mlt; *matchpos = base + matchIndex + back; *startpos = ip+back; }
-            }
-        }
-        matchIndex -= DELTANEXTU16(matchIndex);
-    }
-
-    return longest;
-}
-
-
-typedef enum {
-    noLimit = 0,
-    limitedOutput = 1,
-    limitedDestSize = 2,
-} limitedOutput_directive;
-
-#define LZ4HC_DEBUG 0
-#if LZ4HC_DEBUG
-static unsigned debug = 0;
-#endif
-
-
-/* LZ4HC_encodeSequence() :
- * @return : 0 if ok,
- *           1 if buffer issue detected */
-FORCE_INLINE int LZ4HC_encodeSequence (
-    const BYTE** ip,
-    BYTE** op,
-    const BYTE** anchor,
-    int matchLength,
-    const BYTE* const match,
-    limitedOutput_directive limit,
-    BYTE* oend)
-{
-    size_t length;
-    BYTE* token;
-
-#if LZ4HC_DEBUG
-    if (debug) printf("literal : %u  --  match : %u  --  offset : %u\n", (U32)(*ip - *anchor), (U32)matchLength, (U32)(*ip-match));
-#endif
-
-    /* Encode Literal length */
-    length = (size_t)(*ip - *anchor);
-    token = (*op)++;
-    if ((limit) && ((*op + (length >> 8) + length + (2 + 1 + LASTLITERALS)) > oend)) return 1;   /* Check output limit */
-    if (length >= RUN_MASK) {
-        size_t len = length - RUN_MASK;
-        *token = (RUN_MASK << ML_BITS);
-        for(; len >= 255 ; len -= 255) *(*op)++ = 255;
-        *(*op)++ = (BYTE)len;
-    } else {
-        *token = (BYTE)(length << ML_BITS);
-    }
-
-    /* Copy Literals */
-    LZ4_wildCopy(*op, *anchor, (*op) + length);
-    *op += length;
-
-    /* Encode Offset */
-    LZ4_writeLE16(*op, (U16)(*ip-match)); *op += 2;
-
-    /* Encode MatchLength */
-    length = (size_t)(matchLength - MINMATCH);
-    if ((limit) && (*op + (length >> 8) + (1 + LASTLITERALS) > oend)) return 1;   /* Check output limit */
-    if (length >= ML_MASK) {
-        *token += ML_MASK;
-        length -= ML_MASK;
-        for(; length >= 510 ; length -= 510) { *(*op)++ = 255; *(*op)++ = 255; }
-        if (length >= 255) { length -= 255; *(*op)++ = 255; }
-        *(*op)++ = (BYTE)length;
-    } else {
-        *token += (BYTE)(length);
-    }
-
-    /* Prepare next loop */
-    *ip += matchLength;
-    *anchor = *ip;
-
-    return 0;
-}
-
-/* btopt */
-#include "lz4opt.h"
-
-
-static int LZ4HC_compress_hashChain (
-    LZ4HC_CCtx_internal* const ctx,
-    const char* const source,
-    char* const dest,
-    int* srcSizePtr,
-    int const maxOutputSize,
-    unsigned maxNbAttempts,
-    limitedOutput_directive limit
-    )
-{
-    const int inputSize = *srcSizePtr;
-
-    const BYTE* ip = (const BYTE*) source;
-    const BYTE* anchor = ip;
-    const BYTE* const iend = ip + inputSize;
-    const BYTE* const mflimit = iend - MFLIMIT;
-    const BYTE* const matchlimit = (iend - LASTLITERALS);
-
-    BYTE* optr = (BYTE*) dest;
-    BYTE* op = (BYTE*) dest;
-    BYTE* oend = op + maxOutputSize;
-
-    int   ml, ml2, ml3, ml0;
-    const BYTE* ref = NULL;
-    const BYTE* start2 = NULL;
-    const BYTE* ref2 = NULL;
-    const BYTE* start3 = NULL;
-    const BYTE* ref3 = NULL;
-    const BYTE* start0;
-    const BYTE* ref0;
-
-    /* init */
-    *srcSizePtr = 0;
-    if (limit == limitedDestSize && maxOutputSize < 1) return 0;         /* Impossible to store anything */
-    if ((U32)inputSize > (U32)LZ4_MAX_INPUT_SIZE) return 0;              /* Unsupported input size, too large (or negative) */
-
-    ctx->end += inputSize;
-    if (limit == limitedDestSize) oend -= LASTLITERALS;                  /* Hack to support limitations of the LZ4 decompressor */
-    if (inputSize < LZ4_minLength) goto _last_literals;                  /* Input too small, no compression (all literals) */
-
-    ip++;
-
-    /* Main Loop */
-    while (ip < mflimit) {
-        ml = LZ4HC_InsertAndFindBestMatch (ctx, ip, matchlimit, (&ref), maxNbAttempts);
-        if (!ml) { ip++; continue; }
-
-        /* saved, in case we would skip too much */
-        start0 = ip;
-        ref0 = ref;
-        ml0 = ml;
-
-_Search2:
-        if (ip+ml < mflimit)
-            ml2 = LZ4HC_InsertAndGetWiderMatch(ctx, ip + ml - 2, ip + 0, matchlimit, ml, &ref2, &start2, maxNbAttempts);
-        else
-            ml2 = ml;
-
-        if (ml2 == ml) { /* No better match */
-            optr = op;
-            if (LZ4HC_encodeSequence(&ip, &op, &anchor, ml, ref, limit, oend)) goto _dest_overflow;
-            continue;
-        }
-
-        if (start0 < ip) {
-            if (start2 < ip + ml0) {  /* empirical */
-                ip = start0;
-                ref = ref0;
-                ml = ml0;
-            }
-        }
-
-        /* Here, start0==ip */
-        if ((start2 - ip) < 3) {  /* First Match too small : removed */
-            ml = ml2;
-            ip = start2;
-            ref =ref2;
-            goto _Search2;
-        }
-
-_Search3:
-        /* At this stage, we have :
-        *  ml2 > ml1, and
-        *  ip1+3 <= ip2 (usually < ip1+ml1) */
-        if ((start2 - ip) < OPTIMAL_ML) {
-            int correction;
-            int new_ml = ml;
-            if (new_ml > OPTIMAL_ML) new_ml = OPTIMAL_ML;
-            if (ip+new_ml > start2 + ml2 - MINMATCH) new_ml = (int)(start2 - ip) + ml2 - MINMATCH;
-            correction = new_ml - (int)(start2 - ip);
-            if (correction > 0) {
-                start2 += correction;
-                ref2 += correction;
-                ml2 -= correction;
-            }
-        }
-        /* Now, we have start2 = ip+new_ml, with new_ml = min(ml, OPTIMAL_ML=18) */
-
-        if (start2 + ml2 < mflimit)
-            ml3 = LZ4HC_InsertAndGetWiderMatch(ctx, start2 + ml2 - 3, start2, matchlimit, ml2, &ref3, &start3, maxNbAttempts);
-        else
-            ml3 = ml2;
-
-        if (ml3 == ml2) {  /* No better match : 2 sequences to encode */
-            /* ip & ref are known; Now for ml */
-            if (start2 < ip+ml)  ml = (int)(start2 - ip);
-            /* Now, encode 2 sequences */
-            optr = op;
-            if (LZ4HC_encodeSequence(&ip, &op, &anchor, ml, ref, limit, oend)) goto _dest_overflow;
-            ip = start2;
-            optr = op;
-            if (LZ4HC_encodeSequence(&ip, &op, &anchor, ml2, ref2, limit, oend)) goto _dest_overflow;
-            continue;
-        }
-
-        if (start3 < ip+ml+3) {  /* Not enough space for match 2 : remove it */
-            if (start3 >= (ip+ml)) {  /* can write Seq1 immediately ==> Seq2 is removed, so Seq3 becomes Seq1 */
-                if (start2 < ip+ml) {
-                    int correction = (int)(ip+ml - start2);
-                    start2 += correction;
-                    ref2 += correction;
-                    ml2 -= correction;
-                    if (ml2 < MINMATCH) {
-                        start2 = start3;
-                        ref2 = ref3;
-                        ml2 = ml3;
-                    }
-                }
-
-                optr = op;
-                if (LZ4HC_encodeSequence(&ip, &op, &anchor, ml, ref, limit, oend)) goto _dest_overflow;
-                ip  = start3;
-                ref = ref3;
-                ml  = ml3;
-
-                start0 = start2;
-                ref0 = ref2;
-                ml0 = ml2;
-                goto _Search2;
-            }
-
-            start2 = start3;
-            ref2 = ref3;
-            ml2 = ml3;
-            goto _Search3;
-        }
-
-        /*
-        * OK, now we have 3 ascending matches; let's write at least the first one
-        * ip & ref are known; Now for ml
-        */
-        if (start2 < ip+ml) {
-            if ((start2 - ip) < (int)ML_MASK) {
-                int correction;
-                if (ml > OPTIMAL_ML) ml = OPTIMAL_ML;
-                if (ip + ml > start2 + ml2 - MINMATCH) ml = (int)(start2 - ip) + ml2 - MINMATCH;
-                correction = ml - (int)(start2 - ip);
-                if (correction > 0) {
-                    start2 += correction;
-                    ref2 += correction;
-                    ml2 -= correction;
-                }
-            } else {
-                ml = (int)(start2 - ip);
-            }
-        }
-        optr = op;
-        if (LZ4HC_encodeSequence(&ip, &op, &anchor, ml, ref, limit, oend)) goto _dest_overflow;
-
-        ip = start2;
-        ref = ref2;
-        ml = ml2;
-
-        start2 = start3;
-        ref2 = ref3;
-        ml2 = ml3;
-
-        goto _Search3;
-    }
-
-_last_literals:
-    /* Encode Last Literals */
-    {   size_t lastRunSize = (size_t)(iend - anchor);  /* literals */
-        size_t litLength = (lastRunSize + 255 - RUN_MASK) / 255;
-        size_t const totalSize = 1 + litLength + lastRunSize;
-        if (limit == limitedDestSize) oend += LASTLITERALS;  /* restore correct value */
-        if (limit && (op + totalSize > oend)) {
-            if (limit == limitedOutput) return 0;  /* Check output limit */
-            /* adapt lastRunSize to fill 'dest' */
-            lastRunSize  = (size_t)(oend - op) - 1;
-            litLength = (lastRunSize + 255 - RUN_MASK) / 255;
-            lastRunSize -= litLength;
-        }
-        ip = anchor + lastRunSize;
-
-        if (lastRunSize >= RUN_MASK) {
-            size_t accumulator = lastRunSize - RUN_MASK;
-            *op++ = (RUN_MASK << ML_BITS);
-            for(; accumulator >= 255 ; accumulator -= 255) *op++ = 255;
-            *op++ = (BYTE) accumulator;
-        } else {
-            *op++ = (BYTE)(lastRunSize << ML_BITS);
-        }
-        memcpy(op, anchor, lastRunSize);
-        op += lastRunSize;
-    }
-
-    /* End */
-    *srcSizePtr = (int) (((const char*)ip) - source);
-    return (int) (((char*)op)-dest);
-
-_dest_overflow:
-    if (limit == limitedDestSize) {
-        op = optr;  /* restore correct out pointer */
-        goto _last_literals;
-    }
-    return 0;
-}
-
-static int LZ4HC_getSearchNum(int compressionLevel)
-{
-    switch (compressionLevel) {
-        default: return 0; /* unused */
-        case 11: return 128;
-        case 12: return 1<<10;
-    }
-}
-
-static int LZ4HC_compress_generic (
-    LZ4HC_CCtx_internal* const ctx,
-    const char* const src,
-    char* const dst,
-    int* const srcSizePtr,
-    int const dstCapacity,
-    int cLevel,
-    limitedOutput_directive limit
-    )
-{
-    if (cLevel < 1) cLevel = LZ4HC_CLEVEL_DEFAULT;   /* note : convention is different from lz4frame, maybe to reconsider */
-    if (cLevel > 9) {
-        if (limit == limitedDestSize) cLevel = 10;
-        switch (cLevel) {
-            case 10:
-                return LZ4HC_compress_hashChain(ctx, src, dst, srcSizePtr, dstCapacity, 1 << (15-1), limit);
-            case 11:
-                ctx->searchNum = LZ4HC_getSearchNum(cLevel);
-                return LZ4HC_compress_optimal(ctx, src, dst, *srcSizePtr, dstCapacity, limit, 128, 0);
-            default:
-            case 12:
-                ctx->searchNum = LZ4HC_getSearchNum(cLevel);
-                return LZ4HC_compress_optimal(ctx, src, dst, *srcSizePtr, dstCapacity, limit, LZ4_OPT_NUM, 1);
-        }
-    }
-    return LZ4HC_compress_hashChain(ctx, src, dst, srcSizePtr, dstCapacity, 1 << (cLevel-1), limit);  /* levels 1-9 */
-}
-
-
-int LZ4_sizeofStateHC(void) { return sizeof(LZ4_streamHC_t); }
-
-int LZ4_compress_HC_extStateHC (void* state, const char* src, char* dst, int srcSize, int dstCapacity, int compressionLevel)
-{
-    LZ4HC_CCtx_internal* const ctx = &((LZ4_streamHC_t*)state)->internal_donotuse;
-    if (((size_t)(state)&(sizeof(void*)-1)) != 0) return 0;   /* Error : state is not aligned for pointers (32 or 64 bits) */
-    LZ4HC_init (ctx, (const BYTE*)src);
-    if (dstCapacity < LZ4_compressBound(srcSize))
-        return LZ4HC_compress_generic (ctx, src, dst, &srcSize, dstCapacity, compressionLevel, limitedOutput);
-    else
-        return LZ4HC_compress_generic (ctx, src, dst, &srcSize, dstCapacity, compressionLevel, noLimit);
-}
-
-int LZ4_compress_HC(const char* src, char* dst, int srcSize, int dstCapacity, int compressionLevel)
-{
-#if defined(LZ4HC_HEAPMODE) && LZ4HC_HEAPMODE==1
-    LZ4_streamHC_t* const statePtr = (LZ4_streamHC_t*)malloc(sizeof(LZ4_streamHC_t));
-#else
-    LZ4_streamHC_t state;
-    LZ4_streamHC_t* const statePtr = &state;
-#endif
-    int const cSize = LZ4_compress_HC_extStateHC(statePtr, src, dst, srcSize, dstCapacity, compressionLevel);
-#if defined(LZ4HC_HEAPMODE) && LZ4HC_HEAPMODE==1
-    free(statePtr);
-#endif
-    return cSize;
-}
-
-/* LZ4_compress_HC_destSize() :
- * currently only compatible with the Hash Chain implementation,
- * hence compression level is limited to LZ4HC_CLEVEL_OPT_MIN-1 */
-int LZ4_compress_HC_destSize(void* LZ4HC_Data, const char* source, char* dest, int* sourceSizePtr, int targetDestSize, int cLevel)
-{
-    LZ4HC_CCtx_internal* const ctx = &((LZ4_streamHC_t*)LZ4HC_Data)->internal_donotuse;
-    LZ4HC_init(ctx, (const BYTE*) source);
-    return LZ4HC_compress_generic(ctx, source, dest, sourceSizePtr, targetDestSize, cLevel, limitedDestSize);
-}
-
-
-
-/**************************************
-*  Streaming Functions
-**************************************/
-/* allocation */
-LZ4_streamHC_t* LZ4_createStreamHC(void) { return (LZ4_streamHC_t*)malloc(sizeof(LZ4_streamHC_t)); }
-int             LZ4_freeStreamHC (LZ4_streamHC_t* LZ4_streamHCPtr) { free(LZ4_streamHCPtr); return 0; }
-
-
-/* initialization */
-void LZ4_resetStreamHC (LZ4_streamHC_t* LZ4_streamHCPtr, int compressionLevel)
-{
-    LZ4_STATIC_ASSERT(sizeof(LZ4HC_CCtx_internal) <= sizeof(size_t) * LZ4_STREAMHCSIZE_SIZET);   /* if compilation fails here, LZ4_STREAMHCSIZE must be increased */
-    LZ4_streamHCPtr->internal_donotuse.base = NULL;
-    if (compressionLevel > LZ4HC_CLEVEL_MAX) compressionLevel = LZ4HC_CLEVEL_MAX;  /* cap compression level */
-    LZ4_streamHCPtr->internal_donotuse.compressionLevel = compressionLevel;
-    LZ4_streamHCPtr->internal_donotuse.searchNum = LZ4HC_getSearchNum(compressionLevel);
-}
-
-int LZ4_loadDictHC (LZ4_streamHC_t* LZ4_streamHCPtr, const char* dictionary, int dictSize)
-{
-    LZ4HC_CCtx_internal* const ctxPtr = &LZ4_streamHCPtr->internal_donotuse;
-    if (dictSize > 64 KB) {
-        dictionary += dictSize - 64 KB;
-        dictSize = 64 KB;
-    }
-    LZ4HC_init (ctxPtr, (const BYTE*)dictionary);
-    ctxPtr->end = (const BYTE*)dictionary + dictSize;
-    if (ctxPtr->compressionLevel >= LZ4HC_CLEVEL_OPT_MIN)
-        LZ4HC_updateBinTree(ctxPtr, ctxPtr->end - MFLIMIT, ctxPtr->end - LASTLITERALS);
-    else
-        if (dictSize >= 4) LZ4HC_Insert (ctxPtr, ctxPtr->end-3);
-    return dictSize;
-}
-
-
-/* compression */
-
-static void LZ4HC_setExternalDict(LZ4HC_CCtx_internal* ctxPtr, const BYTE* newBlock)
-{
-    if (ctxPtr->compressionLevel >= LZ4HC_CLEVEL_OPT_MIN)
-        LZ4HC_updateBinTree(ctxPtr, ctxPtr->end - MFLIMIT, ctxPtr->end - LASTLITERALS);
-    else
-        if (ctxPtr->end >= ctxPtr->base + 4) LZ4HC_Insert (ctxPtr, ctxPtr->end-3);   /* Referencing remaining dictionary content */
-
-    /* Only one memory segment for extDict, so any previous extDict is lost at this stage */
-    ctxPtr->lowLimit  = ctxPtr->dictLimit;
-    ctxPtr->dictLimit = (U32)(ctxPtr->end - ctxPtr->base);
-    ctxPtr->dictBase  = ctxPtr->base;
-    ctxPtr->base = newBlock - ctxPtr->dictLimit;
-    ctxPtr->end  = newBlock;
-    ctxPtr->nextToUpdate = ctxPtr->dictLimit;   /* match referencing will resume from there */
-}
-
-static int LZ4_compressHC_continue_generic (LZ4_streamHC_t* LZ4_streamHCPtr,
-                                            const char* src, char* dst,
-                                            int* srcSizePtr, int dstCapacity,
-                                            limitedOutput_directive limit)
-{
-    LZ4HC_CCtx_internal* const ctxPtr = &LZ4_streamHCPtr->internal_donotuse;
-    /* auto-init if forgotten */
-    if (ctxPtr->base == NULL) LZ4HC_init (ctxPtr, (const BYTE*) src);
-
-    /* Check overflow */
-    if ((size_t)(ctxPtr->end - ctxPtr->base) > 2 GB) {
-        size_t dictSize = (size_t)(ctxPtr->end - ctxPtr->base) - ctxPtr->dictLimit;
-        if (dictSize > 64 KB) dictSize = 64 KB;
-        LZ4_loadDictHC(LZ4_streamHCPtr, (const char*)(ctxPtr->end) - dictSize, (int)dictSize);
-    }
-
-    /* Check if blocks follow each other */
-    if ((const BYTE*)src != ctxPtr->end) LZ4HC_setExternalDict(ctxPtr, (const BYTE*)src);
-
-    /* Check overlapping input/dictionary space */
-    {   const BYTE* sourceEnd = (const BYTE*) src + *srcSizePtr;
-        const BYTE* const dictBegin = ctxPtr->dictBase + ctxPtr->lowLimit;
-        const BYTE* const dictEnd   = ctxPtr->dictBase + ctxPtr->dictLimit;
-        if ((sourceEnd > dictBegin) && ((const BYTE*)src < dictEnd)) {
-            if (sourceEnd > dictEnd) sourceEnd = dictEnd;
-            ctxPtr->lowLimit = (U32)(sourceEnd - ctxPtr->dictBase);
-            if (ctxPtr->dictLimit - ctxPtr->lowLimit < 4) ctxPtr->lowLimit = ctxPtr->dictLimit;
-        }
-    }
-
-    return LZ4HC_compress_generic (ctxPtr, src, dst, srcSizePtr, dstCapacity, ctxPtr->compressionLevel, limit);
-}
-
-int LZ4_compress_HC_continue (LZ4_streamHC_t* LZ4_streamHCPtr, const char* src, char* dst, int srcSize, int dstCapacity)
-{
-    if (dstCapacity < LZ4_compressBound(srcSize))
-        return LZ4_compressHC_continue_generic (LZ4_streamHCPtr, src, dst, &srcSize, dstCapacity, limitedOutput);
-    else
-        return LZ4_compressHC_continue_generic (LZ4_streamHCPtr, src, dst, &srcSize, dstCapacity, noLimit);
-}
-
-int LZ4_compress_HC_continue_destSize (LZ4_streamHC_t* LZ4_streamHCPtr, const char* src, char* dst, int* srcSizePtr, int targetDestSize)
-{
-    LZ4HC_CCtx_internal* const ctxPtr = &LZ4_streamHCPtr->internal_donotuse;
-    if (ctxPtr->compressionLevel >= LZ4HC_CLEVEL_OPT_MIN) LZ4HC_init(ctxPtr, (const BYTE*)src);   /* not compatible with btopt implementation */
-    return LZ4_compressHC_continue_generic(LZ4_streamHCPtr, src, dst, srcSizePtr, targetDestSize, limitedDestSize);
-}
-
-
-
-/* dictionary saving */
-
-int LZ4_saveDictHC (LZ4_streamHC_t* LZ4_streamHCPtr, char* safeBuffer, int dictSize)
-{
-    LZ4HC_CCtx_internal* const streamPtr = &LZ4_streamHCPtr->internal_donotuse;
-    int const prefixSize = (int)(streamPtr->end - (streamPtr->base + streamPtr->dictLimit));
-    if (dictSize > 64 KB) dictSize = 64 KB;
-    if (dictSize < 4) dictSize = 0;
-    if (dictSize > prefixSize) dictSize = prefixSize;
-    memmove(safeBuffer, streamPtr->end - dictSize, dictSize);
-    {   U32 const endIndex = (U32)(streamPtr->end - streamPtr->base);
-        streamPtr->end = (const BYTE*)safeBuffer + dictSize;
-        streamPtr->base = streamPtr->end - endIndex;
-        streamPtr->dictLimit = endIndex - dictSize;
-        streamPtr->lowLimit = endIndex - dictSize;
-        if (streamPtr->nextToUpdate < streamPtr->dictLimit) streamPtr->nextToUpdate = streamPtr->dictLimit;
-    }
-    return dictSize;
-}
-
-
-/***********************************
-*  Deprecated Functions
-***********************************/
-/* These functions currently generate deprecation warnings */
-/* Deprecated compression functions */
-int LZ4_compressHC(const char* src, char* dst, int srcSize) { return LZ4_compress_HC (src, dst, srcSize, LZ4_compressBound(srcSize), 0); }
-int LZ4_compressHC_limitedOutput(const char* src, char* dst, int srcSize, int maxDstSize) { return LZ4_compress_HC(src, dst, srcSize, maxDstSize, 0); }
-int LZ4_compressHC2(const char* src, char* dst, int srcSize, int cLevel) { return LZ4_compress_HC (src, dst, srcSize, LZ4_compressBound(srcSize), cLevel); }
-int LZ4_compressHC2_limitedOutput(const char* src, char* dst, int srcSize, int maxDstSize, int cLevel) { return LZ4_compress_HC(src, dst, srcSize, maxDstSize, cLevel); }
-int LZ4_compressHC_withStateHC (void* state, const char* src, char* dst, int srcSize) { return LZ4_compress_HC_extStateHC (state, src, dst, srcSize, LZ4_compressBound(srcSize), 0); }
-int LZ4_compressHC_limitedOutput_withStateHC (void* state, const char* src, char* dst, int srcSize, int maxDstSize) { return LZ4_compress_HC_extStateHC (state, src, dst, srcSize, maxDstSize, 0); }
-int LZ4_compressHC2_withStateHC (void* state, const char* src, char* dst, int srcSize, int cLevel) { return LZ4_compress_HC_extStateHC(state, src, dst, srcSize, LZ4_compressBound(srcSize), cLevel); }
-int LZ4_compressHC2_limitedOutput_withStateHC (void* state, const char* src, char* dst, int srcSize, int maxDstSize, int cLevel) { return LZ4_compress_HC_extStateHC(state, src, dst, srcSize, maxDstSize, cLevel); }
-int LZ4_compressHC_continue (LZ4_streamHC_t* ctx, const char* src, char* dst, int srcSize) { return LZ4_compress_HC_continue (ctx, src, dst, srcSize, LZ4_compressBound(srcSize)); }
-int LZ4_compressHC_limitedOutput_continue (LZ4_streamHC_t* ctx, const char* src, char* dst, int srcSize, int maxDstSize) { return LZ4_compress_HC_continue (ctx, src, dst, srcSize, maxDstSize); }
-
-
-/* Deprecated streaming functions */
-int LZ4_sizeofStreamStateHC(void) { return LZ4_STREAMHCSIZE; }
-
-int LZ4_resetStreamStateHC(void* state, char* inputBuffer)
-{
-    LZ4HC_CCtx_internal *ctx = &((LZ4_streamHC_t*)state)->internal_donotuse;
-    if ((((size_t)state) & (sizeof(void*)-1)) != 0) return 1;   /* Error : pointer is not aligned for pointer (32 or 64 bits) */
-    LZ4HC_init(ctx, (const BYTE*)inputBuffer);
-    ctx->inputBuffer = (BYTE*)inputBuffer;
-    return 0;
-}
-
-void* LZ4_createHC (char* inputBuffer)
-{
-    LZ4_streamHC_t* hc4 = (LZ4_streamHC_t*)ALLOCATOR(1, sizeof(LZ4_streamHC_t));
-    if (hc4 == NULL) return NULL;   /* not enough memory */
-    LZ4HC_init (&hc4->internal_donotuse, (const BYTE*)inputBuffer);
-    hc4->internal_donotuse.inputBuffer = (BYTE*)inputBuffer;
-    return hc4;
-}
-
-int LZ4_freeHC (void* LZ4HC_Data) { FREEMEM(LZ4HC_Data); return 0; }
-
-int LZ4_compressHC2_continue (void* LZ4HC_Data, const char* src, char* dst, int srcSize, int cLevel)
-{
-    return LZ4HC_compress_generic (&((LZ4_streamHC_t*)LZ4HC_Data)->internal_donotuse, src, dst, &srcSize, 0, cLevel, noLimit);
-}
-
-int LZ4_compressHC2_limitedOutput_continue (void* LZ4HC_Data, const char* src, char* dst, int srcSize, int dstCapacity, int cLevel)
-{
-    return LZ4HC_compress_generic (&((LZ4_streamHC_t*)LZ4HC_Data)->internal_donotuse, src, dst, &srcSize, dstCapacity, cLevel, limitedOutput);
-}
-
-char* LZ4_slideInputBufferHC(void* LZ4HC_Data)
-{
-    LZ4HC_CCtx_internal* const hc4 = &((LZ4_streamHC_t*)LZ4HC_Data)->internal_donotuse;
-    int const dictSize = LZ4_saveDictHC((LZ4_streamHC_t*)LZ4HC_Data, (char*)(hc4->inputBuffer), 64 KB);
-    return (char*)(hc4->inputBuffer + dictSize);
-}

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/lz4hc.h
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/lz4hc.h b/thirdparty/librdkafka-0.11.1/src/lz4hc.h
deleted file mode 100644
index 2e3880d..0000000
--- a/thirdparty/librdkafka-0.11.1/src/lz4hc.h
+++ /dev/null
@@ -1,269 +0,0 @@
-/*
-   LZ4 HC - High Compression Mode of LZ4
-   Header File
-   Copyright (C) 2011-2017, Yann Collet.
-   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-   Redistribution and use in source and binary forms, with or without
-   modification, are permitted provided that the following conditions are
-   met:
-
-       * Redistributions of source code must retain the above copyright
-   notice, this list of conditions and the following disclaimer.
-       * Redistributions in binary form must reproduce the above
-   copyright notice, this list of conditions and the following disclaimer
-   in the documentation and/or other materials provided with the
-   distribution.
-
-   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-   You can contact the author at :
-   - LZ4 source repository : https://github.com/lz4/lz4
-   - LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c
-*/
-#ifndef LZ4_HC_H_19834876238432
-#define LZ4_HC_H_19834876238432
-
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-/* --- Dependency --- */
-/* note : lz4hc is not an independent module, it requires lz4.h/lz4.c for proper compilation */
-#include "lz4.h"   /* stddef, LZ4LIB_API, LZ4_DEPRECATED */
-
-
-/* --- Useful constants --- */
-#define LZ4HC_CLEVEL_MIN         3
-#define LZ4HC_CLEVEL_DEFAULT     9
-#define LZ4HC_CLEVEL_OPT_MIN    11
-#define LZ4HC_CLEVEL_MAX        12
-
-
-/*-************************************
- *  Block Compression
- **************************************/
-/*! LZ4_compress_HC() :
- * Compress data from `src` into `dst`, using the more powerful but slower "HC" algorithm.
- * `dst` must be already allocated.
- * Compression is guaranteed to succeed if `dstCapacity >= LZ4_compressBound(srcSize)` (see "lz4.h")
- * Max supported `srcSize` value is LZ4_MAX_INPUT_SIZE (see "lz4.h")
- * `compressionLevel` : Recommended values are between 4 and 9, although any value between 1 and LZ4HC_CLEVEL_MAX will work.
- *                      Values > LZ4HC_CLEVEL_MAX behave the same as LZ4HC_CLEVEL_MAX.
- * @return : the number of bytes written into 'dst'
- *           or 0 if compression fails.
- */
-LZ4LIB_API int LZ4_compress_HC (const char* src, char* dst, int srcSize, int dstCapacity, int compressionLevel);
-
-
-/* Note :
- *   Decompression functions are provided within "lz4.h" (BSD license)
- */
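
As a usage illustration of the contract documented above (dst sized with LZ4_compressBound(), 0 returned on failure), here is a minimal round-trip sketch; the payload, buffer sizes and error handling are illustrative only, and decompression uses LZ4_decompress_safe() from lz4.h.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "lz4.h"
#include "lz4hc.h"

int main(void)
{
    const char src[] = "example payload example payload example payload";
    int const srcSize = (int)sizeof(src);
    int const bound = LZ4_compressBound(srcSize);          /* worst-case dst capacity */
    char* const dst = (char*)malloc((size_t)bound);
    char check[sizeof(src)];
    if (dst == NULL) return 1;

    int const cSize = LZ4_compress_HC(src, dst, srcSize, bound, LZ4HC_CLEVEL_DEFAULT);
    if (cSize == 0) { free(dst); return 1; }                /* 0 => compression failed */

    int const dSize = LZ4_decompress_safe(dst, check, cSize, srcSize);
    printf("%d -> %d bytes, round-trip %s\n", srcSize, cSize,
           (dSize == srcSize && memcmp(src, check, (size_t)srcSize) == 0) ? "ok" : "FAILED");
    free(dst);
    return 0;
}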
-
-
-/*! LZ4_compress_HC_extStateHC() :
- * Same as LZ4_compress_HC(), but using an externally allocated memory segment for `state`.
- * `state` size is provided by LZ4_sizeofStateHC().
- * Memory segment must be aligned on 8-bytes boundaries (which a normal malloc() will do properly).
- */
-LZ4LIB_API int LZ4_compress_HC_extStateHC(void* state, const char* src, char* dst, int srcSize, int maxDstSize, int compressionLevel);
-LZ4LIB_API int LZ4_sizeofStateHC(void);
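
A minimal sketch of the external-state variant described above, assuming the state is obtained from malloc() (which returns suitably aligned memory); the helper name is illustrative.

#include <stdlib.h>
#include "lz4.h"
#include "lz4hc.h"

/* illustrative helper: compress with caller-provided state */
static int compress_with_ext_state(const char* src, int srcSize, char* dst, int dstCapacity)
{
    void* const state = malloc((size_t)LZ4_sizeofStateHC());   /* malloc() provides 8-byte alignment */
    int cSize = 0;
    if (state != NULL) {
        cSize = LZ4_compress_HC_extStateHC(state, src, dst, srcSize, dstCapacity, LZ4HC_CLEVEL_DEFAULT);
        free(state);
    }
    return cSize;   /* 0 on failure */
}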
-
-
-/*-************************************
- *  Streaming Compression
- *  Bufferless synchronous API
- **************************************/
- typedef union LZ4_streamHC_u LZ4_streamHC_t;   /* incomplete type (defined later) */
-
-/*! LZ4_createStreamHC() and LZ4_freeStreamHC() :
- * These functions create and release memory for LZ4 HC streaming state.
- * Newly created states are automatically initialized.
- * Existing states can be re-used several times, using LZ4_resetStreamHC().
- * These methods are API and ABI stable, they can be used in combination with a DLL.
- */
-LZ4LIB_API LZ4_streamHC_t* LZ4_createStreamHC(void);
-LZ4LIB_API int             LZ4_freeStreamHC (LZ4_streamHC_t* streamHCPtr);
-
-LZ4LIB_API void LZ4_resetStreamHC (LZ4_streamHC_t* streamHCPtr, int compressionLevel);
-LZ4LIB_API int  LZ4_loadDictHC (LZ4_streamHC_t* streamHCPtr, const char* dictionary, int dictSize);
-
-LZ4LIB_API int LZ4_compress_HC_continue (LZ4_streamHC_t* streamHCPtr, const char* src, char* dst, int srcSize, int maxDstSize);
-
-LZ4LIB_API int LZ4_saveDictHC (LZ4_streamHC_t* streamHCPtr, char* safeBuffer, int maxDictSize);
-
-/*
-  These functions compress data in successive blocks of any size, using previous blocks as dictionary.
-  One key assumption is that previous blocks (up to 64 KB) remain read-accessible while compressing next blocks.
-  There is an exception for ring buffers, which can be smaller than 64 KB.
-  Ring buffers scenario is automatically detected and handled by LZ4_compress_HC_continue().
-
-  Before starting compression, state must be properly initialized, using LZ4_resetStreamHC().
-  A first "fictional block" can then be designated as initial dictionary, using LZ4_loadDictHC() (Optional).
-
-  Then, use LZ4_compress_HC_continue() to compress each successive block.
-  Previous memory blocks (including initial dictionary when present) must remain accessible and unmodified during compression.
-  'dst' buffer should be sized to handle worst case scenarios (see LZ4_compressBound()), to ensure compression success,
-  because in case of failure, the API does not guarantee context recovery, and the context will have to be reset.
-  If the `dst` buffer budget cannot be >= LZ4_compressBound(), consider using LZ4_compress_HC_continue_destSize() instead.
-
-  If, for any reason, previous data block can't be preserved unmodified in memory for next compression block,
-  you can save it to a more stable memory space, using LZ4_saveDictHC().
-  Return value of LZ4_saveDictHC() is the size of dictionary effectively saved into 'safeBuffer'.
-*/
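
The block-by-block flow described above can be sketched as follows; the double-buffering, the 64 KB block size and the size-prefix framing are illustrative choices made for the example, not part of the library.

#include <stdio.h>
#include "lz4.h"
#include "lz4hc.h"

int compress_stream(FILE* in, FILE* out)   /* illustrative framing: size prefix per block */
{
    static char inBuf[2][64 * 1024];       /* previous block stays resident (up to 64 KB history) */
    static char dstBuf[LZ4_COMPRESSBOUND(64 * 1024)];
    LZ4_streamHC_t* const stream = LZ4_createStreamHC();
    int idx = 0;
    if (stream == NULL) return -1;
    LZ4_resetStreamHC(stream, LZ4HC_CLEVEL_DEFAULT);

    for (;;) {
        int const readSize = (int)fread(inBuf[idx], 1, sizeof(inBuf[idx]), in);
        if (readSize <= 0) break;
        int const cSize = LZ4_compress_HC_continue(stream, inBuf[idx], dstBuf, readSize, (int)sizeof(dstBuf));
        if (cSize <= 0) { LZ4_freeStreamHC(stream); return -1; }
        fwrite(&cSize, sizeof(cSize), 1, out);
        fwrite(dstBuf, 1, (size_t)cSize, out);
        idx ^= 1;                          /* alternate buffers so the previous block remains unmodified */
    }
    LZ4_freeStreamHC(stream);
    return 0;
}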
-
-
- /*-*************************************
- * PRIVATE DEFINITIONS :
- * Do not use these definitions.
- * They are exposed to allow static allocation of `LZ4_streamHC_t`.
- * Using these definitions makes the code vulnerable to potential API break when upgrading LZ4
- **************************************/
-#define LZ4HC_DICTIONARY_LOGSIZE 17   /* because of btopt, hc would only need 16 */
-#define LZ4HC_MAXD (1<<LZ4HC_DICTIONARY_LOGSIZE)
-#define LZ4HC_MAXD_MASK (LZ4HC_MAXD - 1)
-
-#define LZ4HC_HASH_LOG 15
-#define LZ4HC_HASHTABLESIZE (1 << LZ4HC_HASH_LOG)
-#define LZ4HC_HASH_MASK (LZ4HC_HASHTABLESIZE - 1)
-
-
-#if defined(__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
-#include <stdint.h>
-
-typedef struct
-{
-    uint32_t   hashTable[LZ4HC_HASHTABLESIZE];
-    uint16_t   chainTable[LZ4HC_MAXD];
-    const uint8_t* end;         /* next block here to continue on current prefix */
-    const uint8_t* base;        /* All index relative to this position */
-    const uint8_t* dictBase;    /* alternate base for extDict */
-    uint8_t* inputBuffer;       /* deprecated */
-    uint32_t   dictLimit;       /* below that point, need extDict */
-    uint32_t   lowLimit;        /* below that point, no more dict */
-    uint32_t   nextToUpdate;    /* index from which to continue dictionary update */
-    uint32_t   searchNum;       /* only for optimal parser */
-    uint32_t   compressionLevel;
-} LZ4HC_CCtx_internal;
-
-#else
-
-typedef struct
-{
-    unsigned int   hashTable[LZ4HC_HASHTABLESIZE];
-    unsigned short   chainTable[LZ4HC_MAXD];
-    const unsigned char* end;        /* next block here to continue on current prefix */
-    const unsigned char* base;       /* All index relative to this position */
-    const unsigned char* dictBase;   /* alternate base for extDict */
-    unsigned char* inputBuffer;      /* deprecated */
-    unsigned int   dictLimit;        /* below that point, need extDict */
-    unsigned int   lowLimit;         /* below that point, no more dict */
-    unsigned int   nextToUpdate;     /* index from which to continue dictionary update */
-    unsigned int   searchNum;        /* only for optimal parser */
-    int            compressionLevel;
-} LZ4HC_CCtx_internal;
-
-#endif
-
-#define LZ4_STREAMHCSIZE       (4*LZ4HC_HASHTABLESIZE + 2*LZ4HC_MAXD + 56) /* 393268 */
-#define LZ4_STREAMHCSIZE_SIZET (LZ4_STREAMHCSIZE / sizeof(size_t))
-union LZ4_streamHC_u {
-    size_t table[LZ4_STREAMHCSIZE_SIZET];
-    LZ4HC_CCtx_internal internal_donotuse;
-};   /* previously typedef'd to LZ4_streamHC_t */
-/*
-  LZ4_streamHC_t :
-  This structure allows static allocation of LZ4 HC streaming state.
-  State must be initialized using LZ4_resetStreamHC() before first use.
-
-  Static allocation shall only be used in combination with static linking.
-  When invoking LZ4 from a DLL, use create/free functions instead, which are API and ABI stable.
-*/
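
Under the static-linking caveat above, a statically allocated state would look like the following minimal sketch (the global and function names are illustrative):

#include "lz4hc.h"

static LZ4_streamHC_t g_hcState;   /* statically allocated streaming state */

void init_hc_state(void)
{
    /* must be initialized before first use, as noted above */
    LZ4_resetStreamHC(&g_hcState, LZ4HC_CLEVEL_DEFAULT);
    /* &g_hcState can now be passed to LZ4_loadDictHC() / LZ4_compress_HC_continue() */
}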
-
-
-/*-************************************
-*  Deprecated Functions
-**************************************/
-/* see lz4.h LZ4_DISABLE_DEPRECATE_WARNINGS to turn off deprecation warnings */
-
-/* deprecated compression functions */
-/* these functions will trigger warning messages in future releases */
-LZ4LIB_API LZ4_DEPRECATED("use LZ4_compress_HC() instead") int LZ4_compressHC               (const char* source, char* dest, int inputSize);
-LZ4LIB_API LZ4_DEPRECATED("use LZ4_compress_HC() instead") int LZ4_compressHC_limitedOutput (const char* source, char* dest, int inputSize, int maxOutputSize);
-LZ4LIB_API LZ4_DEPRECATED("use LZ4_compress_HC() instead") int LZ4_compressHC2 (const char* source, char* dest, int inputSize, int compressionLevel);
-LZ4LIB_API LZ4_DEPRECATED("use LZ4_compress_HC() instead") int LZ4_compressHC2_limitedOutput (const char* source, char* dest, int inputSize, int maxOutputSize, int compressionLevel);
-LZ4LIB_API LZ4_DEPRECATED("use LZ4_compress_HC_extStateHC() instead") int LZ4_compressHC_withStateHC               (void* state, const char* source, char* dest, int inputSize);
-LZ4LIB_API LZ4_DEPRECATED("use LZ4_compress_HC_extStateHC() instead") int LZ4_compressHC_limitedOutput_withStateHC (void* state, const char* source, char* dest, int inputSize, int maxOutputSize);
-LZ4LIB_API LZ4_DEPRECATED("use LZ4_compress_HC_extStateHC() instead") int LZ4_compressHC2_withStateHC (void* state, const char* source, char* dest, int inputSize, int compressionLevel);
-LZ4LIB_API LZ4_DEPRECATED("use LZ4_compress_HC_extStateHC() instead") int LZ4_compressHC2_limitedOutput_withStateHC(void* state, const char* source, char* dest, int inputSize, int maxOutputSize, int compressionLevel);
-LZ4LIB_API LZ4_DEPRECATED("use LZ4_compress_HC_continue() instead") int LZ4_compressHC_continue               (LZ4_streamHC_t* LZ4_streamHCPtr, const char* source, char* dest, int inputSize);
-LZ4LIB_API LZ4_DEPRECATED("use LZ4_compress_HC_continue() instead") int LZ4_compressHC_limitedOutput_continue (LZ4_streamHC_t* LZ4_streamHCPtr, const char* source, char* dest, int inputSize, int maxOutputSize);
-
-/* Deprecated Streaming functions using older model; should no longer be used */
-LZ4LIB_API LZ4_DEPRECATED("use LZ4_createStreamHC() instead") void* LZ4_createHC (char* inputBuffer);
-LZ4LIB_API LZ4_DEPRECATED("use LZ4_saveDictHC() instead")     char* LZ4_slideInputBufferHC (void* LZ4HC_Data);
-LZ4LIB_API LZ4_DEPRECATED("use LZ4_freeStreamHC() instead")   int   LZ4_freeHC (void* LZ4HC_Data);
-LZ4LIB_API LZ4_DEPRECATED("use LZ4_compress_HC_continue() instead") int LZ4_compressHC2_continue (void* LZ4HC_Data, const char* source, char* dest, int inputSize, int compressionLevel);
-LZ4LIB_API LZ4_DEPRECATED("use LZ4_compress_HC_continue() instead") int LZ4_compressHC2_limitedOutput_continue (void* LZ4HC_Data, const char* source, char* dest, int inputSize, int maxOutputSize, int compressionLevel);
-LZ4LIB_API LZ4_DEPRECATED("use LZ4_createStreamHC() instead") int   LZ4_sizeofStreamStateHC(void);
-LZ4LIB_API LZ4_DEPRECATED("use LZ4_resetStreamHC() instead")  int   LZ4_resetStreamStateHC(void* state, char* inputBuffer);
-
-
-#if defined (__cplusplus)
-}
-#endif
-
-#endif /* LZ4_HC_H_19834876238432 */
-
-/*-************************************************
- * !!!!!     STATIC LINKING ONLY     !!!!!
- * Following definitions are considered experimental.
- * They should not be linked from DLL,
- * as there is no guarantee of API stability yet.
- * Prototypes will be promoted to "stable" status
- * after successful usage in real-life scenarios.
- *************************************************/
-#ifdef LZ4_HC_STATIC_LINKING_ONLY   /* protection macro */
-#ifndef LZ4_HC_SLO_098092834
-#define LZ4_HC_SLO_098092834
-
-/*! LZ4_compress_HC_destSize() :
- *  Will try to compress as much data from `src` as possible
- *  that can fit in `targetDstSize` budget.
- *  Result is provided in 2 parts :
- * @return : the number of bytes written into 'dst'
- *           or 0 if compression fails.
- * `srcSizePtr` : value will be updated to indicate how many bytes were read from `src`
- */
-LZ4LIB_API int LZ4_compress_HC_destSize(void* LZ4HC_Data,
-                            const char* src, char* dst,
-                            int* srcSizePtr, int targetDstSize,
-                            int compressionLevel);
-
-/*! LZ4_compress_HC_continue_destSize() :
- *  Similar as LZ4_compress_HC_continue(),
- *  but will read a variable nb of bytes from `src`
- *  to fit into `targetDstSize` budget.
- *  Result is provided in 2 parts :
- * @return : the number of bytes written into 'dst'
- *           or 0 if compression fails.
- * `srcSizePtr` : value will be updated to indicate how many bytes were read from `src`.
- * Important : due to limitations, this prototype only works well up to cLevel < LZ4HC_CLEVEL_OPT_MIN;
- *             beyond that level, compression performance is much reduced due to internal incompatibilities.
- */
-LZ4LIB_API int LZ4_compress_HC_continue_destSize(LZ4_streamHC_t* LZ4_streamHCPtr,
-                            const char* src, char* dst,
-                            int* srcSizePtr, int targetDstSize);
-
-#endif   /* LZ4_HC_SLO_098092834 */
-#endif   /* LZ4_HC_STATIC_LINKING_ONLY */
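
To illustrate the destSize prototypes guarded above, here is a minimal sketch that fits as much input as possible into a fixed output budget; the buffer sizes, fill pattern and compression level are illustrative, and the state is simply a malloc()'d block of LZ4_sizeofStateHC() bytes.

#define LZ4_HC_STATIC_LINKING_ONLY   /* exposes the destSize prototypes above */
#include <stdio.h>
#include <stdlib.h>
#include "lz4.h"
#include "lz4hc.h"

int main(void)
{
    static char src[64 * 1024];
    char dst[4096];                            /* fixed output budget */
    int srcSize = (int)sizeof(src);
    void* const state = malloc((size_t)LZ4_sizeofStateHC());
    if (state == NULL) return 1;

    for (int i = 0; i < srcSize; i++) src[i] = (char)(i & 0x0F);   /* something compressible */

    int const cSize = LZ4_compress_HC_destSize(state, src, dst, &srcSize, (int)sizeof(dst), 9);
    printf("consumed %d input bytes, wrote %d of %d budget bytes\n", srcSize, cSize, (int)sizeof(dst));
    free(state);
    return cSize == 0;
}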

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/lz4opt.h
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/lz4opt.h b/thirdparty/librdkafka-0.11.1/src/lz4opt.h
deleted file mode 100644
index 416241a..0000000
--- a/thirdparty/librdkafka-0.11.1/src/lz4opt.h
+++ /dev/null
@@ -1,360 +0,0 @@
-/*
-    lz4opt.h - Optimal Mode of LZ4
-    Copyright (C) 2015-2017, Przemyslaw Skibinski <in...@gmail.com>
-    Note : this file is intended to be included within lz4hc.c
-
-    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-    Redistribution and use in source and binary forms, with or without
-    modification, are permitted provided that the following conditions are
-    met:
-
-    * Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above
-    copyright notice, this list of conditions and the following disclaimer
-    in the documentation and/or other materials provided with the
-    distribution.
-
-    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-    "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-    You can contact the author at :
-       - LZ4 source repository : https://github.com/lz4/lz4
-       - LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c
-*/
-
-#define LZ4_OPT_NUM   (1<<12)
-
-
-typedef struct {
-    int off;
-    int len;
-} LZ4HC_match_t;
-
-typedef struct {
-    int price;
-    int off;
-    int mlen;
-    int litlen;
-} LZ4HC_optimal_t;
-
-
-/* encoding cost, in bytes */
-FORCE_INLINE size_t LZ4HC_literalsPrice(size_t litlen)
-{
-    size_t price = litlen;
-    if (litlen >= (size_t)RUN_MASK) price += 1 + (litlen-RUN_MASK)/255;
-    return price;
-}
-
-
-/* requires mlen >= MINMATCH */
-FORCE_INLINE size_t LZ4HC_sequencePrice(size_t litlen, size_t mlen)
-{
-    size_t price = 2 + 1; /* 16-bit offset + token */
-
-    price += LZ4HC_literalsPrice(litlen);
-
-    if (mlen >= (size_t)(ML_MASK+MINMATCH))
-        price+= 1+(mlen-(ML_MASK+MINMATCH))/255;
-
-    return price;
-}
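
As a worked example of the cost model above (values chosen for illustration): with litlen = 20 and mlen = 70, LZ4HC_literalsPrice(20) = 20 + 1 + (20-15)/255 = 21, and LZ4HC_sequencePrice(20, 70) = 2 + 1 + 21 + 1 + (70-19)/255 = 25. This matches the 25 bytes such a sequence occupies once encoded: 1 token + 1 literal-length extension byte + 20 literals + 2 offset bytes + 1 match-length extension byte.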
-
-
-/*-*************************************
-*  Binary Tree search
-***************************************/
-FORCE_INLINE int LZ4HC_BinTree_InsertAndGetAllMatches (
-    LZ4HC_CCtx_internal* ctx,
-    const BYTE* const ip,
-    const BYTE* const iHighLimit,
-    size_t best_mlen,
-    LZ4HC_match_t* matches,
-    int* matchNum)
-{
-    U16* const chainTable = ctx->chainTable;
-    U32* const HashTable = ctx->hashTable;
-    const BYTE* const base = ctx->base;
-    const U32 dictLimit = ctx->dictLimit;
-    const U32 current = (U32)(ip - base);
-    const U32 lowLimit = (ctx->lowLimit + MAX_DISTANCE > current) ? ctx->lowLimit : current - (MAX_DISTANCE - 1);
-    const BYTE* const dictBase = ctx->dictBase;
-    const BYTE* match;
-    int nbAttempts = ctx->searchNum;
-    int mnum = 0;
-    U16 *ptr0, *ptr1, delta0, delta1;
-    U32 matchIndex;
-    size_t matchLength = 0;
-    U32* HashPos;
-
-    if (ip + MINMATCH > iHighLimit) return 1;
-
-    /* HC4 match finder */
-    HashPos = &HashTable[LZ4HC_hashPtr(ip)];
-    matchIndex = *HashPos;
-    *HashPos = current;
-
-    ptr0 = &DELTANEXTMAXD(current*2+1);
-    ptr1 = &DELTANEXTMAXD(current*2);
-    delta0 = delta1 = (U16)(current - matchIndex);
-
-    while ((matchIndex < current) && (matchIndex>=lowLimit) && (nbAttempts)) {
-        nbAttempts--;
-        if (matchIndex >= dictLimit) {
-            match = base + matchIndex;
-            matchLength = LZ4_count(ip, match, iHighLimit);
-        } else {
-            const BYTE* vLimit = ip + (dictLimit - matchIndex);
-            match = dictBase + matchIndex;
-            if (vLimit > iHighLimit) vLimit = iHighLimit;
-            matchLength = LZ4_count(ip, match, vLimit);
-            if ((ip+matchLength == vLimit) && (vLimit < iHighLimit))
-                matchLength += LZ4_count(ip+matchLength, base+dictLimit, iHighLimit);
-        }
-
-        if (matchLength > best_mlen) {
-            best_mlen = matchLength;
-            if (matches) {
-                if (matchIndex >= dictLimit)
-                    matches[mnum].off = (int)(ip - match);
-                else
-                    matches[mnum].off = (int)(ip - (base + matchIndex)); /* virtual matchpos */
-                matches[mnum].len = (int)matchLength;
-                mnum++;
-            }
-            if (best_mlen > LZ4_OPT_NUM) break;
-        }
-
-        if (ip+matchLength >= iHighLimit)   /* equal : no way to know if inf or sup */
-            break;   /* stop here to guarantee consistency; this misses a bit of compression, but other solutions could corrupt the tree */
-
-        if (*(ip+matchLength) < *(match+matchLength)) {
-            *ptr0 = delta0;
-            ptr0 = &DELTANEXTMAXD(matchIndex*2);
-            if (*ptr0 == (U16)-1) break;
-            delta0 = *ptr0;
-            delta1 += delta0;
-            matchIndex -= delta0;
-        } else {
-            *ptr1 = delta1;
-            ptr1 = &DELTANEXTMAXD(matchIndex*2+1);
-            if (*ptr1 == (U16)-1) break;
-            delta1 = *ptr1;
-            delta0 += delta1;
-            matchIndex -= delta1;
-        }
-    }
-
-    *ptr0 = (U16)-1;
-    *ptr1 = (U16)-1;
-    if (matchNum) *matchNum = mnum;
-  /*  if (best_mlen > 8) return best_mlen-8; */
-    if (!matchNum) return 1;
-    return 1;
-}
-
-
-FORCE_INLINE void LZ4HC_updateBinTree(LZ4HC_CCtx_internal* ctx, const BYTE* const ip, const BYTE* const iHighLimit)
-{
-    const BYTE* const base = ctx->base;
-    const U32 target = (U32)(ip - base);
-    U32 idx = ctx->nextToUpdate;
-    while(idx < target)
-        idx += LZ4HC_BinTree_InsertAndGetAllMatches(ctx, base+idx, iHighLimit, 8, NULL, NULL);
-}
-
-
-/** Tree updater, providing best match */
-FORCE_INLINE int LZ4HC_BinTree_GetAllMatches (
-                        LZ4HC_CCtx_internal* ctx,
-                        const BYTE* const ip, const BYTE* const iHighLimit,
-                        size_t best_mlen, LZ4HC_match_t* matches, const int fullUpdate)
-{
-    int mnum = 0;
-    if (ip < ctx->base + ctx->nextToUpdate) return 0;   /* skipped area */
-    if (fullUpdate) LZ4HC_updateBinTree(ctx, ip, iHighLimit);
-    best_mlen = LZ4HC_BinTree_InsertAndGetAllMatches(ctx, ip, iHighLimit, best_mlen, matches, &mnum);
-    ctx->nextToUpdate = (U32)(ip - ctx->base + best_mlen);
-    return mnum;
-}
-
-
-#define SET_PRICE(pos, ml, offset, ll, cost)           \
-{                                                      \
-    while (last_pos < pos)  { opt[last_pos+1].price = 1<<30; last_pos++; } \
-    opt[pos].mlen = (int)ml;                           \
-    opt[pos].off = (int)offset;                        \
-    opt[pos].litlen = (int)ll;                         \
-    opt[pos].price = (int)cost;                        \
-}
-
-
-static int LZ4HC_compress_optimal (
-    LZ4HC_CCtx_internal* ctx,
-    const char* const source,
-    char* dest,
-    int inputSize,
-    int maxOutputSize,
-    limitedOutput_directive limit,
-    size_t sufficient_len,
-    const int fullUpdate
-    )
-{
-    LZ4HC_optimal_t opt[LZ4_OPT_NUM + 1];   /* this uses a bit too much stack memory to my taste ... */
-    LZ4HC_match_t matches[LZ4_OPT_NUM + 1];
-
-    const BYTE* ip = (const BYTE*) source;
-    const BYTE* anchor = ip;
-    const BYTE* const iend = ip + inputSize;
-    const BYTE* const mflimit = iend - MFLIMIT;
-    const BYTE* const matchlimit = (iend - LASTLITERALS);
-    BYTE* op = (BYTE*) dest;
-    BYTE* const oend = op + maxOutputSize;
-
-    /* init */
-    if (sufficient_len >= LZ4_OPT_NUM) sufficient_len = LZ4_OPT_NUM-1;
-    ctx->end += inputSize;
-    ip++;
-
-    /* Main Loop */
-    while (ip < mflimit) {
-        size_t const llen = ip - anchor;
-        size_t last_pos = 0;
-        size_t match_num, cur, best_mlen, best_off;
-        memset(opt, 0, sizeof(LZ4HC_optimal_t));
-
-        match_num = LZ4HC_BinTree_GetAllMatches(ctx, ip, matchlimit, MINMATCH-1, matches, fullUpdate);
-        if (!match_num) { ip++; continue; }
-
-        if ((size_t)matches[match_num-1].len > sufficient_len) {
-            /* good enough solution : immediate encoding */
-            best_mlen = matches[match_num-1].len;
-            best_off = matches[match_num-1].off;
-            cur = 0;
-            last_pos = 1;
-            goto encode;
-        }
-
-        /* set prices using matches at position = 0 */
-        {   size_t matchNb;
-            for (matchNb = 0; matchNb < match_num; matchNb++) {
-                size_t mlen = (matchNb>0) ? (size_t)matches[matchNb-1].len+1 : MINMATCH;
-                best_mlen = matches[matchNb].len;   /* necessarily < sufficient_len < LZ4_OPT_NUM */
-                for ( ; mlen <= best_mlen ; mlen++) {
-                    size_t const cost = LZ4HC_sequencePrice(llen, mlen) - LZ4HC_literalsPrice(llen);
-                    SET_PRICE(mlen, mlen, matches[matchNb].off, 0, cost);   /* updates last_pos and opt[pos] */
-        }   }   }
-
-        if (last_pos < MINMATCH) { ip++; continue; }  /* note : on clang at least, this test improves performance */
-
-        /* check further positions */
-        opt[0].mlen = opt[1].mlen = 1;
-        for (cur = 1; cur <= last_pos; cur++) {
-            const BYTE* const curPtr = ip + cur;
-
-            /* establish baseline price if cur is literal */
-            {   size_t price, litlen;
-                if (opt[cur-1].mlen == 1) {
-                    /* no match at previous position */
-                    litlen = opt[cur-1].litlen + 1;
-                    if (cur > litlen) {
-                        price = opt[cur - litlen].price + LZ4HC_literalsPrice(litlen);
-                    } else {
-                        price = LZ4HC_literalsPrice(llen + litlen) - LZ4HC_literalsPrice(llen);
-                    }
-                } else {
-                    litlen = 1;
-                    price = opt[cur - 1].price + LZ4HC_literalsPrice(1);
-                }
-
-                if (price < (size_t)opt[cur].price)
-                    SET_PRICE(cur, 1, 0, litlen, price);   /* note : increases last_pos */
-            }
-
-            if (cur == last_pos || curPtr >= mflimit) break;
-
-            match_num = LZ4HC_BinTree_GetAllMatches(ctx, curPtr, matchlimit, MINMATCH-1, matches, fullUpdate);
-            if ((match_num > 0) && (size_t)matches[match_num-1].len > sufficient_len) {
-                /* immediate encoding */
-                best_mlen = matches[match_num-1].len;
-                best_off = matches[match_num-1].off;
-                last_pos = cur + 1;
-                goto encode;
-            }
-
-            /* set prices using matches at position = cur */
-            {   size_t matchNb;
-                for (matchNb = 0; matchNb < match_num; matchNb++) {
-                    size_t ml = (matchNb>0) ? (size_t)matches[matchNb-1].len+1 : MINMATCH;
-                    best_mlen = (cur + matches[matchNb].len < LZ4_OPT_NUM) ?
-                                (size_t)matches[matchNb].len : LZ4_OPT_NUM - cur;
-
-                    for ( ; ml <= best_mlen ; ml++) {
-                        size_t ll, price;
-                        if (opt[cur].mlen == 1) {
-                            ll = opt[cur].litlen;
-                            if (cur > ll)
-                                price = opt[cur - ll].price + LZ4HC_sequencePrice(ll, ml);
-                            else
-                                price = LZ4HC_sequencePrice(llen + ll, ml) - LZ4HC_literalsPrice(llen);
-                        } else {
-                            ll = 0;
-                            price = opt[cur].price + LZ4HC_sequencePrice(0, ml);
-                        }
-
-                        if (cur + ml > last_pos || price < (size_t)opt[cur + ml].price) {
-                            SET_PRICE(cur + ml, ml, matches[matchNb].off, ll, price);
-            }   }   }   }
-        } /* for (cur = 1; cur <= last_pos; cur++) */
-
-        best_mlen = opt[last_pos].mlen;
-        best_off = opt[last_pos].off;
-        cur = last_pos - best_mlen;
-
-encode: /* cur, last_pos, best_mlen, best_off must be set */
-        opt[0].mlen = 1;
-        while (1) {  /* from end to beginning */
-            size_t const ml = opt[cur].mlen;
-            int const offset = opt[cur].off;
-            opt[cur].mlen = (int)best_mlen;
-            opt[cur].off = (int)best_off;
-            best_mlen = ml;
-            best_off = offset;
-            if (ml > cur) break;
-            cur -= ml;
-        }
-
-        /* encode all recorded sequences */
-        cur = 0;
-        while (cur < last_pos) {
-            int const ml = opt[cur].mlen;
-            int const offset = opt[cur].off;
-            if (ml == 1) { ip++; cur++; continue; }
-            cur += ml;
-            if ( LZ4HC_encodeSequence(&ip, &op, &anchor, ml, ip - offset, limit, oend) ) return 0;
-        }
-    }  /* while (ip < mflimit) */
-
-    /* Encode Last Literals */
-    {   int lastRun = (int)(iend - anchor);
-        if ((limit) && (((char*)op - dest) + lastRun + 1 + ((lastRun+255-RUN_MASK)/255) > (U32)maxOutputSize)) return 0;  /* Check output limit */
-        if (lastRun>=(int)RUN_MASK) { *op++=(RUN_MASK<<ML_BITS); lastRun-=RUN_MASK; for(; lastRun > 254 ; lastRun-=255) *op++ = 255; *op++ = (BYTE) lastRun; }
-        else *op++ = (BYTE)(lastRun<<ML_BITS);
-        memcpy(op, anchor, iend - anchor);
-        op += iend-anchor;
-    }
-
-    /* End */
-    return (int) ((char*)op-dest);
-}


[51/51] [partial] nifi-minifi-cpp git commit: MINIFICPP-512 - upgrade to librdkafka 0.11.4

Posted by ph...@apache.org.
MINIFICPP-512 - upgrade to librdkafka 0.11.4

This closes #345.

Signed-off-by: Marc Parisi <ph...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/repo
Commit: http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/commit/7528d23e
Tree: http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/tree/7528d23e
Diff: http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/diff/7528d23e

Branch: refs/heads/master
Commit: 7528d23eecd0dc83b97a35f3e699804d25bf842d
Parents: bc6d2a1
Author: Dustin Rodrigues <du...@gmail.com>
Authored: Sat May 26 18:34:31 2018 -0400
Committer: Marc Parisi <ph...@apache.org>
Committed: Wed Jun 6 10:13:56 2018 -0400

----------------------------------------------------------------------
 CMakeLists.txt                                  |    2 +-
 libminifi/test/kafka-tests/CMakeLists.txt       |    4 +-
 thirdparty/librdkafka-0.11.1/.appveyor.yml      |   88 -
 thirdparty/librdkafka-0.11.1/.dir-locals.el     |    3 -
 thirdparty/librdkafka-0.11.1/.doozer.json       |   88 -
 .../librdkafka-0.11.1/.github/ISSUE_TEMPLATE    |   26 -
 thirdparty/librdkafka-0.11.1/.gitignore         |   29 -
 thirdparty/librdkafka-0.11.1/.travis.yml        |   41 -
 thirdparty/librdkafka-0.11.1/CMakeLists.txt     |  168 -
 thirdparty/librdkafka-0.11.1/CONFIGURATION.md   |  128 -
 thirdparty/librdkafka-0.11.1/CONTRIBUTING.md    |  271 --
 thirdparty/librdkafka-0.11.1/Doxyfile           | 2385 ----------
 thirdparty/librdkafka-0.11.1/INTRODUCTION.md    |  566 ---
 thirdparty/librdkafka-0.11.1/LICENSE            |   25 -
 thirdparty/librdkafka-0.11.1/LICENSE.crc32c     |   28 -
 thirdparty/librdkafka-0.11.1/LICENSE.lz4        |   26 -
 thirdparty/librdkafka-0.11.1/LICENSE.pycrc      |   23 -
 thirdparty/librdkafka-0.11.1/LICENSE.queue      |   31 -
 thirdparty/librdkafka-0.11.1/LICENSE.regexp     |    5 -
 thirdparty/librdkafka-0.11.1/LICENSE.snappy     |   36 -
 .../librdkafka-0.11.1/LICENSE.tinycthread       |   26 -
 thirdparty/librdkafka-0.11.1/LICENSE.wingetopt  |   49 -
 thirdparty/librdkafka-0.11.1/LICENSES.txt       |  284 --
 thirdparty/librdkafka-0.11.1/Makefile           |   68 -
 thirdparty/librdkafka-0.11.1/README.md          |  160 -
 thirdparty/librdkafka-0.11.1/README.win32       |   28 -
 thirdparty/librdkafka-0.11.1/config.h.in        |   39 -
 thirdparty/librdkafka-0.11.1/configure          |  214 -
 .../librdkafka-0.11.1/configure.librdkafka      |  204 -
 thirdparty/librdkafka-0.11.1/dev-conf.sh        |   15 -
 .../librdkafka-0.11.1/examples/.gitignore       |    7 -
 .../librdkafka-0.11.1/examples/CMakeLists.txt   |   20 -
 thirdparty/librdkafka-0.11.1/examples/Makefile  |   92 -
 .../librdkafka-0.11.1/examples/globals.json     |   11 -
 .../examples/kafkatest_verifiable_client.cpp    |  934 ----
 .../examples/rdkafka_consumer_example.c         |  624 ---
 .../examples/rdkafka_consumer_example.cpp       |  485 --
 .../examples/rdkafka_example.c                  |  806 ----
 .../examples/rdkafka_example.cpp                |  645 ---
 .../examples/rdkafka_performance.c              | 1561 -------
 .../examples/rdkafka_simple_producer.c          |  260 --
 .../examples/rdkafka_zookeeper_example.c        |  728 ---
 thirdparty/librdkafka-0.11.1/lds-gen.py         |   38 -
 thirdparty/librdkafka-0.11.1/mainpage.doxy      |   35 -
 .../librdkafka-0.11.1/mklove/Makefile.base      |  193 -
 .../mklove/modules/configure.atomics            |  144 -
 .../mklove/modules/configure.base               | 1772 --------
 .../mklove/modules/configure.builtin            |   62 -
 .../mklove/modules/configure.cc                 |  178 -
 .../mklove/modules/configure.cxx                |    8 -
 .../mklove/modules/configure.fileversion        |   65 -
 .../mklove/modules/configure.gitversion         |   19 -
 .../mklove/modules/configure.good_cflags        |   18 -
 .../mklove/modules/configure.host               |  110 -
 .../mklove/modules/configure.lib                |   49 -
 .../mklove/modules/configure.parseversion       |   95 -
 .../mklove/modules/configure.pic                |   16 -
 .../mklove/modules/configure.socket             |   20 -
 .../librdkafka-0.11.1/packaging/RELEASE.md      |  116 -
 .../packaging/archlinux/PKGBUILD                |    5 -
 .../packaging/cmake/Config.cmake.in             |   20 -
 .../cmake/try_compile/atomic_32_test.c          |    8 -
 .../cmake/try_compile/atomic_64_test.c          |    8 -
 .../cmake/try_compile/rdkafka_setup.cmake       |   76 -
 .../packaging/cmake/try_compile/regex_test.c    |   10 -
 .../packaging/cmake/try_compile/strndup_test.c  |    5 -
 .../packaging/cmake/try_compile/sync_32_test.c  |    8 -
 .../packaging/cmake/try_compile/sync_64_test.c  |    8 -
 .../packaging/debian/.gitignore                 |    6 -
 .../packaging/debian/changelog                  |   66 -
 .../librdkafka-0.11.1/packaging/debian/compat   |    1 -
 .../librdkafka-0.11.1/packaging/debian/control  |   49 -
 .../packaging/debian/copyright                  |   84 -
 .../librdkafka-0.11.1/packaging/debian/docs     |    3 -
 .../librdkafka-0.11.1/packaging/debian/gbp.conf |    9 -
 .../packaging/debian/librdkafka-dev.dirs        |    2 -
 .../packaging/debian/librdkafka-dev.examples    |    2 -
 .../packaging/debian/librdkafka-dev.install     |    6 -
 .../packaging/debian/librdkafka-dev.substvars   |    1 -
 .../packaging/debian/librdkafka.dsc             |   16 -
 .../packaging/debian/librdkafka1-dbg.substvars  |    1 -
 .../packaging/debian/librdkafka1.dirs           |    1 -
 .../packaging/debian/librdkafka1.install        |    2 -
 .../debian/librdkafka1.postinst.debhelper       |    5 -
 .../debian/librdkafka1.postrm.debhelper         |    5 -
 .../packaging/debian/librdkafka1.symbols        |   64 -
 .../librdkafka-0.11.1/packaging/debian/rules    |   17 -
 .../packaging/debian/source/format              |    1 -
 .../librdkafka-0.11.1/packaging/debian/watch    |    2 -
 .../librdkafka-0.11.1/packaging/get_version.py  |   21 -
 .../packaging/homebrew/README.md                |   15 -
 .../packaging/homebrew/brew-update-pr.sh        |   31 -
 .../packaging/nuget/.gitignore                  |    5 -
 .../librdkafka-0.11.1/packaging/nuget/README.md |   50 -
 .../packaging/nuget/artifact.py                 |  173 -
 .../librdkafka-0.11.1/packaging/nuget/nuget.sh  |   21 -
 .../packaging/nuget/packaging.py                |  596 ---
 .../packaging/nuget/release.py                  |   91 -
 .../packaging/nuget/requirements.txt            |    2 -
 .../nuget/templates/librdkafka.redist.nuspec    |   21 -
 .../nuget/templates/librdkafka.redist.props     |   18 -
 .../nuget/templates/librdkafka.redist.targets   |   19 -
 .../packaging/nuget/zfile/__init__.py           |    0
 .../packaging/nuget/zfile/zfile.py              |  100 -
 .../librdkafka-0.11.1/packaging/rpm/.gitignore  |    3 -
 .../librdkafka-0.11.1/packaging/rpm/Makefile    |   79 -
 .../packaging/rpm/el7-x86_64.cfg                |   40 -
 .../packaging/rpm/librdkafka.spec               |  103 -
 .../librdkafka-0.11.1/src-cpp/CMakeLists.txt    |   33 -
 .../librdkafka-0.11.1/src-cpp/ConfImpl.cpp      |   89 -
 .../librdkafka-0.11.1/src-cpp/ConsumerImpl.cpp  |  233 -
 .../librdkafka-0.11.1/src-cpp/HandleImpl.cpp    |  365 --
 .../src-cpp/KafkaConsumerImpl.cpp               |  257 --
 thirdparty/librdkafka-0.11.1/src-cpp/Makefile   |   58 -
 .../librdkafka-0.11.1/src-cpp/MessageImpl.cpp   |   38 -
 .../librdkafka-0.11.1/src-cpp/MetadataImpl.cpp  |  151 -
 .../librdkafka-0.11.1/src-cpp/ProducerImpl.cpp  |  167 -
 .../librdkafka-0.11.1/src-cpp/QueueImpl.cpp     |   66 -
 thirdparty/librdkafka-0.11.1/src-cpp/README.md  |   16 -
 .../librdkafka-0.11.1/src-cpp/RdKafka.cpp       |   52 -
 .../librdkafka-0.11.1/src-cpp/TopicImpl.cpp     |  124 -
 .../src-cpp/TopicPartitionImpl.cpp              |   55 -
 .../librdkafka-0.11.1/src-cpp/rdkafkacpp.h      | 2190 ---------
 .../librdkafka-0.11.1/src-cpp/rdkafkacpp_int.h  |  897 ----
 thirdparty/librdkafka-0.11.1/src/CMakeLists.txt |  128 -
 thirdparty/librdkafka-0.11.1/src/Makefile       |   81 -
 thirdparty/librdkafka-0.11.1/src/crc32c.c       |  427 --
 thirdparty/librdkafka-0.11.1/src/crc32c.h       |   35 -
 .../src/librdkafka_cgrp_synch.png               |  Bin 93796 -> 0 bytes
 thirdparty/librdkafka-0.11.1/src/lz4.c          | 1462 ------
 thirdparty/librdkafka-0.11.1/src/lz4.h          |  463 --
 thirdparty/librdkafka-0.11.1/src/lz4frame.c     | 1440 ------
 thirdparty/librdkafka-0.11.1/src/lz4frame.h     |  367 --
 .../librdkafka-0.11.1/src/lz4frame_static.h     |   98 -
 thirdparty/librdkafka-0.11.1/src/lz4hc.c        |  786 ----
 thirdparty/librdkafka-0.11.1/src/lz4hc.h        |  269 --
 thirdparty/librdkafka-0.11.1/src/lz4opt.h       |  360 --
 thirdparty/librdkafka-0.11.1/src/queue.h        |  850 ----
 thirdparty/librdkafka-0.11.1/src/rd.h           |  455 --
 thirdparty/librdkafka-0.11.1/src/rdaddr.c       |  220 -
 thirdparty/librdkafka-0.11.1/src/rdaddr.h       |  184 -
 thirdparty/librdkafka-0.11.1/src/rdatomic.h     |  188 -
 thirdparty/librdkafka-0.11.1/src/rdavg.h        |   95 -
 thirdparty/librdkafka-0.11.1/src/rdavl.c        |  214 -
 thirdparty/librdkafka-0.11.1/src/rdavl.h        |  253 --
 thirdparty/librdkafka-0.11.1/src/rdbuf.c        | 1547 -------
 thirdparty/librdkafka-0.11.1/src/rdbuf.h        |  325 --
 thirdparty/librdkafka-0.11.1/src/rdcrc32.c      |  113 -
 thirdparty/librdkafka-0.11.1/src/rdcrc32.h      |  146 -
 thirdparty/librdkafka-0.11.1/src/rddl.c         |  179 -
 thirdparty/librdkafka-0.11.1/src/rddl.h         |   41 -
 thirdparty/librdkafka-0.11.1/src/rdendian.h     |  151 -
 thirdparty/librdkafka-0.11.1/src/rdgz.c         |  124 -
 thirdparty/librdkafka-0.11.1/src/rdgz.h         |   42 -
 thirdparty/librdkafka-0.11.1/src/rdinterval.h   |  116 -
 thirdparty/librdkafka-0.11.1/src/rdkafka.c      | 3392 --------------
 thirdparty/librdkafka-0.11.1/src/rdkafka.h      | 3820 ----------------
 .../librdkafka-0.11.1/src/rdkafka_assignor.c    |  551 ---
 .../librdkafka-0.11.1/src/rdkafka_assignor.h    |  159 -
 .../librdkafka-0.11.1/src/rdkafka_broker.c      | 3797 ----------------
 .../librdkafka-0.11.1/src/rdkafka_broker.h      |  328 --
 thirdparty/librdkafka-0.11.1/src/rdkafka_buf.c  |  428 --
 thirdparty/librdkafka-0.11.1/src/rdkafka_buf.h  |  819 ----
 thirdparty/librdkafka-0.11.1/src/rdkafka_cgrp.c | 3204 -------------
 thirdparty/librdkafka-0.11.1/src/rdkafka_cgrp.h |  275 --
 thirdparty/librdkafka-0.11.1/src/rdkafka_conf.c | 2151 ---------
 thirdparty/librdkafka-0.11.1/src/rdkafka_conf.h |  338 --
 .../librdkafka-0.11.1/src/rdkafka_event.c       |  232 -
 .../librdkafka-0.11.1/src/rdkafka_event.h       |   81 -
 .../librdkafka-0.11.1/src/rdkafka_feature.c     |  444 --
 .../librdkafka-0.11.1/src/rdkafka_feature.h     |   79 -
 thirdparty/librdkafka-0.11.1/src/rdkafka_int.h  |  435 --
 .../librdkafka-0.11.1/src/rdkafka_interceptor.c |  624 ---
 .../librdkafka-0.11.1/src/rdkafka_interceptor.h |   71 -
 thirdparty/librdkafka-0.11.1/src/rdkafka_lz4.c  |  429 --
 thirdparty/librdkafka-0.11.1/src/rdkafka_lz4.h  |   40 -
 .../librdkafka-0.11.1/src/rdkafka_metadata.c    | 1017 -----
 .../librdkafka-0.11.1/src/rdkafka_metadata.h    |  157 -
 .../src/rdkafka_metadata_cache.c                |  732 ---
 thirdparty/librdkafka-0.11.1/src/rdkafka_msg.c  |  800 ----
 thirdparty/librdkafka-0.11.1/src/rdkafka_msg.h  |  290 --
 .../librdkafka-0.11.1/src/rdkafka_msgset.h      |   47 -
 .../src/rdkafka_msgset_reader.c                 | 1090 -----
 .../src/rdkafka_msgset_writer.c                 | 1161 -----
 .../librdkafka-0.11.1/src/rdkafka_offset.c      | 1139 -----
 .../librdkafka-0.11.1/src/rdkafka_offset.h      |   72 -
 thirdparty/librdkafka-0.11.1/src/rdkafka_op.c   |  662 ---
 thirdparty/librdkafka-0.11.1/src/rdkafka_op.h   |  400 --
 .../librdkafka-0.11.1/src/rdkafka_partition.c   | 3272 --------------
 .../librdkafka-0.11.1/src/rdkafka_partition.h   |  636 ---
 .../librdkafka-0.11.1/src/rdkafka_pattern.c     |  224 -
 .../librdkafka-0.11.1/src/rdkafka_pattern.h     |   65 -
 .../librdkafka-0.11.1/src/rdkafka_plugin.c      |  209 -
 .../librdkafka-0.11.1/src/rdkafka_plugin.h      |   37 -
 .../librdkafka-0.11.1/src/rdkafka_proto.h       |  498 ---
 .../librdkafka-0.11.1/src/rdkafka_queue.c       |  860 ----
 .../librdkafka-0.11.1/src/rdkafka_queue.h       |  731 ---
 .../src/rdkafka_range_assignor.c                |  125 -
 .../librdkafka-0.11.1/src/rdkafka_request.c     | 1848 --------
 .../librdkafka-0.11.1/src/rdkafka_request.h     |  196 -
 .../src/rdkafka_roundrobin_assignor.c           |  114 -
 thirdparty/librdkafka-0.11.1/src/rdkafka_sasl.c |  343 --
 thirdparty/librdkafka-0.11.1/src/rdkafka_sasl.h |   46 -
 .../librdkafka-0.11.1/src/rdkafka_sasl_cyrus.c  |  623 ---
 .../librdkafka-0.11.1/src/rdkafka_sasl_int.h    |   70 -
 .../librdkafka-0.11.1/src/rdkafka_sasl_plain.c  |  128 -
 .../librdkafka-0.11.1/src/rdkafka_sasl_scram.c  |  901 ----
 .../librdkafka-0.11.1/src/rdkafka_sasl_win32.c  |  526 ---
 .../src/rdkafka_subscription.c                  |  187 -
 .../src/rdkafka_subscription.h                  |   31 -
 .../librdkafka-0.11.1/src/rdkafka_timer.c       |  292 --
 .../librdkafka-0.11.1/src/rdkafka_timer.h       |   77 -
 .../librdkafka-0.11.1/src/rdkafka_topic.c       | 1306 ------
 .../librdkafka-0.11.1/src/rdkafka_topic.h       |  185 -
 .../librdkafka-0.11.1/src/rdkafka_transport.c   | 1523 -------
 .../librdkafka-0.11.1/src/rdkafka_transport.h   |   72 -
 .../src/rdkafka_transport_int.h                 |   84 -
 thirdparty/librdkafka-0.11.1/src/rdlist.c       |  313 --
 thirdparty/librdkafka-0.11.1/src/rdlist.h       |  246 -
 thirdparty/librdkafka-0.11.1/src/rdlog.c        |   89 -
 thirdparty/librdkafka-0.11.1/src/rdlog.h        |   37 -
 thirdparty/librdkafka-0.11.1/src/rdports.c      |   60 -
 thirdparty/librdkafka-0.11.1/src/rdports.h      |   33 -
 thirdparty/librdkafka-0.11.1/src/rdposix.h      |  182 -
 thirdparty/librdkafka-0.11.1/src/rdrand.c       |   50 -
 thirdparty/librdkafka-0.11.1/src/rdrand.h       |   45 -
 thirdparty/librdkafka-0.11.1/src/rdregex.c      |  157 -
 thirdparty/librdkafka-0.11.1/src/rdregex.h      |   37 -
 thirdparty/librdkafka-0.11.1/src/rdsignal.h     |   54 -
 thirdparty/librdkafka-0.11.1/src/rdstring.c     |  162 -
 thirdparty/librdkafka-0.11.1/src/rdstring.h     |   52 -
 thirdparty/librdkafka-0.11.1/src/rdsysqueue.h   |  330 --
 thirdparty/librdkafka-0.11.1/src/rdtime.h       |  175 -
 thirdparty/librdkafka-0.11.1/src/rdtypes.h      |   42 -
 thirdparty/librdkafka-0.11.1/src/rdunittest.c   |   43 -
 thirdparty/librdkafka-0.11.1/src/rdunittest.h   |   83 -
 thirdparty/librdkafka-0.11.1/src/rdvarint.c     |  126 -
 thirdparty/librdkafka-0.11.1/src/rdvarint.h     |  169 -
 thirdparty/librdkafka-0.11.1/src/rdwin32.h      |  262 --
 thirdparty/librdkafka-0.11.1/src/regexp.c       | 1156 -----
 thirdparty/librdkafka-0.11.1/src/regexp.h       |   31 -
 thirdparty/librdkafka-0.11.1/src/snappy.c       | 1834 --------
 thirdparty/librdkafka-0.11.1/src/snappy.h       |   34 -
 .../librdkafka-0.11.1/src/snappy_compat.h       |  169 -
 thirdparty/librdkafka-0.11.1/src/tinycthread.c  | 1031 -----
 thirdparty/librdkafka-0.11.1/src/tinycthread.h  |  520 ---
 thirdparty/librdkafka-0.11.1/src/win32_config.h |   40 -
 thirdparty/librdkafka-0.11.1/src/xxhash.c       |  889 ----
 thirdparty/librdkafka-0.11.1/src/xxhash.h       |  293 --
 thirdparty/librdkafka-0.11.1/win32/.gitignore   |  109 -
 thirdparty/librdkafka-0.11.1/win32/README.md    |    5 -
 .../librdkafka-0.11.1/win32/build-package.bat   |    3 -
 thirdparty/librdkafka-0.11.1/win32/build.bat    |   19 -
 .../librdkafka-0.11.1/win32/common.vcxproj      |   76 -
 .../interceptor_test/interceptor_test.vcxproj   |   87 -
 .../win32/librdkafka.autopkg.template           |   55 -
 .../win32/librdkafka.master.testing.targets     |   13 -
 .../librdkafka-0.11.1/win32/librdkafka.sln      |  176 -
 .../librdkafka-0.11.1/win32/librdkafka.vcxproj  |  229 -
 .../win32/librdkafkacpp/librdkafkacpp.vcxproj   |  103 -
 .../librdkafka-0.11.1/win32/package-nuget.ps1   |   21 -
 .../librdkafka-0.11.1/win32/packages.config     |    6 -
 .../win32/packages/repositories.config          |    4 -
 .../librdkafka-0.11.1/win32/push-package.bat    |    4 -
 .../rdkafka_consumer_example_cpp.vcxproj        |   67 -
 .../rdkafka_example/rdkafka_example.vcxproj     |   97 -
 .../rdkafka_performance.vcxproj                 |   97 -
 .../librdkafka-0.11.1/win32/tests/.gitignore    |    3 -
 .../win32/tests/test.conf.example               |   25 -
 .../librdkafka-0.11.1/win32/tests/tests.vcxproj |  171 -
 thirdparty/librdkafka-0.11.1/win32/wingetopt.c  |  564 ---
 thirdparty/librdkafka-0.11.1/win32/wingetopt.h  |   95 -
 thirdparty/librdkafka-0.11.1/win32/wintime.h    |   29 -
 thirdparty/librdkafka-0.11.4/.appveyor.yml      |   88 +
 thirdparty/librdkafka-0.11.4/.dir-locals.el     |    3 +
 thirdparty/librdkafka-0.11.4/.doozer.json       |  110 +
 .../librdkafka-0.11.4/.github/ISSUE_TEMPLATE    |   32 +
 thirdparty/librdkafka-0.11.4/.gitignore         |   28 +
 thirdparty/librdkafka-0.11.4/.travis.yml        |   42 +
 thirdparty/librdkafka-0.11.4/CMakeLists.txt     |  182 +
 thirdparty/librdkafka-0.11.4/CODE_OF_CONDUCT.md |   46 +
 thirdparty/librdkafka-0.11.4/CONFIGURATION.md   |  138 +
 thirdparty/librdkafka-0.11.4/CONTRIBUTING.md    |  271 ++
 thirdparty/librdkafka-0.11.4/Doxyfile           | 2385 ++++++++++
 thirdparty/librdkafka-0.11.4/INTRODUCTION.md    |  735 +++
 thirdparty/librdkafka-0.11.4/LICENSE            |   25 +
 thirdparty/librdkafka-0.11.4/LICENSE.crc32c     |   28 +
 thirdparty/librdkafka-0.11.4/LICENSE.lz4        |   26 +
 thirdparty/librdkafka-0.11.4/LICENSE.murmur2    |   25 +
 thirdparty/librdkafka-0.11.4/LICENSE.pycrc      |   23 +
 thirdparty/librdkafka-0.11.4/LICENSE.queue      |   31 +
 thirdparty/librdkafka-0.11.4/LICENSE.regexp     |    5 +
 thirdparty/librdkafka-0.11.4/LICENSE.snappy     |   36 +
 .../librdkafka-0.11.4/LICENSE.tinycthread       |   26 +
 thirdparty/librdkafka-0.11.4/LICENSE.wingetopt  |   49 +
 thirdparty/librdkafka-0.11.4/LICENSES.txt       |  313 ++
 thirdparty/librdkafka-0.11.4/Makefile           |   68 +
 thirdparty/librdkafka-0.11.4/README.md          |  168 +
 thirdparty/librdkafka-0.11.4/README.win32       |   28 +
 thirdparty/librdkafka-0.11.4/configure          |  214 +
 .../librdkafka-0.11.4/configure.librdkafka      |  215 +
 thirdparty/librdkafka-0.11.4/dev-conf.sh        |   45 +
 .../librdkafka-0.11.4/examples/.gitignore       |    8 +
 .../librdkafka-0.11.4/examples/CMakeLists.txt   |   30 +
 thirdparty/librdkafka-0.11.4/examples/Makefile  |   96 +
 .../librdkafka-0.11.4/examples/globals.json     |   11 +
 .../examples/kafkatest_verifiable_client.cpp    |  960 ++++
 .../examples/rdkafka_consume_batch.cpp          |  260 ++
 .../examples/rdkafka_consumer_example.c         |  624 +++
 .../examples/rdkafka_consumer_example.cpp       |  485 ++
 .../examples/rdkafka_example.c                  |  885 ++++
 .../examples/rdkafka_example.cpp                |  645 +++
 .../examples/rdkafka_performance.c              | 1651 +++++++
 .../examples/rdkafka_simple_producer.c          |  260 ++
 .../examples/rdkafka_zookeeper_example.c        |  728 +++
 thirdparty/librdkafka-0.11.4/lds-gen.py         |   38 +
 thirdparty/librdkafka-0.11.4/mainpage.doxy      |   35 +
 .../librdkafka-0.11.4/mklove/Makefile.base      |  215 +
 .../mklove/modules/configure.atomics            |  144 +
 .../mklove/modules/configure.base               | 1771 ++++++++
 .../mklove/modules/configure.builtin            |   62 +
 .../mklove/modules/configure.cc                 |  178 +
 .../mklove/modules/configure.cxx                |    8 +
 .../mklove/modules/configure.fileversion        |   65 +
 .../mklove/modules/configure.gitversion         |   19 +
 .../mklove/modules/configure.good_cflags        |   18 +
 .../mklove/modules/configure.host               |  110 +
 .../mklove/modules/configure.lib                |   49 +
 .../mklove/modules/configure.parseversion       |   95 +
 .../mklove/modules/configure.pic                |   16 +
 .../mklove/modules/configure.socket             |   20 +
 .../librdkafka-0.11.4/packaging/RELEASE.md      |  137 +
 .../packaging/archlinux/PKGBUILD                |    5 +
 .../packaging/cmake/Config.cmake.in             |   20 +
 .../librdkafka-0.11.4/packaging/cmake/README.md |   38 +
 .../packaging/cmake/config.h.in                 |   40 +
 .../cmake/try_compile/atomic_32_test.c          |    8 +
 .../cmake/try_compile/atomic_64_test.c          |    8 +
 .../packaging/cmake/try_compile/dlopen_test.c   |   11 +
 .../packaging/cmake/try_compile/libsasl2_test.c |    7 +
 .../cmake/try_compile/rdkafka_setup.cmake       |   76 +
 .../packaging/cmake/try_compile/regex_test.c    |   10 +
 .../packaging/cmake/try_compile/strndup_test.c  |    5 +
 .../packaging/cmake/try_compile/sync_32_test.c  |    8 +
 .../packaging/cmake/try_compile/sync_64_test.c  |    8 +
 .../packaging/debian/.gitignore                 |    6 +
 .../packaging/debian/changelog                  |   66 +
 .../librdkafka-0.11.4/packaging/debian/compat   |    1 +
 .../librdkafka-0.11.4/packaging/debian/control  |   49 +
 .../packaging/debian/copyright                  |   84 +
 .../librdkafka-0.11.4/packaging/debian/docs     |    3 +
 .../librdkafka-0.11.4/packaging/debian/gbp.conf |    9 +
 .../packaging/debian/librdkafka-dev.dirs        |    2 +
 .../packaging/debian/librdkafka-dev.examples    |    2 +
 .../packaging/debian/librdkafka-dev.install     |    6 +
 .../packaging/debian/librdkafka-dev.substvars   |    1 +
 .../packaging/debian/librdkafka.dsc             |   16 +
 .../packaging/debian/librdkafka1-dbg.substvars  |    1 +
 .../packaging/debian/librdkafka1.dirs           |    1 +
 .../packaging/debian/librdkafka1.install        |    2 +
 .../debian/librdkafka1.postinst.debhelper       |    5 +
 .../debian/librdkafka1.postrm.debhelper         |    5 +
 .../packaging/debian/librdkafka1.symbols        |   64 +
 .../librdkafka-0.11.4/packaging/debian/rules    |   19 +
 .../packaging/debian/source/format              |    1 +
 .../librdkafka-0.11.4/packaging/debian/watch    |    2 +
 .../librdkafka-0.11.4/packaging/get_version.py  |   21 +
 .../packaging/homebrew/README.md                |   15 +
 .../packaging/homebrew/brew-update-pr.sh        |   31 +
 .../packaging/nuget/.gitignore                  |    5 +
 .../librdkafka-0.11.4/packaging/nuget/README.md |   50 +
 .../packaging/nuget/artifact.py                 |  173 +
 .../msvcr120.zip                                |  Bin 0 -> 520101 bytes
 .../msvcr120.zip                                |  Bin 0 -> 461473 bytes
 .../librdkafka-0.11.4/packaging/nuget/nuget.sh  |   21 +
 .../packaging/nuget/packaging.py                |  421 ++
 .../packaging/nuget/release.py                  |   83 +
 .../packaging/nuget/requirements.txt            |    2 +
 .../nuget/templates/librdkafka.redist.nuspec    |   21 +
 .../nuget/templates/librdkafka.redist.props     |   18 +
 .../nuget/templates/librdkafka.redist.targets   |   19 +
 .../packaging/nuget/zfile/__init__.py           |    0
 .../packaging/nuget/zfile/zfile.py              |  100 +
 .../librdkafka-0.11.4/packaging/rpm/.gitignore  |    3 +
 .../librdkafka-0.11.4/packaging/rpm/Makefile    |   81 +
 .../packaging/rpm/el7-x86_64.cfg                |   40 +
 .../packaging/rpm/librdkafka.spec               |  104 +
 .../packaging/tools/build-debian.sh             |   53 +
 .../librdkafka-0.11.4/src-cpp/CMakeLists.txt    |   35 +
 .../librdkafka-0.11.4/src-cpp/ConfImpl.cpp      |   89 +
 .../librdkafka-0.11.4/src-cpp/ConsumerImpl.cpp  |  233 +
 .../librdkafka-0.11.4/src-cpp/HandleImpl.cpp    |  365 ++
 .../src-cpp/KafkaConsumerImpl.cpp               |  257 ++
 thirdparty/librdkafka-0.11.4/src-cpp/Makefile   |   49 +
 .../librdkafka-0.11.4/src-cpp/MessageImpl.cpp   |   38 +
 .../librdkafka-0.11.4/src-cpp/MetadataImpl.cpp  |  151 +
 .../librdkafka-0.11.4/src-cpp/ProducerImpl.cpp  |  167 +
 .../librdkafka-0.11.4/src-cpp/QueueImpl.cpp     |   71 +
 thirdparty/librdkafka-0.11.4/src-cpp/README.md  |   16 +
 .../librdkafka-0.11.4/src-cpp/RdKafka.cpp       |   52 +
 .../librdkafka-0.11.4/src-cpp/TopicImpl.cpp     |  128 +
 .../src-cpp/TopicPartitionImpl.cpp              |   55 +
 .../librdkafka-0.11.4/src-cpp/rdkafkacpp.h      | 2284 ++++++++++
 .../librdkafka-0.11.4/src-cpp/rdkafkacpp_int.h  |  910 ++++
 thirdparty/librdkafka-0.11.4/src/CMakeLists.txt |  184 +
 thirdparty/librdkafka-0.11.4/src/Makefile       |   82 +
 thirdparty/librdkafka-0.11.4/src/crc32c.c       |  438 ++
 thirdparty/librdkafka-0.11.4/src/crc32c.h       |   38 +
 .../src/librdkafka_cgrp_synch.png               |  Bin 0 -> 93796 bytes
 thirdparty/librdkafka-0.11.4/src/lz4.c          | 1462 ++++++
 thirdparty/librdkafka-0.11.4/src/lz4.h          |  463 ++
 thirdparty/librdkafka-0.11.4/src/lz4frame.c     | 1440 ++++++
 thirdparty/librdkafka-0.11.4/src/lz4frame.h     |  367 ++
 .../librdkafka-0.11.4/src/lz4frame_static.h     |   98 +
 thirdparty/librdkafka-0.11.4/src/lz4hc.c        |  786 ++++
 thirdparty/librdkafka-0.11.4/src/lz4hc.h        |  269 ++
 thirdparty/librdkafka-0.11.4/src/lz4opt.h       |  360 ++
 thirdparty/librdkafka-0.11.4/src/queue.h        |  850 ++++
 thirdparty/librdkafka-0.11.4/src/rd.h           |  457 ++
 thirdparty/librdkafka-0.11.4/src/rdaddr.c       |  220 +
 thirdparty/librdkafka-0.11.4/src/rdaddr.h       |  187 +
 thirdparty/librdkafka-0.11.4/src/rdatomic.h     |  191 +
 thirdparty/librdkafka-0.11.4/src/rdavg.h        |   97 +
 thirdparty/librdkafka-0.11.4/src/rdavl.c        |  214 +
 thirdparty/librdkafka-0.11.4/src/rdavl.h        |  256 ++
 thirdparty/librdkafka-0.11.4/src/rdbuf.c        | 1550 +++++++
 thirdparty/librdkafka-0.11.4/src/rdbuf.h        |  325 ++
 thirdparty/librdkafka-0.11.4/src/rdcrc32.c      |  113 +
 thirdparty/librdkafka-0.11.4/src/rdcrc32.h      |  146 +
 thirdparty/librdkafka-0.11.4/src/rddl.c         |  179 +
 thirdparty/librdkafka-0.11.4/src/rddl.h         |   41 +
 thirdparty/librdkafka-0.11.4/src/rdendian.h     |  169 +
 thirdparty/librdkafka-0.11.4/src/rdgz.c         |  124 +
 thirdparty/librdkafka-0.11.4/src/rdgz.h         |   45 +
 thirdparty/librdkafka-0.11.4/src/rdinterval.h   |  117 +
 thirdparty/librdkafka-0.11.4/src/rdkafka.c      | 3518 +++++++++++++++
 thirdparty/librdkafka-0.11.4/src/rdkafka.h      | 4211 ++++++++++++++++++
 .../librdkafka-0.11.4/src/rdkafka_assignor.c    |  551 +++
 .../librdkafka-0.11.4/src/rdkafka_assignor.h    |  159 +
 .../librdkafka-0.11.4/src/rdkafka_broker.c      | 4038 +++++++++++++++++
 .../librdkafka-0.11.4/src/rdkafka_broker.h      |  361 ++
 thirdparty/librdkafka-0.11.4/src/rdkafka_buf.c  |  451 ++
 thirdparty/librdkafka-0.11.4/src/rdkafka_buf.h  |  946 ++++
 thirdparty/librdkafka-0.11.4/src/rdkafka_cgrp.c | 3262 ++++++++++++++
 thirdparty/librdkafka-0.11.4/src/rdkafka_cgrp.h |  278 ++
 thirdparty/librdkafka-0.11.4/src/rdkafka_conf.c | 2248 ++++++++++
 thirdparty/librdkafka-0.11.4/src/rdkafka_conf.h |  350 ++
 .../librdkafka-0.11.4/src/rdkafka_event.c       |  232 +
 .../librdkafka-0.11.4/src/rdkafka_event.h       |   81 +
 .../librdkafka-0.11.4/src/rdkafka_feature.c     |  444 ++
 .../librdkafka-0.11.4/src/rdkafka_feature.h     |   82 +
 .../librdkafka-0.11.4/src/rdkafka_header.c      |  222 +
 .../librdkafka-0.11.4/src/rdkafka_header.h      |   76 +
 thirdparty/librdkafka-0.11.4/src/rdkafka_int.h  |  446 ++
 .../librdkafka-0.11.4/src/rdkafka_interceptor.c |  675 +++
 .../librdkafka-0.11.4/src/rdkafka_interceptor.h |   80 +
 thirdparty/librdkafka-0.11.4/src/rdkafka_lz4.c  |  436 ++
 thirdparty/librdkafka-0.11.4/src/rdkafka_lz4.h  |   43 +
 .../librdkafka-0.11.4/src/rdkafka_metadata.c    | 1031 +++++
 .../librdkafka-0.11.4/src/rdkafka_metadata.h    |  160 +
 .../src/rdkafka_metadata_cache.c                |  732 +++
 thirdparty/librdkafka-0.11.4/src/rdkafka_msg.c  | 1277 ++++++
 thirdparty/librdkafka-0.11.4/src/rdkafka_msg.h  |  381 ++
 .../librdkafka-0.11.4/src/rdkafka_msgset.h      |   50 +
 .../src/rdkafka_msgset_reader.c                 | 1137 +++++
 .../src/rdkafka_msgset_writer.c                 | 1226 +++++
 .../librdkafka-0.11.4/src/rdkafka_offset.c      | 1145 +++++
 .../librdkafka-0.11.4/src/rdkafka_offset.h      |   74 +
 thirdparty/librdkafka-0.11.4/src/rdkafka_op.c   |  660 +++
 thirdparty/librdkafka-0.11.4/src/rdkafka_op.h   |  403 ++
 .../librdkafka-0.11.4/src/rdkafka_partition.c   | 3363 ++++++++++++++
 .../librdkafka-0.11.4/src/rdkafka_partition.h   |  641 +++
 .../librdkafka-0.11.4/src/rdkafka_pattern.c     |  224 +
 .../librdkafka-0.11.4/src/rdkafka_pattern.h     |   68 +
 .../librdkafka-0.11.4/src/rdkafka_plugin.c      |  209 +
 .../librdkafka-0.11.4/src/rdkafka_plugin.h      |   37 +
 .../librdkafka-0.11.4/src/rdkafka_proto.h       |  502 +++
 .../librdkafka-0.11.4/src/rdkafka_queue.c       |  866 ++++
 .../librdkafka-0.11.4/src/rdkafka_queue.h       |  769 ++++
 .../src/rdkafka_range_assignor.c                |  125 +
 .../librdkafka-0.11.4/src/rdkafka_request.c     | 1997 +++++++++
 .../librdkafka-0.11.4/src/rdkafka_request.h     |  198 +
 .../src/rdkafka_roundrobin_assignor.c           |  114 +
 thirdparty/librdkafka-0.11.4/src/rdkafka_sasl.c |  343 ++
 thirdparty/librdkafka-0.11.4/src/rdkafka_sasl.h |   49 +
 .../librdkafka-0.11.4/src/rdkafka_sasl_cyrus.c  |  623 +++
 .../librdkafka-0.11.4/src/rdkafka_sasl_int.h    |   72 +
 .../librdkafka-0.11.4/src/rdkafka_sasl_plain.c  |  128 +
 .../librdkafka-0.11.4/src/rdkafka_sasl_scram.c  |  901 ++++
 .../librdkafka-0.11.4/src/rdkafka_sasl_win32.c  |  526 +++
 .../src/rdkafka_subscription.c                  |  186 +
 .../librdkafka-0.11.4/src/rdkafka_timer.c       |  292 ++
 .../librdkafka-0.11.4/src/rdkafka_timer.h       |   80 +
 .../librdkafka-0.11.4/src/rdkafka_topic.c       | 1310 ++++++
 .../librdkafka-0.11.4/src/rdkafka_topic.h       |  188 +
 .../librdkafka-0.11.4/src/rdkafka_transport.c   | 1607 +++++++
 .../librdkafka-0.11.4/src/rdkafka_transport.h   |   79 +
 .../src/rdkafka_transport_int.h                 |   87 +
 thirdparty/librdkafka-0.11.4/src/rdlist.c       |  333 ++
 thirdparty/librdkafka-0.11.4/src/rdlist.h       |  269 ++
 thirdparty/librdkafka-0.11.4/src/rdlog.c        |   89 +
 thirdparty/librdkafka-0.11.4/src/rdlog.h        |   40 +
 thirdparty/librdkafka-0.11.4/src/rdmurmur2.c    |  159 +
 thirdparty/librdkafka-0.11.4/src/rdmurmur2.h    |    7 +
 thirdparty/librdkafka-0.11.4/src/rdports.c      |   60 +
 thirdparty/librdkafka-0.11.4/src/rdports.h      |   36 +
 thirdparty/librdkafka-0.11.4/src/rdposix.h      |  184 +
 thirdparty/librdkafka-0.11.4/src/rdrand.c       |   50 +
 thirdparty/librdkafka-0.11.4/src/rdrand.h       |   48 +
 thirdparty/librdkafka-0.11.4/src/rdregex.c      |  157 +
 thirdparty/librdkafka-0.11.4/src/rdregex.h      |   40 +
 thirdparty/librdkafka-0.11.4/src/rdsignal.h     |   57 +
 thirdparty/librdkafka-0.11.4/src/rdstring.c     |  204 +
 thirdparty/librdkafka-0.11.4/src/rdstring.h     |   59 +
 thirdparty/librdkafka-0.11.4/src/rdsysqueue.h   |  348 ++
 thirdparty/librdkafka-0.11.4/src/rdtime.h       |  184 +
 thirdparty/librdkafka-0.11.4/src/rdtypes.h      |   45 +
 thirdparty/librdkafka-0.11.4/src/rdunittest.c   |   63 +
 thirdparty/librdkafka-0.11.4/src/rdunittest.h   |   83 +
 thirdparty/librdkafka-0.11.4/src/rdvarint.c     |  126 +
 thirdparty/librdkafka-0.11.4/src/rdvarint.h     |  169 +
 thirdparty/librdkafka-0.11.4/src/rdwin32.h      |  265 ++
 thirdparty/librdkafka-0.11.4/src/regexp.c       | 1156 +++++
 thirdparty/librdkafka-0.11.4/src/regexp.h       |   31 +
 thirdparty/librdkafka-0.11.4/src/snappy.c       | 1838 ++++++++
 thirdparty/librdkafka-0.11.4/src/snappy.h       |   34 +
 .../librdkafka-0.11.4/src/snappy_compat.h       |  169 +
 thirdparty/librdkafka-0.11.4/src/tinycthread.c  | 1039 +++++
 thirdparty/librdkafka-0.11.4/src/tinycthread.h  |  528 +++
 thirdparty/librdkafka-0.11.4/src/win32_config.h |   45 +
 thirdparty/librdkafka-0.11.4/src/xxhash.c       |  889 ++++
 thirdparty/librdkafka-0.11.4/src/xxhash.h       |  293 ++
 532 files changed, 89176 insertions(+), 87272 deletions(-)
----------------------------------------------------------------------


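Beyond the version bump itself, the stat above shows what the 0.11.4 tree brings in: among the new sources are rdkafka_header.c/h (Kafka message header support) and rdmurmur2.c (the murmur2 partitioner hash). A minimal sketch of the header API those files back, assuming the stock 0.11.4 C interface; the header name/value and the include path are placeholders, not code from this repository:

    #include <librdkafka/rdkafka.h>   /* "rdkafka.h" when building in-tree */

    int main(void) {
        /* Allocate a header list and attach one name/value pair;
         * -1 tells librdkafka to take strlen() of the string. */
        rd_kafka_headers_t *hdrs = rd_kafka_headers_new(4);
        rd_kafka_header_add(hdrs, "source", -1, "minifi", -1);

        /* Normally the list is handed to rd_kafka_producev() via
         * RD_KAFKA_V_HEADERS(hdrs); destroyed here since nothing is produced. */
        rd_kafka_headers_destroy(hdrs);
        return 0;
    }
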
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/CMakeLists.txt
----------------------------------------------------------------------
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 7699d1f..287e44d 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -250,7 +250,7 @@ endif()
 ## Create LibRdKafka Extension
 option(ENABLE_LIBRDKAFKA "Enables the librdkafka extension." OFF)
 if (ENABLE_ALL OR ENABLE_LIBRDKAFKA)
-	createExtension(RDKAFKA-EXTENSIONS "RDKAFKA EXTENSIONS" "This Enables librdkafka functionality including PublishKafka" "extensions/librdkafka" "${TEST_DIR}/kafka-tests" "TRUE" "thirdparty/librdkafka-0.11.1")
+	createExtension(RDKAFKA-EXTENSIONS "RDKAFKA EXTENSIONS" "This Enables librdkafka functionality including PublishKafka" "extensions/librdkafka" "${TEST_DIR}/kafka-tests" "TRUE" "thirdparty/librdkafka-0.11.4")
 endif()
 
 ## Scripting extensions

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/libminifi/test/kafka-tests/CMakeLists.txt
----------------------------------------------------------------------
diff --git a/libminifi/test/kafka-tests/CMakeLists.txt b/libminifi/test/kafka-tests/CMakeLists.txt
index 1e465b1..5172ca2 100644
--- a/libminifi/test/kafka-tests/CMakeLists.txt
+++ b/libminifi/test/kafka-tests/CMakeLists.txt
@@ -24,8 +24,8 @@ FOREACH(testfile ${KAFKA_INTEGRATION_TESTS})
 	get_filename_component(testfilename "${testfile}" NAME_WE)
 	add_executable("${testfilename}" "${testfile}")
 	target_include_directories(${testfilename} BEFORE PRIVATE "${CMAKE_SOURCE_DIR}/extensions/librdkafka")
-	target_include_directories(${testfilename} BEFORE PRIVATE "${CMAKE_SOURCE_DIR}/thirdparty/librdkafka-0.11.1/src")
-	target_include_directories(${testfilename} BEFORE PRIVATE "${CMAKE_SOURCE_DIR}/thirdparty/librdkafka-0.11.1/src-cpp")
+	target_include_directories(${testfilename} BEFORE PRIVATE "${CMAKE_SOURCE_DIR}/thirdparty/librdkafka-0.11.4/src")
+	target_include_directories(${testfilename} BEFORE PRIVATE "${CMAKE_SOURCE_DIR}/thirdparty/librdkafka-0.11.4/src-cpp")
 	createTests("${testfilename}")
 	target_link_libraries(${testfilename} ${CATCH_MAIN_LIB})
 	if (APPLE)

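With these include paths, both the C headers (src) and the C++ wrapper (src-cpp) now resolve from the 0.11.4 tree. A minimal sketch of the producer flow that code built against these headers drives, using only the C API; the broker address and topic name are placeholders, and this is not PublishKafka's actual implementation:

    #include <stdio.h>
    #include "rdkafka.h"

    int main(void) {
        char errstr[512];

        /* Global configuration; the property name is from CONFIGURATION.md. */
        rd_kafka_conf_t *conf = rd_kafka_conf_new();
        if (rd_kafka_conf_set(conf, "metadata.broker.list", "localhost:9092",
                              errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) {
            fprintf(stderr, "conf failed: %s\n", errstr);
            return 1;
        }

        /* rd_kafka_new() takes ownership of conf on success. */
        rd_kafka_t *rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
        if (!rk) {
            fprintf(stderr, "producer failed: %s\n", errstr);
            return 1;
        }

        rd_kafka_topic_t *rkt = rd_kafka_topic_new(rk, "test-topic", NULL);
        const char payload[] = "hello from minifi";
        rd_kafka_produce(rkt, RD_KAFKA_PARTITION_UA, RD_KAFKA_MSG_F_COPY,
                         (void *)payload, sizeof(payload) - 1, NULL, 0, NULL);

        rd_kafka_flush(rk, 10 * 1000);  /* wait up to 10s for delivery */
        rd_kafka_topic_destroy(rkt);
        rd_kafka_destroy(rk);
        return 0;
    }
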
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/.appveyor.yml
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/.appveyor.yml b/thirdparty/librdkafka-0.11.1/.appveyor.yml
deleted file mode 100644
index aac3a68..0000000
--- a/thirdparty/librdkafka-0.11.1/.appveyor.yml
+++ /dev/null
@@ -1,88 +0,0 @@
-version: 0.11.0-R-post{build}
-pull_requests:
-  do_not_increment_build_number: true
-image: Visual Studio 2013
-configuration: Release
-environment:
-  matrix:
-  - platform: x64
-  - platform: win32
-install:
-- ps: "$OpenSSLVersion = \"1_0_2L\"\n$OpenSSLExe = \"OpenSSL-$OpenSSLVersion.exe\"\n\nRemove-Item C:\\OpenSSL-Win32 -recurse\nRemove-Item C:\\OpenSSL-Win64 -recurse\n\nWrite-Host \"Installing OpenSSL v1.0 32-bit ...\" -ForegroundColor Cyan\nWrite-Host \"Downloading...\"\n$exePath = \"$($env:USERPROFILE)\\Win32OpenSSL-1_0_2L.exe\"\n(New-Object Net.WebClient).DownloadFile('https://slproweb.com/download/Win32OpenSSL-1_0_2L.exe', $exePath)\nWrite-Host \"Installing...\"\ncmd /c start /wait $exePath /silent /verysilent /sp- /suppressmsgboxes /DIR=C:\\OpenSSL-Win32\nWrite-Host \"Installed\" -ForegroundColor Green\n\nWrite-Host \"Installing OpenSSL v1.0 64-bit ...\" -ForegroundColor Cyan\nWrite-Host \"Downloading...\"\n$exePath = \"$($env:USERPROFILE)\\Win64OpenSSL-1_0_2L.exe\"\n(New-Object Net.WebClient).DownloadFile('https://slproweb.com/download/Win64OpenSSL-1_0_2L.exe', $exePath)\nWrite-Host \"Installing...\"\ncmd /c start /wait $exePath /silent /verysilent /sp- /suppressmsgboxes /DIR=C
 :\\OpenSSL-Win64\nWrite-Host \"Installed\" -ForegroundColor Green\n\nif (!(Test-Path(\"C:\\OpenSSL-Win32\"))) {\n  echo \"Downloading https://slproweb.com/download/Win32$OpenSSLExe\"\n  Start-FileDownload 'https://slproweb.com/download/Win32$OpenSSLExe'\n  Start-Process \"Win32$OpenSSLExe\" -ArgumentList \"/silent /verysilent /sp- /suppressmsgboxes\" -Wait\n} else {\n   echo \"OpenSSL-Win32 already exists: not downloading\"\n}\n\nif (!(Test-Path(\"C:\\OpenSSL-Win64\"))) {\n  echo \"Downloading https://slproweb.com/download/Win64$OpenSSLExe\"\n  Start-FileDownload 'https://slproweb.com/download/Win64$OpenSSLExe' \n  Start-Process \"Win64$OpenSSLExe\" -ArgumentList \"/silent /verysilent /sp- /suppressmsgboxes\" -Wait\n} else {\n   echo \"OpenSSL-Win64 already exists: not downloading\"\n}\n\n\n\n# Download the CoApp tools.\n$msiPath = \"$($env:USERPROFILE)\\CoApp.Tools.Powershell.msi\"\n(New-Object Net.WebClient).DownloadFile('http://coapp.org/files/CoApp.Tools.Powershell.msi', $msiPat
 h)\n\n# Install the CoApp tools from the downloaded .msi.\nStart-Process -FilePath msiexec -ArgumentList /i, $msiPath, /quiet -Wait\n\n# Make the tools available for later PS scripts to use.\n$env:PSModulePath = $env:PSModulePath + ';C:\\Program Files (x86)\\Outercurve Foundation\\Modules'\nImport-Module CoApp\n\n# Install NuGet\n#Install-PackageProvider NuGet -MinimumVersion '2.8.5.201' -Force\n#Import-PackageProvider NuGet -MinimumVersion '2.8.5.201' -Force\n\n# Install CoApp for creating nuget packages\n#$msiPath = \"$($env:USERPROFILE)\\CoApp.Tools.Powershell.msi\"\n#(New-Object #Net.WebClient).DownloadFile('http://downloads.coapp.org/files/CoApp.Tools.Powershell.msi', $msiPath)\n#cmd /c start /wait msiexec /i \"$msiPath\" /quiet\n\n# Install CoApp module\n#Install-Module CoApp -Force"
-cache:
-- c:\OpenSSL-Win32
-- c:\OpenSSL-Win64
-nuget:
-  account_feed: true
-  project_feed: true
-  disable_publish_on_pr: true
-before_build:
-- cmd: nuget restore win32/librdkafka.sln
-build:
-  project: win32/librdkafka.sln
-  publish_nuget: true
-  publish_nuget_symbols: true
-  include_nuget_references: true
-  parallel: true
-  verbosity: normal
-test_script:
-- cmd: if exist DISABLED\win32\outdir\v140 ( win32\outdir\v140\%PLATFORM%\%CONFIGURATION%\tests.exe -l -p1 ) else ( win32\outdir\v120\%PLATFORM%\%CONFIGURATION%\tests.exe -l -p1 )
-artifacts:
-- path: test_report*.json
-  name: Test report
-- path: '*.nupkg'
-  name: Packages
-- path: '**\*.dll'
-  name: Libraries
-- path: '**\*.lib'
-  name: Libraries
-- path: '**\*.pdb'
-  name: Libraries
-- path: '**\*.exe'
-  name: Executables
-before_deploy:
-- ps: >-
-    # FIXME: Add to Deployment condition above:
-
-    # APPVEYOR_REPO_TAG = true
-
-
-
-    # This is the CoApp .autopkg file to create.
-
-    $autopkgFile = "win32/librdkafka.autopkg"
-
-
-    # Get the ".autopkg.template" file, replace "@version" with the Appveyor version number, then save to the ".autopkg" file.
-
-    cat ($autopkgFile + ".template") | % { $_ -replace "@version", $env:appveyor_build_version } > $autopkgFile
-
-
-    # Use the CoApp tools to create NuGet native packages from the .autopkg.
-
-    Write-NuGetPackage $autopkgFile
-
-
-    # Push all newly created .nupkg files as Appveyor artifacts for later deployment.
-
-    Get-ChildItem .\*.nupkg | % { Push-AppveyorArtifact $_.FullName -FileName $_.Name }
-deploy:
-- provider: S3
-  access_key_id:
-    secure: BDJ8FdNEzVKO7MTZX50dWIvrBEPOl9oHSRQ9S/s3uu0=
-  secret_access_key:
-    secure: GBmNSeDpUa7hqqTJBT+PLMgsZCxQMBau+6Vnitzqw15UlpuaZRzid1s5egIacZO6
-  region: us-west-1
-  bucket: librdkafka-ci-packages
-  folder: librdkafka/p-librdkafka__bld-appveyor__plat-windows__arch-$(platform)__bldtype-$(configuration)__tag-$(APPVEYOR_REPO_TAG_NAME)__sha-$(APPVEYOR_REPO_COMMIT)__bid-$(APPVEYOR_BUILD_ID)
-  artifact: /.*\.(nupkg)/
-  max_error_retry: 3
-  on:
-    APPVEYOR_REPO_TAG: true
-notifications:
-- provider: Email
-  to:
-  - magnus@edenhill.se
-  on_build_success: false
-  on_build_failure: true
-  on_build_status_changed: true

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/.dir-locals.el
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/.dir-locals.el b/thirdparty/librdkafka-0.11.1/.dir-locals.el
deleted file mode 100644
index 22ca922..0000000
--- a/thirdparty/librdkafka-0.11.1/.dir-locals.el
+++ /dev/null
@@ -1,3 +0,0 @@
-( (c-mode . ((c-file-style . "linux"))) )
-((nil . ((compile-command . "LC_ALL=C make -C $(git rev-parse --show-toplevels) -k"))))
-

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/.doozer.json
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/.doozer.json b/thirdparty/librdkafka-0.11.1/.doozer.json
deleted file mode 100644
index d12f1a4..0000000
--- a/thirdparty/librdkafka-0.11.1/.doozer.json
+++ /dev/null
@@ -1,88 +0,0 @@
-{
-  "targets": {
-    "xenial-amd64": {
-
-      "buildenv": "xenial-amd64",
-      "builddeps": [
-        "build-essential",
-        "python",
-        "zlib1g-dev",
-        "libssl-dev",
-        "libsasl2-dev"
-      ],
-      "buildcmd": [
-        "./configure",
-          "make -j ${PARALLEL}",
-          "make -C tests build"
-      ],
-      "testcmd": [
-          "make -C tests run_local"
-      ],
-    },
-
-    "xenial-i386": {
-      "_comment": "including liblz4-dev here to verify that WITH_LZ4_EXT works",
-      "buildenv": "xenial-i386",
-      "builddeps": [
-        "build-essential",
-        "python",
-        "zlib1g-dev",
-        "libssl-dev",
-        "libsasl2-dev",
-        "liblz4-dev"
-      ],
-      "buildcmd": [
-        "./configure",
-        "make -j ${PARALLEL}",
-        "make -C tests build"
-      ],
-      "testcmd": [
-        "make -C tests run_local"
-      ],
-    },
-
-    "xenial-armhf": {
-
-      "buildenv": "xenial-armhf",
-      "builddeps": [
-        "build-essential",
-        "python",
-        "zlib1g-dev",
-        "libssl-dev",
-        "libsasl2-dev"
-      ],
-      "buildcmd": [
-        "./configure",
-        "make -j ${PARALLEL}",
-          "make -j ${PARALLEL} -C tests build",
-      ],
-      "testcmd": [
-        "cd tests",
-        "./run-test.sh -p1 -l ./merged",
-        "cd .."
-      ],
-    },
-
-    "cmake-xenial-amd64": {
-
-      "buildenv": "xenial-amd64",
-      "builddeps": [
-        "build-essential",
-        "python",
-        "zlib1g-dev",
-        "libssl-dev",
-        "libsasl2-dev",
-        "cmake"
-      ],
-      "buildcmd": [
-        "cmake -H. -B_builds -DCMAKE_VERBOSE_MAKEFILE=ON -DCMAKE_BUILD_TYPE=Debug",
-        "cmake --build _builds",
-      ],
-      "testcmd": [
-        "cd _builds",
-        "ctest -VV -R RdKafkaTestBrokerLess"
-      ],
-    }
-  },
-  "artifacts": ["config.log", "Makefile.config", "config.h"]
-}

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/.github/ISSUE_TEMPLATE
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/.github/ISSUE_TEMPLATE b/thirdparty/librdkafka-0.11.1/.github/ISSUE_TEMPLATE
deleted file mode 100644
index e36b5bc..0000000
--- a/thirdparty/librdkafka-0.11.1/.github/ISSUE_TEMPLATE
+++ /dev/null
@@ -1,26 +0,0 @@
-Description
-===========
-
-
-
-
-How to reproduce
-================
-
-
-
-
-Checklist
-=========
-Please provide the following information:
-
- - [ ] librdkafka version (release number or git tag):
- - [ ] Apache Kafka version: 
- - [ ] librdkafka client configuration:
- - [ ] Operating system:
- - [ ] Using the legacy Consumer
- - [ ] Using the high-level KafkaConsumer
- - [ ] Provide logs (with `debug=..` as necessary) from librdkafka
- - [ ] Provide broker log excerpts
- - [ ] Critical issue
-

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/.gitignore
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/.gitignore b/thirdparty/librdkafka-0.11.1/.gitignore
deleted file mode 100644
index 4a46417..0000000
--- a/thirdparty/librdkafka-0.11.1/.gitignore
+++ /dev/null
@@ -1,29 +0,0 @@
-config.h
-config.log*
-config.cache
-Makefile.config
-rdkafka*.pc
-*~
-\#*
-*.o
-*.so
-*.so.?
-*.dylib
-*.a
-*.d
-librdkafka*.lds
-core
-vgcore.*
-*dSYM/
-*.offset
-SOURCES
-gmon.out
-*.zip
-*.gz
-*.bz2
-*.deb
-*.rpm
-staging-docs
-tmp
-stats*.json
-test_report*.json

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/.travis.yml
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/.travis.yml b/thirdparty/librdkafka-0.11.1/.travis.yml
deleted file mode 100644
index 59d7af7..0000000
--- a/thirdparty/librdkafka-0.11.1/.travis.yml
+++ /dev/null
@@ -1,41 +0,0 @@
-language: c
-cache: ccache
-env:
-- ARCH=x64
-compiler:
-- gcc
-- clang
-os:
-- linux
-- osx
-dist: trusty
-sudo: false
-before_install:
-  - if [[ "${TRAVIS_OS_NAME}_$CC" == "linux_gcc" ]]; then sudo make -C packaging/rpm MOCK_CONFIG=el7-x86_64 prepare_ubuntu ; fi
-before_script:
- - ccache -s || echo "CCache is not available."
-script:
-- rm -rf artifacts dest
-- mkdir dest artifacts
-- if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then CPPFLAGS="-I/usr/local/opt/openssl/include
-  -L/usr/local/opt/openssl/lib" ./configure --enable-static --disable-lz4 --prefix="$PWD/dest" ; else ./configure --enable-static --disable-lz4 --prefix="$PWD/dest" ; fi
-- make -j2 all examples check && make -C tests run_local
-- make install
-- (cd dest && tar cvzf ../artifacts/librdkafka.tar.gz .)
-- if [[ "${TRAVIS_OS_NAME}_$CC" == "linux_gcc" ]]; then sudo make -C packaging/rpm MOCK_CONFIG=el7-x86_64 all copy-artifacts ; fi
-deploy:
-  provider: s3
-  access_key_id:
-    secure: "i7IAO5z6xYeVaOnFiJOz0wluH7CfGf39TiABQG/yQIIp53fRu+X94FBUS7xq2PziKV08ak8pNWWjenwRB/viPTGqFQDS/ypymhSGGQVTmhgG372ypODhqbnvzskMNe1wp+UFIl8LFVEaHMVPEwWVuYpXhWNMHGrr4uSQhF+5r0Y="
-  secret_access_key:
-    secure: q7rWZkwQqyIFu3i32y/rJsKF1AJpwIqQe3AhBFEqjCz80mdEYysvcHBfl956EAAr4qbR3umIKbkhRwbBVyDGlMaraZiR7YFgAO+v0DRFeMiF5XFE3KHNIpCQHSKnzOiL65TpJHZIoB+6w0lAbBVzj87MBN0axAeArHKiO1y8Eec=
-  bucket: librdkafka-ci-packages
-  region: us-west-1
-  skip_cleanup: true
-  local-dir: artifacts
-  upload-dir: librdkafka/p-librdkafka__bld-travis__plat-${TRAVIS_OS_NAME}__arch-${ARCH}__tag-${TRAVIS_TAG}__sha-${TRAVIS_COMMIT}__bid-${TRAVIS_JOB_NUMBER}
-  on:
-    condition: "$CC = gcc"
-    repo: edenhill/librdkafka
-    all_branches: true
-    tags: true

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/CMakeLists.txt
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/CMakeLists.txt b/thirdparty/librdkafka-0.11.1/CMakeLists.txt
deleted file mode 100644
index 2ae6377..0000000
--- a/thirdparty/librdkafka-0.11.1/CMakeLists.txt
+++ /dev/null
@@ -1,168 +0,0 @@
-cmake_minimum_required(VERSION 3.4)
-project(libRdKafka)
-
-# Options. No 'RDKAFKA_' prefix to match old C++ code. {
-
-# This option doesn't affect build in fact, only C code
-# (see 'rd_kafka_version_str'). In CMake the build type feature usually used
-# (like Debug, Release, etc.).
-option(WITHOUT_OPTIMIZATION "Disable optimization" OFF)
-
-option(ENABLE_DEVEL "Enable development asserts, checks, etc" OFF)
-option(ENABLE_REFCNT_DEBUG "Enable refcnt debugging" OFF)
-option(ENABLE_SHAREDPTR_DEBUG "Enable sharedptr debugging" OFF)
-
-# ZLIB {
-find_package(ZLIB QUIET)
-if(ZLIB_FOUND)
-  set(with_zlib_default ON)
-else()
-  set(with_zlib_default OFF)
-endif()
-option(WITH_ZLIB "With ZLIB" ${with_zlib_default})
-# }
-
-# LibDL {
-find_package(LIBDL QUIET)
-if(LIBDL_FOUND)
-  set(with_libdl_default ON)
-else()
-  set(with_libdl_default OFF)
-endif()
-option(WITH_LIBDL "With LibDL" ${with_libdl_default})
-# }
-
-# WITH_PLUGINS {
-if(WITH_LIBDL)
-  set(with_plugins_default ON)
-else()
-  set(with_plugins_default OFF)
-endif()
-option(WITH_PLUGINS "With plugin support" ${with_plugins_default})
-# }
-
-# OpenSSL {
-if(WITH_BUNDLED_SSL) # option from 'h2o' parent project
-  set(with_ssl_default ON)
-else()
-  find_package(OpenSSL QUIET)
-  if(OpenSSL_FOUND)
-    set(with_ssl_default ON)
-  else()
-    set(with_ssl_default OFF)
-  endif()
-endif()
-option(WITH_SSL "With SSL" ${with_ssl_default})
-# }
-
-# SASL {
-if(WIN32)
-  set(with_sasl_default ON)
-else()
-  include(FindPkgConfig)
-  pkg_check_modules(SASL QUIET libsasl2)
-  if(SASL_FOUND)
-    set(with_sasl_default ON)
-  else()
-    set(with_sasl_default OFF)
-  endif()
-endif()
-option(WITH_SASL "With SASL" ${with_sasl_default})
-if(WITH_SASL)
-  if(WIN32)
-    set(WITH_SASL_SCRAM ON)
-  else()
-    set(WITH_SASL_CYRUS ON)
-  endif()
-endif()
-# }
-
-# }
-
-option(RDKAFKA_BUILD_EXAMPLES "Build examples" OFF)
-option(RDKAFKA_BUILD_TESTS "Build tests" OFF)
-
-set(TRYCOMPILE_SRC_DIR "${CMAKE_CURRENT_LIST_DIR}/packaging/cmake/try_compile")
-
-# In:
-# * TRYCOMPILE_SRC_DIR
-# Out:
-# * HAVE_ATOMICS_32
-# * HAVE_ATOMICS_32_SYNC
-# * HAVE_ATOMICS_64
-# * HAVE_ATOMICS_64_SYNC
-# * HAVE_REGEX
-# * HAVE_STRNDUP
-# * LINK_ATOMIC
-include("packaging/cmake/try_compile/rdkafka_setup.cmake")
-
-set(GENERATED_DIR "${CMAKE_CURRENT_BINARY_DIR}/generated")
-
-# In:
-# * WITHOUT_OPTIMIZATION
-# * ENABLE_DEVEL
-# * ENABLE_REFCNT_DEBUG
-# * ENABLE_SHAREDPTR_DEBUG
-# * HAVE_ATOMICS_32
-# * HAVE_ATOMICS_32_SYNC
-# * HAVE_ATOMICS_64
-# * HAVE_ATOMICS_64_SYNC
-# * WITH_ZLIB
-# * WITH_SSL
-# * WITH_SASL
-# * HAVE_REGEX
-# * HAVE_STRNDUP
-configure_file(config.h.in "${GENERATED_DIR}/config.h" @ONLY)
-
-# Installation (https://github.com/forexample/package-example) {
-
-include(GNUInstallDirs)
-
-set(config_install_dir "lib/cmake/${PROJECT_NAME}")
-
-set(generated_dir "${CMAKE_CURRENT_BINARY_DIR}/generated")
-
-set(project_config "${generated_dir}/${PROJECT_NAME}Config.cmake")
-set(targets_export_name "${PROJECT_NAME}Targets")
-set(namespace "${PROJECT_NAME}::")
-
-include(CMakePackageConfigHelpers)
-
-# In:
-#   * targets_export_name
-#   * PROJECT_NAME
-configure_package_config_file(
-    "packaging/cmake/Config.cmake.in"
-    "${project_config}"
-    INSTALL_DESTINATION "${config_install_dir}"
-)
-
-install(
-    FILES "${project_config}"
-    DESTINATION "${config_install_dir}"
-)
-
-install(
-    EXPORT "${targets_export_name}"
-    NAMESPACE "${namespace}"
-    DESTINATION "${config_install_dir}"
-)
-
-install(
-    FILES LICENSES.txt
-    DESTINATION "share/licenses/librdkafka"
-)
-
-# }
-
-add_subdirectory(src)
-add_subdirectory(src-cpp)
-
-if(RDKAFKA_BUILD_EXAMPLES)
-  add_subdirectory(examples)
-endif()
-
-if(RDKAFKA_BUILD_TESTS)
-  enable_testing()
-  add_subdirectory(tests)
-endif()

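The build file above fills in HAVE_ATOMICS_32, HAVE_ATOMICS_64 and related flags by compiling small probes from packaging/cmake/try_compile, and the 0.11.4 tree keeps the same mechanism per the file list earlier. A sketch of the kind of probe atomic_32_test.c amounts to, assuming GCC/Clang __atomic builtins; this is a hypothetical reconstruction, not the verbatim file:

    #include <inttypes.h>

    /* If this compiles and links, the toolchain supports 32-bit __atomic
     * operations and the CMake setup defines HAVE_ATOMICS_32 in config.h. */
    static int32_t add_one(int32_t *v) {
        return __atomic_add_fetch(v, 1, __ATOMIC_SEQ_CST);
    }

    int main(void) {
        int32_t v = 0;
        return add_one(&v) == 1 ? 0 : 1;
    }
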
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/CONFIGURATION.md
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/CONFIGURATION.md b/thirdparty/librdkafka-0.11.1/CONFIGURATION.md
deleted file mode 100644
index f58dfbd..0000000
--- a/thirdparty/librdkafka-0.11.1/CONFIGURATION.md
+++ /dev/null
@@ -1,128 +0,0 @@
-//@file
-## Global configuration properties
-
-Property                                 | C/P | Range           |       Default | Description              
------------------------------------------|-----|-----------------|--------------:|--------------------------
-builtin.features                         |  *  |                 | gzip, snappy, ssl, sasl, regex, lz4, sasl_gssapi, sasl_plain, sasl_scram, plugins | Indicates the builtin features for this build of librdkafka. An application can either query this value or attempt to set it with its list of required features to check for library support. <br>*Type: CSV flags*
-client.id                                |  *  |                 |       rdkafka | Client identifier. <br>*Type: string*
-metadata.broker.list                     |  *  |                 |               | Initial list of brokers as a CSV list of broker host or host:port. The application may also use `rd_kafka_brokers_add()` to add brokers during runtime. <br>*Type: string*
-bootstrap.servers                        |  *  |                 |               | Alias for `metadata.broker.list`
-message.max.bytes                        |  *  | 1000 .. 1000000000 |       1000000 | Maximum Kafka protocol request message size. <br>*Type: integer*
-message.copy.max.bytes                   |  *  | 0 .. 1000000000 |         65535 | Maximum size for message to be copied to buffer. Messages larger than this will be passed by reference (zero-copy) at the expense of larger iovecs. <br>*Type: integer*
-receive.message.max.bytes                |  *  | 1000 .. 1000000000 |     100000000 | Maximum Kafka protocol response message size. This is a safety precaution to avoid memory exhaustion in case of protocol hickups. The value should be at least fetch.message.max.bytes * number of partitions consumed from + messaging overhead (e.g. 200000 bytes). <br>*Type: integer*
-max.in.flight.requests.per.connection    |  *  | 1 .. 1000000    |       1000000 | Maximum number of in-flight requests per broker connection. This is a generic property applied to all broker communication, however it is primarily relevant to produce requests. In particular, note that other mechanisms limit the number of outstanding consumer fetch request per broker to one. <br>*Type: integer*
-max.in.flight                            |  *  |                 |               | Alias for `max.in.flight.requests.per.connection`
-metadata.request.timeout.ms              |  *  | 10 .. 900000    |         60000 | Non-topic request timeout in milliseconds. This is for metadata requests, etc. <br>*Type: integer*
-topic.metadata.refresh.interval.ms       |  *  | -1 .. 3600000   |        300000 | Topic metadata refresh interval in milliseconds. The metadata is automatically refreshed on error and connect. Use -1 to disable the intervalled refresh. <br>*Type: integer*
-metadata.max.age.ms                      |  *  | 1 .. 86400000   |            -1 | Metadata cache max age. Defaults to metadata.refresh.interval.ms * 3 <br>*Type: integer*
-topic.metadata.refresh.fast.interval.ms  |  *  | 1 .. 60000      |           250 | When a topic loses its leader a new metadata request will be enqueued with this initial interval, exponentially increasing until the topic metadata has been refreshed. This is used to recover quickly from transitioning leader brokers. <br>*Type: integer*
-topic.metadata.refresh.fast.cnt          |  *  | 0 .. 1000       |            10 | *Deprecated: No longer used.* <br>*Type: integer*
-topic.metadata.refresh.sparse            |  *  | true, false     |          true | Sparse metadata requests (consumes less network bandwidth) <br>*Type: boolean*
-topic.blacklist                          |  *  |                 |               | Topic blacklist, a comma-separated list of regular expressions for matching topic names that should be ignored in broker metadata information as if the topics did not exist. <br>*Type: pattern list*
-debug                                    |  *  | generic, broker, topic, metadata, queue, msg, protocol, cgrp, security, fetch, feature, interceptor, plugin, all |               | A comma-separated list of debug contexts to enable. Debugging the Producer: broker,topic,msg. Consumer: cgrp,topic,fetch <br>*Type: CSV flags*
-socket.timeout.ms                        |  *  | 10 .. 300000    |         60000 | Timeout for network requests. <br>*Type: integer*
-socket.blocking.max.ms                   |  *  | 1 .. 60000      |          1000 | Maximum time a broker socket operation may block. A lower value improves responsiveness at the expense of slightly higher CPU usage. **Deprecated** <br>*Type: integer*
-socket.send.buffer.bytes                 |  *  | 0 .. 100000000  |             0 | Broker socket send buffer size. System default is used if 0. <br>*Type: integer*
-socket.receive.buffer.bytes              |  *  | 0 .. 100000000  |             0 | Broker socket receive buffer size. System default is used if 0. <br>*Type: integer*
-socket.keepalive.enable                  |  *  | true, false     |         false | Enable TCP keep-alives (SO_KEEPALIVE) on broker sockets <br>*Type: boolean*
-socket.nagle.disable                     |  *  | true, false     |         false | Disable the Nagle algorithm (TCP_NODELAY). <br>*Type: boolean*
-socket.max.fails                         |  *  | 0 .. 1000000    |             3 | Disconnect from broker when this number of send failures (e.g., timed out requests) is reached. Disable with 0. NOTE: The connection is automatically re-established. <br>*Type: integer*
-broker.address.ttl                       |  *  | 0 .. 86400000   |          1000 | How long to cache the broker address resolving results (milliseconds). <br>*Type: integer*
-broker.address.family                    |  *  | any, v4, v6     |           any | Allowed broker IP address families: any, v4, v6 <br>*Type: enum value*
-reconnect.backoff.jitter.ms              |  *  | 0 .. 3600000    |           500 | Throttle broker reconnection attempts by this value +-50%. <br>*Type: integer*
-statistics.interval.ms                   |  *  | 0 .. 86400000   |             0 | librdkafka statistics emit interval. The application also needs to register a stats callback using `rd_kafka_conf_set_stats_cb()`. The granularity is 1000ms. A value of 0 disables statistics. <br>*Type: integer*
-enabled_events                           |  *  | 0 .. 2147483647 |             0 | See `rd_kafka_conf_set_events()` <br>*Type: integer*
-error_cb                                 |  *  |                 |               | Error callback (set with rd_kafka_conf_set_error_cb()) <br>*Type: pointer*
-throttle_cb                              |  *  |                 |               | Throttle callback (set with rd_kafka_conf_set_throttle_cb()) <br>*Type: pointer*
-stats_cb                                 |  *  |                 |               | Statistics callback (set with rd_kafka_conf_set_stats_cb()) <br>*Type: pointer*
-log_cb                                   |  *  |                 |               | Log callback (set with rd_kafka_conf_set_log_cb()) <br>*Type: pointer*
-log_level                                |  *  | 0 .. 7          |             6 | Logging level (syslog(3) levels) <br>*Type: integer*
-log.queue                                |  *  | true, false     |         false | Disable spontaneous log_cb from internal librdkafka threads, instead enqueue log messages on queue set with `rd_kafka_set_log_queue()` and serve log callbacks or events through the standard poll APIs. **NOTE**: Log messages will linger in a temporary queue until the log queue has been set. <br>*Type: boolean*
-log.thread.name                          |  *  | true, false     |          true | Print internal thread name in log messages (useful for debugging librdkafka internals) <br>*Type: boolean*
-log.connection.close                     |  *  | true, false     |          true | Log broker disconnects. It might be useful to turn this off when interacting with 0.9 brokers with an aggressive `connection.max.idle.ms` value. <br>*Type: boolean*
-socket_cb                                |  *  |                 |               | Socket creation callback to provide race-free CLOEXEC <br>*Type: pointer*
-connect_cb                               |  *  |                 |               | Socket connect callback <br>*Type: pointer*
-closesocket_cb                           |  *  |                 |               | Socket close callback <br>*Type: pointer*
-open_cb                                  |  *  |                 |               | File open callback to provide race-free CLOEXEC <br>*Type: pointer*
-opaque                                   |  *  |                 |               | Application opaque (set with rd_kafka_conf_set_opaque()) <br>*Type: pointer*
-default_topic_conf                       |  *  |                 |               | Default topic configuration for automatically subscribed topics <br>*Type: pointer*
-internal.termination.signal              |  *  | 0 .. 128        |             0 | Signal that librdkafka will use to quickly terminate on rd_kafka_destroy(). If this signal is not set then there will be a delay before rd_kafka_wait_destroyed() returns true as internal threads are timing out their system calls. If this signal is set however the delay will be minimal. The application should mask this signal as an internal signal handler is installed. <br>*Type: integer*
-api.version.request                      |  *  | true, false     |          true | Request broker's supported API versions to adjust functionality to available protocol features. If set to false, or the ApiVersionRequest fails, the fallback version `broker.version.fallback` will be used. **NOTE**: Depends on broker version >=0.10.0. If the request is not supported by (an older) broker the `broker.version.fallback` fallback is used. <br>*Type: boolean*
-api.version.request.timeout.ms           |  *  | 1 .. 300000     |         10000 | Timeout for broker API version requests. <br>*Type: integer*
-api.version.fallback.ms                  |  *  | 0 .. 604800000  |       1200000 | Dictates how long the `broker.version.fallback` fallback is used in the case the ApiVersionRequest fails. **NOTE**: The ApiVersionRequest is only issued when a new connection to the broker is made (such as after an upgrade). <br>*Type: integer*
-broker.version.fallback                  |  *  |                 |         0.9.0 | Older broker versions (<0.10.0) provides no way for a client to query for supported protocol features (ApiVersionRequest, see `api.version.request`) making it impossible for the client to know what features it may use. As a workaround a user may set this property to the expected broker version and the client will automatically adjust its feature set accordingly if the ApiVersionRequest fails (or is disabled). The fallback broker version will be used for `api.version.fallback.ms`. Valid values are: 0.9.0, 0.8.2, 0.8.1, 0.8.0. Any other value, such as 0.10.2.1, enables ApiVersionRequests. <br>*Type: string*
-security.protocol                        |  *  | plaintext, ssl, sasl_plaintext, sasl_ssl |     plaintext | Protocol used to communicate with brokers. <br>*Type: enum value*
-ssl.cipher.suites                        |  *  |                 |               | A cipher suite is a named combination of authentication, encryption, MAC and key exchange algorithm used to negotiate the security settings for a network connection using TLS or SSL network protocol. See manual page for `ciphers(1)` and `SSL_CTX_set_cipher_list(3). <br>*Type: string*
-ssl.key.location                         |  *  |                 |               | Path to client's private key (PEM) used for authentication. <br>*Type: string*
-ssl.key.password                         |  *  |                 |               | Private key passphrase <br>*Type: string*
-ssl.certificate.location                 |  *  |                 |               | Path to client's public key (PEM) used for authentication. <br>*Type: string*
-ssl.ca.location                          |  *  |                 |               | File or directory path to CA certificate(s) for verifying the broker's key. <br>*Type: string*
-ssl.crl.location                         |  *  |                 |               | Path to CRL for verifying broker's certificate validity. <br>*Type: string*
-sasl.mechanisms                          |  *  |                 |        GSSAPI | SASL mechanism to use for authentication. Supported: GSSAPI, PLAIN, SCRAM-SHA-256, SCRAM-SHA-512. **NOTE**: Despite the name only one mechanism must be configured. <br>*Type: string*
-sasl.kerberos.service.name               |  *  |                 |         kafka | Kerberos principal name that Kafka runs as. <br>*Type: string*
-sasl.kerberos.principal                  |  *  |                 |   kafkaclient | This client's Kerberos principal name. <br>*Type: string*
-sasl.kerberos.kinit.cmd                  |  *  |                 | kinit -S "%{sasl.kerberos.service.name}/%{broker.name}" -k -t "%{sasl.kerberos.keytab}" %{sasl.kerberos.principal} | Full kerberos kinit command string, %{config.prop.name} is replaced by corresponding config object value, %{broker.name} returns the broker's hostname. <br>*Type: string*
-sasl.kerberos.keytab                     |  *  |                 |               | Path to Kerberos keytab file. Uses system default if not set. **NOTE**: This is not automatically used but must be added to the template in sasl.kerberos.kinit.cmd as ` ... -t %{sasl.kerberos.keytab}`. <br>*Type: string*
-sasl.kerberos.min.time.before.relogin    |  *  | 1 .. 86400000   |         60000 | Minimum time in milliseconds between key refresh attempts. <br>*Type: integer*
-sasl.username                            |  *  |                 |               | SASL username for use with the PLAIN and SASL-SCRAM-.. mechanisms. <br>*Type: string*
-sasl.password                            |  *  |                 |               | SASL password for use with the PLAIN and SASL-SCRAM-.. mechanisms. <br>*Type: string*
-plugin.library.paths                     |  *  |                 |               | List of plugin libraries to load (; separated). The library search path is platform dependent (see dlopen(3) for Unix and LoadLibrary() for Windows). If no filename extension is specified the platform-specific extension (such as .dll or .so) will be appended automatically. <br>*Type: string*
-interceptors                             |  *  |                 |               | Interceptors added through rd_kafka_conf_interceptor_add_..() and any configuration handled by interceptors. <br>*Type: *
-group.id                                 |  *  |                 |               | Client group id string. All clients sharing the same group.id belong to the same group. <br>*Type: string*
-partition.assignment.strategy            |  *  |                 | range,roundrobin | Name of partition assignment strategy to use when elected group leader assigns partitions to group members. <br>*Type: string*
-session.timeout.ms                       |  *  | 1 .. 3600000    |         30000 | Client group session and failure detection timeout. <br>*Type: integer*
-heartbeat.interval.ms                    |  *  | 1 .. 3600000    |          1000 | Group session keepalive heartbeat interval. <br>*Type: integer*
-group.protocol.type                      |  *  |                 |      consumer | Group protocol type <br>*Type: string*
-coordinator.query.interval.ms            |  *  | 1 .. 3600000    |        600000 | How often to query for the current client group coordinator. If the currently assigned coordinator is down the configured query interval will be divided by ten to more quickly recover in case of coordinator reassignment. <br>*Type: integer*
-enable.auto.commit                       |  C  | true, false     |          true | Automatically and periodically commit offsets in the background. <br>*Type: boolean*
-auto.commit.interval.ms                  |  C  | 0 .. 86400000   |          5000 | The frequency in milliseconds that the consumer offsets are committed (written) to offset storage. (0 = disable). This setting is used by the high-level consumer. <br>*Type: integer*
-enable.auto.offset.store                 |  C  | true, false     |          true | Automatically store offset of last message provided to application. <br>*Type: boolean*
-queued.min.messages                      |  C  | 1 .. 10000000   |        100000 | Minimum number of messages per topic+partition librdkafka tries to maintain in the local consumer queue. <br>*Type: integer*
-queued.max.messages.kbytes               |  C  | 1 .. 1000000000 |       1000000 | Maximum number of kilobytes per topic+partition in the local consumer queue. This value may be overshot by fetch.message.max.bytes. This property has higher priority than queued.min.messages. <br>*Type: integer*
-fetch.wait.max.ms                        |  C  | 0 .. 300000     |           100 | Maximum time the broker may wait to fill the response with fetch.min.bytes. <br>*Type: integer*
-fetch.message.max.bytes                  |  C  | 1 .. 1000000000 |       1048576 | Initial maximum number of bytes per topic+partition to request when fetching messages from the broker. If the client encounters a message larger than this value it will gradually try to increase it until the entire message can be fetched. <br>*Type: integer*
-max.partition.fetch.bytes                |  C  |                 |               | Alias for `fetch.message.max.bytes`
-fetch.min.bytes                          |  C  | 1 .. 100000000  |             1 | Minimum number of bytes the broker responds with. If fetch.wait.max.ms expires the accumulated data will be sent to the client regardless of this setting. <br>*Type: integer*
-fetch.error.backoff.ms                   |  C  | 0 .. 300000     |           500 | How long to postpone the next fetch request for a topic+partition in case of a fetch error. <br>*Type: integer*
-offset.store.method                      |  C  | none, file, broker |        broker | Offset commit store method: 'file' - local file store (offset.store.path, et.al), 'broker' - broker commit store (requires Apache Kafka 0.8.2 or later on the broker). <br>*Type: enum value*
-consume_cb                               |  C  |                 |               | Message consume callback (set with rd_kafka_conf_set_consume_cb()) <br>*Type: pointer*
-rebalance_cb                             |  C  |                 |               | Called after consumer group has been rebalanced (set with rd_kafka_conf_set_rebalance_cb()) <br>*Type: pointer*
-offset_commit_cb                         |  C  |                 |               | Offset commit result propagation callback. (set with rd_kafka_conf_set_offset_commit_cb()) <br>*Type: pointer*
-enable.partition.eof                     |  C  | true, false     |          true | Emit RD_KAFKA_RESP_ERR__PARTITION_EOF event whenever the consumer reaches the end of a partition. <br>*Type: boolean*
-check.crcs                               |  C  | true, false     |         false | Verify CRC32 of consumed messages, ensuring no on-the-wire or on-disk corruption to the messages occurred. This check comes at slightly increased CPU usage. <br>*Type: boolean*
-queue.buffering.max.messages             |  P  | 1 .. 10000000   |        100000 | Maximum number of messages allowed on the producer queue. <br>*Type: integer*
-queue.buffering.max.kbytes               |  P  | 1 .. 2097151    |       4000000 | Maximum total message size sum allowed on the producer queue. This property has higher priority than queue.buffering.max.messages. <br>*Type: integer*
-queue.buffering.max.ms                   |  P  | 0 .. 900000     |             0 | Delay in milliseconds to wait for messages in the producer queue to accumulate before constructing message batches (MessageSets) to transmit to brokers. A higher value allows larger and more effective (less overhead, improved compression) batches of messages to accumulate at the expense of increased message delivery latency. <br>*Type: integer*
-linger.ms                                |  P  |                 |               | Alias for `queue.buffering.max.ms`
-message.send.max.retries                 |  P  | 0 .. 10000000   |             2 | How many times to retry sending a failing MessageSet. **Note:** retrying may cause reordering. <br>*Type: integer*
-retries                                  |  P  |                 |               | Alias for `message.send.max.retries`
-retry.backoff.ms                         |  P  | 1 .. 300000     |           100 | The backoff time in milliseconds before retrying a message send. <br>*Type: integer*
-compression.codec                        |  P  | none, gzip, snappy, lz4 |          none | Compression codec to use for compressing message sets. This is the default value for all topics and may be overridden by the topic configuration property `compression.codec`.  <br>*Type: enum value*
-batch.num.messages                       |  P  | 1 .. 1000000    |         10000 | Maximum number of messages batched in one MessageSet. The total MessageSet size is also limited by message.max.bytes. <br>*Type: integer*
-delivery.report.only.error               |  P  | true, false     |         false | Only provide delivery reports for failed messages. <br>*Type: boolean*
-dr_cb                                    |  P  |                 |               | Delivery report callback (set with rd_kafka_conf_set_dr_cb()) <br>*Type: pointer*
-dr_msg_cb                                |  P  |                 |               | Delivery report callback (set with rd_kafka_conf_set_dr_msg_cb()) <br>*Type: pointer*
-
-
-## Topic configuration properties
-
-Property                                 | C/P | Range           |       Default | Description              
------------------------------------------|-----|-----------------|--------------:|--------------------------
-request.required.acks                    |  P  | -1 .. 1000      |             1 | This field indicates how many acknowledgements the leader broker must receive from ISR brokers before responding to the request: *0*=Broker does not send any response/ack to client, *1*=Only the leader broker will need to ack the message, *-1* or *all*=the broker will block until the message is committed by all in-sync replicas (ISRs), or by the broker's `in.sync.replicas` setting, before sending a response.  <br>*Type: integer*
-acks                                     |  P  |                 |               | Alias for `request.required.acks`
-request.timeout.ms                       |  P  | 1 .. 900000     |          5000 | The ack timeout of the producer request in milliseconds. This value is only enforced by the broker and relies on `request.required.acks` being != 0. <br>*Type: integer*
-message.timeout.ms                       |  P  | 0 .. 900000     |        300000 | Local message timeout. This value is only enforced locally and limits the time a produced message waits for successful delivery. A time of 0 is infinite. <br>*Type: integer*
-produce.offset.report                    |  P  | true, false     |         false | Report offset of produced message back to application. The application must use the `dr_msg_cb` to retrieve the offset from `rd_kafka_message_t.offset`. <br>*Type: boolean*
-partitioner_cb                           |  P  |                 |               | Partitioner callback (set with rd_kafka_topic_conf_set_partitioner_cb()) <br>*Type: pointer*
-opaque                                   |  *  |                 |               | Application opaque (set with rd_kafka_topic_conf_set_opaque()) <br>*Type: pointer*
-compression.codec                        |  P  | none, gzip, snappy, lz4, inherit |       inherit | Compression codec to use for compressing message sets.  <br>*Type: enum value*
-auto.commit.enable                       |  C  | true, false     |          true | If true, periodically commit offset of the last message handed to the application. This committed offset will be used when the process restarts to pick up where it left off. If false, the application will have to call `rd_kafka_offset_store()` to store an offset (optional). **NOTE:** This property should only be used with the simple legacy consumer, when using the high-level KafkaConsumer the global `enable.auto.commit` property must be used instead. **NOTE:** There is currently no zookeeper integration, offsets will be written to broker or local file according to offset.store.method. <br>*Type: boolean*
-enable.auto.commit                       |  C  |                 |               | Alias for `auto.commit.enable`
-auto.commit.interval.ms                  |  C  | 10 .. 86400000  |         60000 | The frequency in milliseconds that the consumer offsets are committed (written) to offset storage. This setting is used by the low-level legacy consumer. <br>*Type: integer*
-auto.offset.reset                        |  C  | smallest, earliest, beginning, largest, latest, end, error |       largest | Action to take when there is no initial offset in offset store or the desired offset is out of range: 'smallest','earliest' - automatically reset the offset to the smallest offset, 'largest','latest' - automatically reset the offset to the largest offset, 'error' - trigger an error which is retrieved by consuming messages and checking 'message->err'. <br>*Type: enum value*
-offset.store.path                        |  C  |                 |             . | Path to local file for storing offsets. If the path is a directory a filename will be automatically generated in that directory based on the topic and partition. <br>*Type: string*
-offset.store.sync.interval.ms            |  C  | -1 .. 86400000  |            -1 | fsync() interval for the offset file, in milliseconds. Use -1 to disable syncing, and 0 for immediate sync after each write. <br>*Type: integer*
-offset.store.method                      |  C  | file, broker    |        broker | Offset commit store method: 'file' - local file store (offset.store.path, et.al), 'broker' - broker commit store (requires "group.id" to be configured and Apache Kafka 0.8.2 or later on the broker.). <br>*Type: enum value*
-consume.callback.max.messages            |  C  | 0 .. 1000000    |             0 | Maximum number of messages to dispatch in one `rd_kafka_consume_callback*()` call (0 = unlimited) <br>*Type: integer*
-
-### C/P legend: C = Consumer, P = Producer, * = both
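
As a quick orientation to the properties in the tables above, the sketch below (not part of this commit) shows how a few of the consumer properties can be set through librdkafka's public C API, using `rd_kafka_conf_new()`, `rd_kafka_conf_set()` and `rd_kafka_new()` as seen elsewhere in this patch; the property values are placeholders, not recommendations.

    /* Minimal sketch: build a consumer handle from a few of the
     * properties listed above. Values are illustrative placeholders. */
    #include <stdio.h>
    #include "rdkafka.h"

    static rd_kafka_t *example_create_consumer (void) {
            char errstr[512];
            rd_kafka_conf_t *conf;
            rd_kafka_t *rk;

            conf = rd_kafka_conf_new();

            /* rd_kafka_conf_set() returns RD_KAFKA_CONF_OK on success and
             * writes a human-readable reason into errstr on failure. */
            if (rd_kafka_conf_set(conf, "group.id", "example-group",
                                  errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK ||
                rd_kafka_conf_set(conf, "session.timeout.ms", "30000",
                                  errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK ||
                rd_kafka_conf_set(conf, "enable.auto.commit", "true",
                                  errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) {
                    fprintf(stderr, "%% Configuration failed: %s\n", errstr);
                    rd_kafka_conf_destroy(conf);
                    return NULL;
            }

            /* On success rd_kafka_new() takes ownership of conf. */
            rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, errstr, sizeof(errstr));
            if (!rk) {
                    fprintf(stderr, "%% Failed to create handle: %s\n", errstr);
                    rd_kafka_conf_destroy(conf);
                    return NULL;
            }

            return rk;
    }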

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/CONTRIBUTING.md
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/CONTRIBUTING.md b/thirdparty/librdkafka-0.11.1/CONTRIBUTING.md
deleted file mode 100644
index 5da7c77..0000000
--- a/thirdparty/librdkafka-0.11.1/CONTRIBUTING.md
+++ /dev/null
@@ -1,271 +0,0 @@
-# Contributing to librdkafka
-
-(This document is based on [curl's CONTRIBUTE.md](https://github.com/curl/curl/blob/master/docs/CONTRIBUTE.md) - thank you!)
-
-This document is intended to offer guidelines on how to best contribute to the
-librdkafka project. This concerns new features as well as bug fixes and
-general improvements.
-
-### License and copyright
-
-When contributing with code, you agree to put your changes and new code under
-the same license librdkafka is already using unless stated and agreed
-otherwise.
-
-When changing existing source code, you do not alter the copyright of the
-original file(s). The copyright will still be owned by the original creator(s)
-or those who have been assigned copyright by the original author(s).
-
-By submitting a patch to librdkafka, you are assumed to have the right to
-the code and to be allowed by your employer or other relevant parties to
-hand over that patch/code to us. We will credit you for your changes as far
-as possible, to give credit but also to keep a trace back to who made what
-changes. Please always provide us with your full real name when contributing!
-
-Official librdkafka project maintainer(s) assume ownership of all accepted
-submissions.
-
-## Write a good patch
-
-### Follow code style
-
-When writing C code, follow the code style already established in
-the project. Consistent style makes code easier to read and mistakes less
-likely to happen.
-
-See the end of this document for the C style guide to use in librdkafka.
-
-
-### Write Separate Changes
-
-It is annoying when you get a huge patch from someone that is said to fix 511
-odd problems, but discussions and opinions don't agree with 510 of them - or
-509 of them were already fixed in a different way. Then the person merging
-this change needs to extract the single interesting patch from somewhere
-within the huge pile of source, and that gives a lot of extra work.
-
-Preferably, each fix that corrects a problem should be in its own patch/commit
-with its own description/commit message stating exactly what it corrects so
-that all changes can be selectively applied by the maintainer or other
-interested parties.
-
-Also, separate changes enable bisecting much better when we track problems
-and regressions in the future.
-
-### Patch Against Recent Sources
-
-Please try to make your patches against the latest master branch.
-
-### Test Cases
-
-Bugfixes should also include a new test case in the regression test suite
-that verifies the bug is fixed.
-Create a new tests/00<freenumber>-<short_bug_description>.c file and
-try to reproduce the issue in its simplest form.
-Verify that the test case fails for earlier versions and passes with your
-bugfix in place.
-
-New features and APIs should also result in an added test case.
-
-Submitted patches must pass all existing tests.
-For more information on the test suite see [tests/README]
-
-
-
-## How to get your changes into the main sources
-
-File a [pull request on github](https://github.com/edenhill/librdkafka/pulls)
-
-Your change will be reviewed and discussed there and you will be
-expected to correct flaws pointed out and update accordingly, or the change
-risks stalling and eventually just getting deleted without action. As a submitter
-of a change, you are the owner of that change until it has been merged.
-
-Make sure to monitor your PR on github and answer questions and/or
-fix nits/flaws. This is very important. We will take lack of replies as a
-sign that you're not very anxious to get your patch accepted and we tend to
-simply drop such changes.
-
-When you adjust your pull requests after review, please squash the
-commits so that we can review the full updated version more easily
-and keep history cleaner.
-
-For example:
-
-    # Interactive rebase to let you squash/fixup commits
-    $ git rebase -i master
-
-    # Mark fixes-on-fixes commits as 'fixup' (or just 'f') in the
-    # first column. These will be silently integrated into the
-    # previous commit, so make sure to move the fixup-commit to
-    # the line beneath the parent commit.
-
-    # Since this probably rewrote the history of previously pushed
-    # commits you will need to make a force push, which is usually
-    # a bad idea but works well for pull requests.
-    $ git push --force origin your_feature_branch
-
-
-### Write good commit messages
-
-A short guide to how to write commit messages in the curl project.
-
-    ---- start ----
-    [area]: [short line describing the main effect] [(#issuenumber)]
-           -- empty line --
-    [full description, no wider than 72 columns that describe as much as
-    possible as to why this change is made, and possibly what things
-    it fixes and everything else that is related]
-    ---- stop ----
-
-Example:
-
-    cgrp: restart query timer on all heartbeat failures (#10023)
-    
-    If unhandled errors were received in HeartbeatResponse
-    the cgrp could get stuck in a state where it would not
-    refresh its coordinator.
-
-
-
-# librdkafka C style guide
-
-## Function and globals naming
-
-Use self-explanatory hierarchical snake-case naming.
-Pretty much all symbols should start with `rd_kafka_`, followed by
-their subsystem (e.g., `cgrp`, `broker`, `buf`, etc.), followed by an
-action (e.g., `find`, `get`, `clear`, ..).
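
Purely as an illustration of that convention, hypothetical declarations (example names only, not asserted to be actual symbols in this tree) would look like:

    #include <stdint.h>

    /* Hypothetical illustrations of the naming convention only. */
    typedef struct rd_kafka_s rd_kafka_t;               /* public opaque handle */
    typedef struct rd_kafka_broker_s rd_kafka_broker_t; /* example subsystem type */
    typedef struct rd_kafka_cgrp_s rd_kafka_cgrp_t;     /* example subsystem type */

    /* prefix `rd_kafka_` + subsystem `broker` + action `find` */
    rd_kafka_broker_t *rd_kafka_broker_find (rd_kafka_t *rk, int32_t nodeid);

    /* prefix `rd_kafka_` + subsystem `cgrp` + action `clear` */
    void rd_kafka_cgrp_clear (rd_kafka_cgrp_t *rkcg);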
-
-
-## Variable naming
-
-For existing types use the type prefix as variable name.
-The type prefix is typically the first part of struct member fields.
-Example:
-
-  * `rd_kafka_broker_t` has field names starting with `rkb_..`, thus broker
-     variable names should be named `rkb`
-
-
-For other types use reasonably concise but descriptive names.
-`i` and `j` are typical int iterators.
-
-## Variable declaration
-
-Variables must be declared at the head of a scope; no in-line variable
-declarations are allowed.
-
-## Indenting
-
-Use 8 spaces indent, same as the Linux kernel.
-In emacs, use `c-set-style "linux"`.
-For C++, use Google's C++ style.
-
-## Comments
-
-Use `/* .. */` comments, not `// ..`
-
-For functions, use doxygen syntax, e.g.:
-
-    /**
-     * @brief <short description>
-     * ..
-     * @returns <something..>
-     */
-
-
-Make sure to comment non-obvious code and situations where the full
-context of an operation is not easily graspable.
-
-Also make sure to update existing comments when the code changes.
-
-
-## Line length
-
-Try hard to keep line length below 80 characters; when this is not possible,
-exceed it with reason.
-
-
-## Braces
-
-Braces go on the same line as their enveloping statement:
-
-    int some_func (..) {
-      while (1) {
-        if (1) {
-          do something;
-          ..
-        } else {
-          do something else;
-          ..
-        }
-      }
- 
-      /* Single line scopes should not have braces */
-      if (1)
-        hi();
-      else if (2)
-        /* Say hello */
-        hello();
-      else
-        bye();
-
-
-## Spaces
-
-All expression parentheses should be prefixed and suffixed with a single space:
-
-    int some_func (int a) {
-
-        if (1)
-          ....;
-
-        for (i = 0 ; i < 19 ; i++) {
-
-
-        }
-    }
-
-
-Use space around operators:
-
-    int a = 2;
-  
-    if (b >= 3)
-       c += 2;
-
-Except for these:
-  
-    d++;
-    --e;
-
-
-## New block on new line
-
-New blocks should be on a new line:
-
-    if (1)
-      new();
-    else
-      old();
-
-
-## Parentheses
-
-Don't assume the reader knows C operator precedence by heart for complex
-statements; add parentheses to ease readability.
-
-
-## ifdef hell
-
-Avoid ifdef's as much as possible.
-Platform support checking should be performed in configure.librdkafka.
-
-
-
-
-
-# librdkafka C++ style guide
-
-Follow [Google's C++ style guide](https://google.github.io/styleguide/cppguide.html)


[44/51] [partial] nifi-minifi-cpp git commit: MINIFICPP-512 - upgrade to librdkafka 0.11.4

Posted by ph...@apache.org.
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/packaging/nuget/templates/librdkafka.redist.nuspec
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/packaging/nuget/templates/librdkafka.redist.nuspec b/thirdparty/librdkafka-0.11.1/packaging/nuget/templates/librdkafka.redist.nuspec
deleted file mode 100644
index f48e523..0000000
--- a/thirdparty/librdkafka-0.11.1/packaging/nuget/templates/librdkafka.redist.nuspec
+++ /dev/null
@@ -1,21 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<package xmlns="http://schemas.microsoft.com/packaging/2011/10/nuspec.xsd">
-  <metadata>
-    <id>librdkafka.redist</id>
-    <version>${version}</version>
-    <title>librdkafka - redistributable</title>
-    <authors>Magnus Edenhill, edenhill</authors>
-    <owners>Magnus Edenhill, edenhill</owners>
-    <requireLicenseAcceptance>false</requireLicenseAcceptance>
-    <licenseUrl>https://github.com/edenhill/librdkafka/blob/master/LICENSES.txt</licenseUrl>
-    <projectUrl>https://github.com/edenhill/librdkafka</projectUrl>
-    <description>The Apache Kafka C/C++ client library - redistributable</description>
-    <summary>The Apache Kafka C/C++ client library</summary>
-    <releaseNotes>Release of librdkafka</releaseNotes>
-    <copyright>Copyright 2012-2017</copyright>
-    <tags>native apache kafka librdkafka C C++ nativepackage</tags>
-  </metadata>
-  <files>
-    <file src="**" />
-  </files>
-</package>

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/packaging/nuget/templates/librdkafka.redist.props
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/packaging/nuget/templates/librdkafka.redist.props b/thirdparty/librdkafka-0.11.1/packaging/nuget/templates/librdkafka.redist.props
deleted file mode 100644
index 0b96886..0000000
--- a/thirdparty/librdkafka-0.11.1/packaging/nuget/templates/librdkafka.redist.props
+++ /dev/null
@@ -1,18 +0,0 @@
-<?xml version="1.0" encoding="utf-8" ?>
-<Project ToolsVersion="12.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
-  <ItemGroup>
-    <Content Include="$(MSBuildThisFileDirectory)..\..\runtimes\win7-x86\native\*">
-      <Link>x86\%(Filename)%(Extension)</Link>
-      <CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory>
-    </Content>
-    <Content Include="$(MSBuildThisFileDirectory)..\..\runtimes\win7-x64\native\*">
-      <Link>x64\%(Filename)%(Extension)</Link>
-      <CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory>
-    </Content>
-  </ItemGroup>
-  <ItemDefinitionGroup>
-    <ClCompile>
-      <AdditionalIncludeDirectories>$(MSBuildThisFileDirectory)include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
-    </ClCompile>
-  </ItemDefinitionGroup>
-</Project>

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/packaging/nuget/templates/librdkafka.redist.targets
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/packaging/nuget/templates/librdkafka.redist.targets b/thirdparty/librdkafka-0.11.1/packaging/nuget/templates/librdkafka.redist.targets
deleted file mode 100644
index 4b5c9c4..0000000
--- a/thirdparty/librdkafka-0.11.1/packaging/nuget/templates/librdkafka.redist.targets
+++ /dev/null
@@ -1,19 +0,0 @@
-<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
-  <ItemDefinitionGroup>
-    <Link>
-      <AdditionalDependencies Condition="'$(Platform)' == 'x64'">$(MSBuildThisFileDirectory)..\..\runtimes\win7-x64\native\librdkafka.lib;%(AdditionalDependencies)</AdditionalDependencies>
-      <AdditionalDependencies Condition="'$(Platform)' != 'x64'">$(MSBuildThisFileDirectory)..\..\runtimes\win7-x86\native\librdkafka.lib;%(AdditionalDependencies)</AdditionalDependencies>
-      <AdditionalLibraryDirectories Condition="'$(Platform)' == 'x64'">$(MSBuildThisFileDirectory)..\..\runtimes\win7-x64\native;%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
-      <AdditionalLibraryDirectories Condition="'$(Platform)' != 'x64'">$(MSBuildThisFileDirectory)..\..\runtimes\win7-x86\native;%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
-    </Link>
-    <ClCompile>
-      <AdditionalIncludeDirectories>$(MSBuildThisFileDirectory)include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
-    </ClCompile>
-  </ItemDefinitionGroup>
-  <ItemGroup Condition="'$(Platform)' == 'x64'">
-    <ReferenceCopyLocalPaths Include="$(MSBuildThisFileDirectory)..\..\runtimes\win7-x64\native\librdkafka.dll" />
-  </ItemGroup>
-  <ItemGroup Condition="'$(Platform)' != 'x64'">
-    <ReferenceCopyLocalPaths Include="$(MSBuildThisFileDirectory)..\..\runtimes\win7-x86\native\librdkafka.dll" />
-  </ItemGroup>
-</Project>

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/packaging/nuget/zfile/__init__.py
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/packaging/nuget/zfile/__init__.py b/thirdparty/librdkafka-0.11.1/packaging/nuget/zfile/__init__.py
deleted file mode 100644
index e69de29..0000000

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/packaging/nuget/zfile/zfile.py
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/packaging/nuget/zfile/zfile.py b/thirdparty/librdkafka-0.11.1/packaging/nuget/zfile/zfile.py
deleted file mode 100644
index 8616078..0000000
--- a/thirdparty/librdkafka-0.11.1/packaging/nuget/zfile/zfile.py
+++ /dev/null
@@ -1,100 +0,0 @@
-#!/usr/bin/env python
-
-import os
-import tarfile
-import zipfile
-import rpmfile
-
-class ZFile (object):
-    def __init__(self, path, mode='r', ext=None):
-        super(ZFile, self).__init__()
-
-        if ext is not None:
-            _ext = ext
-        else:
-            _ext = os.path.splitext(path)[-1]
-        if _ext.startswith('.'):
-            _ext = _ext[1:]
-
-        if zipfile.is_zipfile(path) or _ext == 'zip':
-            self.f = zipfile.ZipFile(path, mode)
-        elif tarfile.is_tarfile(path) or _ext in ('tar', 'tgz', 'gz'):
-            self.f = tarfile.open(path, mode)
-        elif _ext == 'rpm':
-            self.f = rpmfile.open(path, mode + 'b')
-        else:
-            raise ValueError('Unsupported file extension: %s' % path)
-
-    def __enter__(self):
-        return self
-
-    def __exit__(self, *args):
-        if callable(getattr(self.f, 'close', None)):
-            self.f.close()
-
-    def getnames(self):
-        if isinstance(self.f, zipfile.ZipFile):
-            return self.f.namelist()
-        elif isinstance(self.f, tarfile.TarFile):
-            return self.f.getnames()
-        elif isinstance(self.f, rpmfile.RPMFile):
-            return [x.name for x in self.f.getmembers()]
-        else:
-            raise NotImplementedError
-
-    def headers(self):
-        if isinstance(self.f, rpmfile.RPMFile):
-            return self.f.headers
-        else:
-            return dict()
-
-    def extract_to(self, member, path):
-        """ Extract compress file's \p member to \p path
-            If \p path is a directory the member's basename will be used as
-            the filename, otherwise path is considered the full file path name. """
-
-        if not os.path.isdir(os.path.dirname(path)):
-            os.makedirs(os.path.dirname(path))
-
-        if os.path.isdir(path):
-            path = os.path.join(path, os.path.basename(member))
-
-        with open(path, 'wb') as of:
-            if isinstance(self.f, zipfile.ZipFile):
-                zf = self.f.open(member)
-            else:
-                zf = self.f.extractfile(member)
-
-            while True:
-                b = zf.read(1024*100)
-                if b:
-                    of.write(b)
-                else:
-                    break
-
-            zf.close()
-
-
-    @classmethod
-    def extract (cls, zpath, member, outpath):
-        """
-        Extract file member (full internal path) to output from
-        archive zpath.
-        """
-
-        with ZFile(zpath) as zf:
-            zf.extract_to(member, outpath)
-
-
-    @classmethod
-    def compress (cls, zpath, paths, stripcnt=0, ext=None):
-        """
-        Create new compressed file \p zpath containing files in \p paths
-        """
-
-        with ZFile(zpath, 'w', ext=ext) as zf:
-            for p in paths:
-                outp = os.path.sep.join(p.split(os.path.sep)[stripcnt:])
-                print('zip %s to %s (stripcnt %d)' % (p, outp, stripcnt))
-                zf.f.write(p, outp)
-

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/packaging/rpm/.gitignore
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/packaging/rpm/.gitignore b/thirdparty/librdkafka-0.11.1/packaging/rpm/.gitignore
deleted file mode 100644
index cf122d0..0000000
--- a/thirdparty/librdkafka-0.11.1/packaging/rpm/.gitignore
+++ /dev/null
@@ -1,3 +0,0 @@
-*.log
-available_pkgs
-installed_pkgs

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/packaging/rpm/Makefile
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/packaging/rpm/Makefile b/thirdparty/librdkafka-0.11.1/packaging/rpm/Makefile
deleted file mode 100644
index 5e949ca..0000000
--- a/thirdparty/librdkafka-0.11.1/packaging/rpm/Makefile
+++ /dev/null
@@ -1,79 +0,0 @@
-PACKAGE_NAME?=	librdkafka
-VERSION?=	$(shell ../get_version.py ../../src/rdkafka.h)
-
-# Jenkins CI integration
-BUILD_NUMBER?= 1
-
-MOCK_CONFIG?=default
-
-RESULT_DIR?=pkgs-$(VERSION)-$(BUILD_NUMBER)-$(MOCK_CONFIG)
-
-all: rpm
-
-
-SOURCES:
-	mkdir -p SOURCES
-
-archive: SOURCES
-	cd ../../ && \
-	git archive --prefix=$(PACKAGE_NAME)-$(VERSION)/ \
-		-o packaging/rpm/SOURCES/$(PACKAGE_NAME)-$(VERSION).tar.gz HEAD
-
-
-build_prepare: archive
-	mkdir -p $(RESULT_DIR)
-	rm -f $(RESULT_DIR)/$(PACKAGE_NAME)*.rpm
-
-
-srpm: build_prepare
-	/usr/bin/mock \
-		-r $(MOCK_CONFIG) \
-		--define "__version $(VERSION)" \
-		--define "__release $(BUILD_NUMBER)" \
-		--resultdir=$(RESULT_DIR) \
-		--no-clean --no-cleanup-after \
-		--buildsrpm \
-		--spec=librdkafka.spec \
-		--sources=SOURCES
-	@echo "======= Source RPM now available in $(RESULT_DIR) ======="
-
-rpm: srpm
-	/usr/bin/mock \
-		-r $(MOCK_CONFIG) \
-		--define "__version $(VERSION)"\
-		--define "__release $(BUILD_NUMBER)"\
-		--resultdir=$(RESULT_DIR) \
-		--no-clean --no-cleanup-after \
-		--rebuild $(RESULT_DIR)/$(PACKAGE_NAME)*.src.rpm
-	@echo "======= Binary RPMs now available in $(RESULT_DIR) ======="
-
-copy-artifacts:
-	cp $(RESULT_DIR)/*rpm ../../artifacts/
-
-clean:
-	rm -rf SOURCES
-	/usr/bin/mock -r $(MOCK_CONFIG) --clean
-
-distclean: clean
-	rm -f build.log root.log state.log available_pkgs installed_pkgs \
-		*.rpm *.tar.gz
-
-# Prepare ubuntu 14.04 for building RPMs with mock.
-#  - older versions of mock need the config file to reside in /etc/mock,
-#    so we copy it there.
-#  - add a mock system group (if it does not already exist)
-#  - add the current user to the mock group.
-#  - prepare mock environment with some needed packages.
-# NOTE: This target should be run with sudo.
-prepare_ubuntu:
-	apt-get -qq update
-	apt-get install -y -qq mock make git python-lzma
-	cp *.cfg /etc/mock/
-	addgroup --system mock || true
-	adduser $$(whoami) mock
-	/usr/bin/mock -r $(MOCK_CONFIG) --init
-	/usr/bin/mock -r $(MOCK_CONFIG) --no-cleanup-after --install epel-release shadow-utils
-
-prepare_centos:
-	yum install -y -q mock make git
-	cp *.cfg /etc/mock/

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/packaging/rpm/el7-x86_64.cfg
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/packaging/rpm/el7-x86_64.cfg b/thirdparty/librdkafka-0.11.1/packaging/rpm/el7-x86_64.cfg
deleted file mode 100644
index 5022827..0000000
--- a/thirdparty/librdkafka-0.11.1/packaging/rpm/el7-x86_64.cfg
+++ /dev/null
@@ -1,40 +0,0 @@
-config_opts['root'] = 'el7-x86_64'
-config_opts['target_arch'] = 'x86_64'
-config_opts['legal_host_arches'] = ('x86_64',)
-config_opts['chroot_setup_cmd'] = 'install @buildsys-build'
-config_opts['dist'] = 'el7'  # only useful for --resultdir variable subst
-config_opts['releasever'] = '7'
-config_opts['docker_unshare_warning'] = False
-config_opts['nosync'] = True
-
-config_opts['yum.conf'] = """
-[main]
-keepcache=1
-debuglevel=2
-reposdir=/dev/null
-logfile=/var/log/yum.log
-retries=15
-obsoletes=1
-gpgcheck=0
-assumeyes=1
-syslog_ident=mock
-syslog_device=
-mdpolicy=group:primary
-
-# repos
-[base]
-name=BaseOS
-mirrorlist=http://mirrorlist.centos.org/?release=7&arch=x86_64&repo=os
-failovermethod=priority
-
-[updates]
-name=updates
-enabled=1
-mirrorlist=http://mirrorlist.centos.org/?release=7&arch=x86_64&repo=updates
-failovermethod=priority
-
-[epel]
-name=epel
-mirrorlist=http://mirrors.fedoraproject.org/mirrorlist?repo=epel-7&arch=x86_64
-failovermethod=priority
-"""

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/packaging/rpm/librdkafka.spec
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/packaging/rpm/librdkafka.spec b/thirdparty/librdkafka-0.11.1/packaging/rpm/librdkafka.spec
deleted file mode 100644
index 8027acf..0000000
--- a/thirdparty/librdkafka-0.11.1/packaging/rpm/librdkafka.spec
+++ /dev/null
@@ -1,103 +0,0 @@
-Name:    librdkafka
-Version: %{__version}
-Release: %{__release}%{?dist}
-%define soname 1
-
-Summary: The Apache Kafka C library
-Group:   Development/Libraries/C and C++
-License: BSD-2-Clause
-URL:     https://github.com/edenhill/librdkafka
-Source:	 librdkafka-%{version}.tar.gz
-
-BuildRequires: zlib-devel libstdc++-devel gcc >= 4.1 gcc-c++ openssl-devel cyrus-sasl-devel lz4-devel python
-BuildRoot: %(mktemp -ud %{_tmppath}/%{name}-%{version}-%{release}-XXXXXX)
-
-%define _source_payload w9.gzdio
-%define _binary_payload w9.gzdio
-
-%description
-librdkafka is the C/C++ client library implementation of the Apache Kafka protocol, containing both Producer and Consumer support.
-
-
-%package -n %{name}%{soname}
-Summary: The Apache Kafka C library
-Group:   Development/Libraries/C and C++
-Requires: zlib libstdc++ cyrus-sasl
-# openssl libraries were extracted to openssl-libs in RHEL7
-%if 0%{?rhel} >= 7
-Requires: openssl-libs
-%else
-Requires: openssl
-%endif
-
-%description -n %{name}%{soname}
-librdkafka is the C/C++ client library implementation of the Apache Kafka protocol, containing both Producer and Consumer support.
-
-
-%package -n %{name}-devel
-Summary: The Apache Kafka C library (Development Environment)
-Group:   Development/Libraries/C and C++
-Requires: %{name}%{soname} = %{version}
-
-%description -n %{name}-devel
-librdkafka is the C/C++ client library implementation of the Apache Kafka protocol, containing both Producer and Consumer support.
-
-This package contains headers and libraries required to build applications
-using librdkafka.
-
-
-%prep
-%setup -q -n %{name}-%{version}
-
-%configure
-
-%build
-make
-
-%install
-rm -rf %{buildroot}
-DESTDIR=%{buildroot} make install
-
-%clean
-rm -rf %{buildroot}
-
-%post   -n %{name}%{soname} -p /sbin/ldconfig
-%postun -n %{name}%{soname} -p /sbin/ldconfig
-
-%files -n %{name}%{soname}
-%defattr(444,root,root)
-%{_libdir}/librdkafka.so.%{soname}
-%{_libdir}/librdkafka++.so.%{soname}
-%defattr(-,root,root)
-%doc README.md CONFIGURATION.md INTRODUCTION.md
-%doc LICENSE LICENSE.pycrc LICENSE.queue LICENSE.snappy LICENSE.tinycthread LICENSE.wingetopt
-
-%defattr(-,root,root)
-#%{_bindir}/rdkafka_example
-#%{_bindir}/rdkafka_performance
-
-
-%files -n %{name}-devel
-%defattr(-,root,root)
-%{_includedir}/librdkafka
-%defattr(444,root,root)
-%{_libdir}/librdkafka.a
-%{_libdir}/librdkafka.so
-%{_libdir}/librdkafka++.a
-%{_libdir}/librdkafka++.so
-%{_libdir}/pkgconfig/rdkafka++.pc
-%{_libdir}/pkgconfig/rdkafka.pc
-
-
-%changelog
-* Thu Apr 09 2015 Eduard Iskandarov <e....@corp.mail.ru> 0.8.6-0
-- 0.8.6 simplify build process
-
-* Fri Oct 24 2014 Magnus Edenhill <rd...@edenhill.se> 0.8.5-0
-- 0.8.5 release
-
-* Mon Aug 18 2014 Magnus Edenhill <rd...@edenhill.se> 0.8.4-0
-- 0.8.4 release
-
-* Mon Mar 17 2014 Magnus Edenhill <vk...@edenhill.se> 0.8.3-0
-- Initial RPM package

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src-cpp/CMakeLists.txt
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src-cpp/CMakeLists.txt b/thirdparty/librdkafka-0.11.1/src-cpp/CMakeLists.txt
deleted file mode 100644
index bcbc4ae..0000000
--- a/thirdparty/librdkafka-0.11.1/src-cpp/CMakeLists.txt
+++ /dev/null
@@ -1,33 +0,0 @@
-add_library(
-    rdkafka++
-    ConfImpl.cpp
-    ConsumerImpl.cpp
-    HandleImpl.cpp
-    KafkaConsumerImpl.cpp
-    MessageImpl.cpp
-    MetadataImpl.cpp
-    ProducerImpl.cpp
-    QueueImpl.cpp
-    RdKafka.cpp
-    TopicImpl.cpp
-    TopicPartitionImpl.cpp
-)
-
-target_link_libraries(rdkafka++ PUBLIC rdkafka)
-
-# Support '#include <rdkafkacpp.h>'
-target_include_directories(rdkafka++ PUBLIC "$<BUILD_INTERFACE:${CMAKE_CURRENT_LIST_DIR}>")
-
-install(
-    TARGETS rdkafka++
-    EXPORT "${targets_export_name}"
-    LIBRARY DESTINATION "${CMAKE_INSTALL_LIBDIR}"
-    ARCHIVE DESTINATION "${CMAKE_INSTALL_LIBDIR}"
-    RUNTIME DESTINATION "${CMAKE_INSTALL_BINDIR}"
-    INCLUDES DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}"
-)
-
-install(
-    FILES "rdkafkacpp.h"
-    DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}/librdkafka"
-)

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src-cpp/ConfImpl.cpp
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src-cpp/ConfImpl.cpp b/thirdparty/librdkafka-0.11.1/src-cpp/ConfImpl.cpp
deleted file mode 100644
index 709c728..0000000
--- a/thirdparty/librdkafka-0.11.1/src-cpp/ConfImpl.cpp
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * librdkafka - Apache Kafka C/C++ library
- *
- * Copyright (c) 2014 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <iostream>
-#include <string>
-#include <list>
-
-#include "rdkafkacpp_int.h"
-
-
-
-RdKafka::ConfImpl::ConfResult RdKafka::ConfImpl::set(const std::string &name,
-						     const std::string &value,
-						     std::string &errstr) {
-  rd_kafka_conf_res_t res;
-  char errbuf[512];
-
-  if (this->conf_type_ == CONF_GLOBAL)
-    res = rd_kafka_conf_set(this->rk_conf_,
-                            name.c_str(), value.c_str(),
-                            errbuf, sizeof(errbuf));
-  else
-    res = rd_kafka_topic_conf_set(this->rkt_conf_,
-                                  name.c_str(), value.c_str(),
-                                  errbuf, sizeof(errbuf));
-
-  if (res != RD_KAFKA_CONF_OK)
-    errstr = errbuf;
-
-  return static_cast<Conf::ConfResult>(res);
-}
-
-
-std::list<std::string> *RdKafka::ConfImpl::dump () {
-
-  const char **arrc;
-  size_t cnt;
-  std::list<std::string> *arr;
-
-  if (rk_conf_)
-    arrc = rd_kafka_conf_dump(rk_conf_, &cnt);
-  else
-    arrc = rd_kafka_topic_conf_dump(rkt_conf_, &cnt);
-
-  arr = new std::list<std::string>();
-  for (int i = 0 ; i < static_cast<int>(cnt) ; i++)
-    arr->push_back(std::string(arrc[i]));
-
-  rd_kafka_conf_dump_free(arrc, cnt);
-  return arr;
-}
-
-RdKafka::Conf *RdKafka::Conf::create (ConfType type) {
-  ConfImpl *conf = new ConfImpl();
-
-  conf->conf_type_ = type;
-
-  if (type == CONF_GLOBAL)
-    conf->rk_conf_ = rd_kafka_conf_new();
-  else
-    conf->rkt_conf_ = rd_kafka_topic_conf_new();
-
-  return conf;
-}

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src-cpp/ConsumerImpl.cpp
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src-cpp/ConsumerImpl.cpp b/thirdparty/librdkafka-0.11.1/src-cpp/ConsumerImpl.cpp
deleted file mode 100644
index bb46877..0000000
--- a/thirdparty/librdkafka-0.11.1/src-cpp/ConsumerImpl.cpp
+++ /dev/null
@@ -1,233 +0,0 @@
-/*
- * librdkafka - Apache Kafka C/C++ library
- *
- * Copyright (c) 2014 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <iostream>
-#include <string>
-#include <list>
-#include <cerrno>
-
-#include "rdkafkacpp_int.h"
-
-RdKafka::Consumer::~Consumer () {}
-
-RdKafka::Consumer *RdKafka::Consumer::create (RdKafka::Conf *conf,
-                                              std::string &errstr) {
-  char errbuf[512];
-  RdKafka::ConfImpl *confimpl = dynamic_cast<RdKafka::ConfImpl *>(conf);
-  RdKafka::ConsumerImpl *rkc = new RdKafka::ConsumerImpl();
-  rd_kafka_conf_t *rk_conf = NULL;
-
-  if (confimpl) {
-    if (!confimpl->rk_conf_) {
-      errstr = "Requires RdKafka::Conf::CONF_GLOBAL object";
-      delete rkc;
-      return NULL;
-    }
-
-    rkc->set_common_config(confimpl);
-
-    rk_conf = rd_kafka_conf_dup(confimpl->rk_conf_);
-  }
-
-  rd_kafka_t *rk;
-  if (!(rk = rd_kafka_new(RD_KAFKA_CONSUMER, rk_conf,
-                          errbuf, sizeof(errbuf)))) {
-    errstr = errbuf;
-    delete rkc;
-    return NULL;
-  }
-
-  rkc->rk_ = rk;
-
-
-  return rkc;
-}
-
-int64_t RdKafka::Consumer::OffsetTail (int64_t offset) {
-  return RD_KAFKA_OFFSET_TAIL(offset);
-}
-
-RdKafka::ErrorCode RdKafka::ConsumerImpl::start (Topic *topic,
-                                                 int32_t partition,
-                                                 int64_t offset) {
-  RdKafka::TopicImpl *topicimpl = dynamic_cast<RdKafka::TopicImpl *>(topic);
-
-  if (rd_kafka_consume_start(topicimpl->rkt_, partition, offset) == -1)
-    return static_cast<RdKafka::ErrorCode>(rd_kafka_last_error());
-
-  return RdKafka::ERR_NO_ERROR;
-}
-
-
-RdKafka::ErrorCode RdKafka::ConsumerImpl::start (Topic *topic,
-                                                 int32_t partition,
-                                                 int64_t offset,
-                                                 Queue *queue) {
-  RdKafka::TopicImpl *topicimpl = dynamic_cast<RdKafka::TopicImpl *>(topic);
-  RdKafka::QueueImpl *queueimpl = dynamic_cast<RdKafka::QueueImpl *>(queue);
-
-  if (rd_kafka_consume_start_queue(topicimpl->rkt_, partition, offset,
-                                   queueimpl->queue_) == -1)
-    return static_cast<RdKafka::ErrorCode>(rd_kafka_last_error());
-
-  return RdKafka::ERR_NO_ERROR;
-}
-
-
-RdKafka::ErrorCode RdKafka::ConsumerImpl::stop (Topic *topic,
-                                                int32_t partition) {
-  RdKafka::TopicImpl *topicimpl = dynamic_cast<RdKafka::TopicImpl *>(topic);
-
-  if (rd_kafka_consume_stop(topicimpl->rkt_, partition) == -1)
-    return static_cast<RdKafka::ErrorCode>(rd_kafka_last_error());
-
-  return RdKafka::ERR_NO_ERROR;
-}
-
-RdKafka::ErrorCode RdKafka::ConsumerImpl::seek (Topic *topic,
-						int32_t partition,
-						int64_t offset,
-						int timeout_ms) {
-  RdKafka::TopicImpl *topicimpl = dynamic_cast<RdKafka::TopicImpl *>(topic);
-
-  if (rd_kafka_seek(topicimpl->rkt_, partition, offset, timeout_ms) == -1)
-    return static_cast<RdKafka::ErrorCode>(rd_kafka_last_error());
-
-  return RdKafka::ERR_NO_ERROR;
-}
-
-RdKafka::Message *RdKafka::ConsumerImpl::consume (Topic *topic,
-                                                  int32_t partition,
-                                                  int timeout_ms) {
-  RdKafka::TopicImpl *topicimpl = dynamic_cast<RdKafka::TopicImpl *>(topic);
-  rd_kafka_message_t *rkmessage;
-
-  rkmessage = rd_kafka_consume(topicimpl->rkt_, partition, timeout_ms);
-  if (!rkmessage)
-    return new RdKafka::MessageImpl(topic,
-                                    static_cast<RdKafka::ErrorCode>
-                                    (rd_kafka_last_error()));
-
-  return new RdKafka::MessageImpl(topic, rkmessage);
-}
-
-namespace {
-  /* Helper struct for `consume_callback'.
-   * Encapsulates the values we need in order to call `rd_kafka_consume_callback'
-   * and keep track of the C++ callback function and `opaque' value.
-   */
-  struct ConsumerImplCallback {
-    ConsumerImplCallback(RdKafka::Topic* topic, RdKafka::ConsumeCb* cb, void* data)
-      : topic(topic), cb_cls(cb), cb_data(data) {
-    }
-    /* This function is the one we give to `rd_kafka_consume_callback', with
-     * the `opaque' pointer pointing to an instance of this struct, in which
-     * we can find the C++ callback and `cb_data'.
-     */
-    static void consume_cb_trampoline(rd_kafka_message_t *msg, void *opaque) {
-      ConsumerImplCallback *instance = static_cast<ConsumerImplCallback*>(opaque);
-      RdKafka::MessageImpl message(instance->topic, msg, false /*don't free*/);
-      instance->cb_cls->consume_cb(message, instance->cb_data);
-    }
-    RdKafka::Topic *topic;
-    RdKafka::ConsumeCb *cb_cls;
-    void *cb_data;
-  };
-}
-
-int RdKafka::ConsumerImpl::consume_callback (RdKafka::Topic* topic,
-                                             int32_t partition,
-                                             int timeout_ms,
-                                             RdKafka::ConsumeCb *consume_cb,
-                                             void *opaque) {
-  RdKafka::TopicImpl *topicimpl = static_cast<RdKafka::TopicImpl *>(topic);
-  ConsumerImplCallback context(topic, consume_cb, opaque);
-  return rd_kafka_consume_callback(topicimpl->rkt_, partition, timeout_ms,
-                                   &ConsumerImplCallback::consume_cb_trampoline, &context);
-}
-
-
-RdKafka::Message *RdKafka::ConsumerImpl::consume (Queue *queue,
-                                                  int timeout_ms) {
-  RdKafka::QueueImpl *queueimpl = dynamic_cast<RdKafka::QueueImpl *>(queue);
-  rd_kafka_message_t *rkmessage;
-
-  rkmessage = rd_kafka_consume_queue(queueimpl->queue_, timeout_ms);
-  if (!rkmessage)
-    return new RdKafka::MessageImpl(NULL,
-                                    static_cast<RdKafka::ErrorCode>
-                                    (rd_kafka_last_error()));
-  /*
-   * Recover our Topic * from the topic conf's opaque field, which we
-   * set in RdKafka::Topic::create() for just this kind of situation.
-   */
-  void *opaque = rd_kafka_topic_opaque(rkmessage->rkt);
-  Topic *topic = static_cast<Topic *>(opaque);
-
-  return new RdKafka::MessageImpl(topic, rkmessage);
-}
-
-namespace {
-  /* Helper struct for `consume_callback' with a Queue.
-   * Encapsulates the values we need in order to call `rd_kafka_consume_callback'
-   * and keep track of the C++ callback function and `opaque' value.
-   */
-  struct ConsumerImplQueueCallback {
-    ConsumerImplQueueCallback(RdKafka::ConsumeCb *cb, void *data)
-      : cb_cls(cb), cb_data(data) {
-    }
-    /* This function is the one we give to `rd_kafka_consume_callback', with
-     * the `opaque' pointer pointing to an instance of this struct, in which
-     * we can find the C++ callback and `cb_data'.
-     */
-    static void consume_cb_trampoline(rd_kafka_message_t *msg, void *opaque) {
-      ConsumerImplQueueCallback *instance = static_cast<ConsumerImplQueueCallback *>(opaque);
-      /*
-       * Recover our Topic * from the topic conf's opaque field, which we
-       * set in RdKafka::Topic::create() for just this kind of situation.
-       */
-      void *topic_opaque = rd_kafka_topic_opaque(msg->rkt);
-      RdKafka::Topic *topic = static_cast<RdKafka::Topic *>(topic_opaque);
-      RdKafka::MessageImpl message(topic, msg, false /*don't free*/);
-      instance->cb_cls->consume_cb(message, instance->cb_data);
-    }
-    RdKafka::ConsumeCb *cb_cls;
-    void *cb_data;
-  };
-}
-
-int RdKafka::ConsumerImpl::consume_callback (Queue *queue,
-                                             int timeout_ms,
-                                             RdKafka::ConsumeCb *consume_cb,
-                                             void *opaque) {
-  RdKafka::QueueImpl *queueimpl = dynamic_cast<RdKafka::QueueImpl *>(queue);
-  ConsumerImplQueueCallback context(consume_cb, opaque);
-  return rd_kafka_consume_callback_queue(queueimpl->queue_, timeout_ms,
-                                         &ConsumerImplQueueCallback::consume_cb_trampoline,
-                                         &context);
-}

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src-cpp/HandleImpl.cpp
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src-cpp/HandleImpl.cpp b/thirdparty/librdkafka-0.11.1/src-cpp/HandleImpl.cpp
deleted file mode 100644
index 3bdccbf..0000000
--- a/thirdparty/librdkafka-0.11.1/src-cpp/HandleImpl.cpp
+++ /dev/null
@@ -1,365 +0,0 @@
-/*
- * librdkafka - Apache Kafka C/C++ library
- *
- * Copyright (c) 2014 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <iostream>
-#include <string>
-#include <list>
-
-#include "rdkafkacpp_int.h"
-
-void RdKafka::consume_cb_trampoline(rd_kafka_message_t *msg, void *opaque) {
-  RdKafka::HandleImpl *handle = static_cast<RdKafka::HandleImpl *>(opaque);
-  RdKafka::Topic* topic = static_cast<Topic *>(rd_kafka_topic_opaque(msg->rkt));
-
-  RdKafka::MessageImpl message(topic, msg, false /*don't free*/);
-
-  handle->consume_cb_->consume_cb(message, opaque);
-}
-
-void RdKafka::log_cb_trampoline (const rd_kafka_t *rk, int level,
-                                 const char *fac, const char *buf) {
-  if (!rk) {
-    rd_kafka_log_print(rk, level, fac, buf);
-    return;
-  }
-
-  void *opaque = rd_kafka_opaque(rk);
-  RdKafka::HandleImpl *handle = static_cast<RdKafka::HandleImpl *>(opaque);
-
-  if (!handle->event_cb_) {
-    rd_kafka_log_print(rk, level, fac, buf);
-    return;
-  }
-
-  RdKafka::EventImpl event(RdKafka::Event::EVENT_LOG,
-                           RdKafka::ERR_NO_ERROR,
-                           static_cast<RdKafka::Event::Severity>(level),
-                           fac, buf);
-
-  handle->event_cb_->event_cb(event);
-}
-
-
-void RdKafka::error_cb_trampoline (rd_kafka_t *rk, int err,
-                                   const char *reason, void *opaque) {
-  RdKafka::HandleImpl *handle = static_cast<RdKafka::HandleImpl *>(opaque);
-
-  RdKafka::EventImpl event(RdKafka::Event::EVENT_ERROR,
-                           static_cast<RdKafka::ErrorCode>(err),
-                           RdKafka::Event::EVENT_SEVERITY_ERROR,
-                           NULL,
-                           reason);
-
-  handle->event_cb_->event_cb(event);
-}
-
-
-void RdKafka::throttle_cb_trampoline (rd_kafka_t *rk, const char *broker_name,
-				      int32_t broker_id,
-				      int throttle_time_ms,
-				      void *opaque) {
-  RdKafka::HandleImpl *handle = static_cast<RdKafka::HandleImpl *>(opaque);
-
-  RdKafka::EventImpl event(RdKafka::Event::EVENT_THROTTLE);
-  event.str_ = broker_name;
-  event.id_ = broker_id;
-  event.throttle_time_ = throttle_time_ms;
-
-  handle->event_cb_->event_cb(event);
-}
-
-
-int RdKafka::stats_cb_trampoline (rd_kafka_t *rk, char *json, size_t json_len,
-                                  void *opaque) {
-  RdKafka::HandleImpl *handle = static_cast<RdKafka::HandleImpl *>(opaque);
-
-  RdKafka::EventImpl event(RdKafka::Event::EVENT_STATS,
-                           RdKafka::ERR_NO_ERROR,
-                           RdKafka::Event::EVENT_SEVERITY_INFO,
-                           NULL, json);
-
-  handle->event_cb_->event_cb(event);
-
-  return 0;
-}
-
-
-int RdKafka::socket_cb_trampoline (int domain, int type, int protocol,
-                                   void *opaque) {
-  RdKafka::HandleImpl *handle = static_cast<RdKafka::HandleImpl *>(opaque);
-
-  return handle->socket_cb_->socket_cb(domain, type, protocol);
-}
-
-int RdKafka::open_cb_trampoline (const char *pathname, int flags, mode_t mode,
-                                 void *opaque) {
-  RdKafka::HandleImpl *handle = static_cast<RdKafka::HandleImpl *>(opaque);
-
-  return handle->open_cb_->open_cb(pathname, flags, static_cast<int>(mode));
-}
-
-RdKafka::ErrorCode RdKafka::HandleImpl::metadata (bool all_topics,
-                                                  const Topic *only_rkt,
-                                                  Metadata **metadatap, 
-                                                  int timeout_ms) {
-
-  const rd_kafka_metadata_t *cmetadatap=NULL;
-
-  rd_kafka_topic_t *topic = only_rkt ? 
-    static_cast<const TopicImpl *>(only_rkt)->rkt_ : NULL;
-
-  const rd_kafka_resp_err_t rc = rd_kafka_metadata(rk_, all_topics, topic,
-                                                   &cmetadatap,timeout_ms);
-
-  *metadatap = (rc == RD_KAFKA_RESP_ERR_NO_ERROR) ? 
-    new RdKafka::MetadataImpl(cmetadatap) : NULL;
-
-  return static_cast<RdKafka::ErrorCode>(rc);
-}
-
-/**
- * Convert a list of C partitions to C++ partitions
- */
-static void c_parts_to_partitions (const rd_kafka_topic_partition_list_t
-                                   *c_parts,
-                                   std::vector<RdKafka::TopicPartition*>
-                                   &partitions) {
-  partitions.resize(c_parts->cnt);
-  for (int i = 0 ; i < c_parts->cnt ; i++)
-    partitions[i] = new RdKafka::TopicPartitionImpl(&c_parts->elems[i]);
-}
-
-static void free_partition_vector (std::vector<RdKafka::TopicPartition*> &v) {
-  for (unsigned int i = 0 ; i < v.size() ; i++)
-    delete v[i];
-  v.clear();
-}
-
-void
-RdKafka::rebalance_cb_trampoline (rd_kafka_t *rk,
-                                  rd_kafka_resp_err_t err,
-                                  rd_kafka_topic_partition_list_t *c_partitions,
-                                  void *opaque) {
-  RdKafka::HandleImpl *handle = static_cast<RdKafka::HandleImpl *>(opaque);
-  std::vector<RdKafka::TopicPartition*> partitions;
-
-  c_parts_to_partitions(c_partitions, partitions);
-
-  handle->rebalance_cb_->rebalance_cb(
-				      dynamic_cast<RdKafka::KafkaConsumer*>(handle),
-				      static_cast<RdKafka::ErrorCode>(err),
-				      partitions);
-
-  free_partition_vector(partitions);
-}
-
-
-void
-RdKafka::offset_commit_cb_trampoline0 (
-    rd_kafka_t *rk,
-    rd_kafka_resp_err_t err,
-    rd_kafka_topic_partition_list_t *c_offsets, void *opaque) {
-  OffsetCommitCb *cb = static_cast<RdKafka::OffsetCommitCb *>(opaque);
-  std::vector<RdKafka::TopicPartition*> offsets;
-
-  if (c_offsets)
-    c_parts_to_partitions(c_offsets, offsets);
-
-  cb->offset_commit_cb(static_cast<RdKafka::ErrorCode>(err), offsets);
-
-  free_partition_vector(offsets);
-}
-
-static void
-offset_commit_cb_trampoline (
-    rd_kafka_t *rk,
-    rd_kafka_resp_err_t err,
-    rd_kafka_topic_partition_list_t *c_offsets, void *opaque) {
-  RdKafka::HandleImpl *handle = static_cast<RdKafka::HandleImpl *>(opaque);
-  RdKafka::offset_commit_cb_trampoline0(rk, err, c_offsets,
-                                        handle->offset_commit_cb_);
-}
-
-
-void RdKafka::HandleImpl::set_common_config (RdKafka::ConfImpl *confimpl) {
-
-  rd_kafka_conf_set_opaque(confimpl->rk_conf_, this);
-
-  if (confimpl->event_cb_) {
-    rd_kafka_conf_set_log_cb(confimpl->rk_conf_,
-                             RdKafka::log_cb_trampoline);
-    rd_kafka_conf_set_error_cb(confimpl->rk_conf_,
-                               RdKafka::error_cb_trampoline);
-    rd_kafka_conf_set_throttle_cb(confimpl->rk_conf_,
-				  RdKafka::throttle_cb_trampoline);
-    rd_kafka_conf_set_stats_cb(confimpl->rk_conf_,
-                               RdKafka::stats_cb_trampoline);
-    event_cb_ = confimpl->event_cb_;
-  }
-
-  if (confimpl->socket_cb_) {
-    rd_kafka_conf_set_socket_cb(confimpl->rk_conf_,
-                                RdKafka::socket_cb_trampoline);
-    socket_cb_ = confimpl->socket_cb_;
-  }
-
-  if (confimpl->open_cb_) {
-#ifndef _MSC_VER
-    rd_kafka_conf_set_open_cb(confimpl->rk_conf_, RdKafka::open_cb_trampoline);
-    open_cb_ = confimpl->open_cb_;
-#endif
-  }
-
-  if (confimpl->rebalance_cb_) {
-    rd_kafka_conf_set_rebalance_cb(confimpl->rk_conf_,
-                                   RdKafka::rebalance_cb_trampoline);
-    rebalance_cb_ = confimpl->rebalance_cb_;
-  }
-
-  if (confimpl->offset_commit_cb_) {
-    rd_kafka_conf_set_offset_commit_cb(confimpl->rk_conf_,
-                                       offset_commit_cb_trampoline);
-    offset_commit_cb_ = confimpl->offset_commit_cb_;
-  }
-
-  if (confimpl->consume_cb_) {
-    rd_kafka_conf_set_consume_cb(confimpl->rk_conf_,
-                                 RdKafka::consume_cb_trampoline);
-    consume_cb_ = confimpl->consume_cb_;
-  }
-
-}
-
-
-RdKafka::ErrorCode
-RdKafka::HandleImpl::pause (std::vector<RdKafka::TopicPartition*> &partitions) {
-  rd_kafka_topic_partition_list_t *c_parts;
-  rd_kafka_resp_err_t err;
-
-  c_parts = partitions_to_c_parts(partitions);
-
-  err = rd_kafka_pause_partitions(rk_, c_parts);
-
-  if (!err)
-    update_partitions_from_c_parts(partitions, c_parts);
-
-  rd_kafka_topic_partition_list_destroy(c_parts);
-
-  return static_cast<RdKafka::ErrorCode>(err);
-}
-
-
-RdKafka::ErrorCode
-RdKafka::HandleImpl::resume (std::vector<RdKafka::TopicPartition*> &partitions) {
-  rd_kafka_topic_partition_list_t *c_parts;
-  rd_kafka_resp_err_t err;
-
-  c_parts = partitions_to_c_parts(partitions);
-
-  err = rd_kafka_resume_partitions(rk_, c_parts);
-
-  if (!err)
-    update_partitions_from_c_parts(partitions, c_parts);
-
-  rd_kafka_topic_partition_list_destroy(c_parts);
-
-  return static_cast<RdKafka::ErrorCode>(err);
-}
-
-RdKafka::Queue *
-RdKafka::HandleImpl::get_partition_queue (const TopicPartition *part) {
-  rd_kafka_queue_t *rkqu;
-  rkqu = rd_kafka_queue_get_partition(rk_,
-                                      part->topic().c_str(),
-                                      part->partition());
-
-  if (rkqu == NULL)
-    return NULL;
-
-  RdKafka::QueueImpl *queueimpl = new RdKafka::QueueImpl;
-  queueimpl->queue_ = rkqu;
-
-  return queueimpl;
-}
-
-RdKafka::ErrorCode
-RdKafka::HandleImpl::set_log_queue (RdKafka::Queue *queue) {
-        rd_kafka_queue_t *rkqu = NULL;
-        if (queue) {
-                QueueImpl *queueimpl = dynamic_cast<QueueImpl *>(queue);
-                rkqu = queueimpl->queue_;
-        }
-        return static_cast<RdKafka::ErrorCode>(
-                rd_kafka_set_log_queue(rk_, rkqu));
-}
-
-namespace RdKafka {
-
-rd_kafka_topic_partition_list_t *
-partitions_to_c_parts (const std::vector<RdKafka::TopicPartition*> &partitions){
-  rd_kafka_topic_partition_list_t *c_parts;
-
-  c_parts = rd_kafka_topic_partition_list_new((int)partitions.size());
-
-  for (unsigned int i = 0 ; i < partitions.size() ; i++) {
-    const RdKafka::TopicPartitionImpl *tpi =
-        dynamic_cast<const RdKafka::TopicPartitionImpl*>(partitions[i]);
-    rd_kafka_topic_partition_t *rktpar =
-      rd_kafka_topic_partition_list_add(c_parts,
-					tpi->topic_.c_str(), tpi->partition_);
-    rktpar->offset = tpi->offset_;
-  }
-
-  return c_parts;
-}
-
-
-/**
- * @brief Update the application provided 'partitions' with info from 'c_parts'
- */
-void
-update_partitions_from_c_parts (std::vector<RdKafka::TopicPartition*> &partitions,
-				const rd_kafka_topic_partition_list_t *c_parts) {
-  for (int i = 0 ; i < c_parts->cnt ; i++) {
-    rd_kafka_topic_partition_t *p = &c_parts->elems[i];
-
-    /* Find corresponding C++ entry */
-    for (unsigned int j = 0 ; j < partitions.size() ; j++) {
-      RdKafka::TopicPartitionImpl *pp =
-	dynamic_cast<RdKafka::TopicPartitionImpl*>(partitions[j]);
-      if (!strcmp(p->topic, pp->topic_.c_str()) &&
-	  p->partition == pp->partition_) {
-	pp->offset_ = p->offset;
-	pp->err_ = static_cast<RdKafka::ErrorCode>(p->err);
-      }
-    }
-  }
-}
-
-};
-
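
As an aside, and not part of the diff: the log, error, throttle and stats trampolines deleted above all dispatch to a single application-supplied RdKafka::EventCb. A minimal sketch of such a callback (class name is an assumption) could look like this:

    // Illustrative sketch (not from the repository): the C++ event callback
    // that HandleImpl's trampolines forward C-level events to.
    class ExampleEventCb : public RdKafka::EventCb {
     public:
      void event_cb (RdKafka::Event &event) {
        switch (event.type()) {
          case RdKafka::Event::EVENT_ERROR:
            std::cerr << "ERROR (" << RdKafka::err2str(event.err()) << "): "
                      << event.str() << std::endl;
            break;
          case RdKafka::Event::EVENT_LOG:
            std::cerr << "LOG-" << event.severity() << "-" << event.fac()
                      << ": " << event.str() << std::endl;
            break;
          default:
            std::cerr << "EVENT " << event.type() << ": "
                      << event.str() << std::endl;
            break;
        }
      }
    };

    // Registered on the global configuration before creating the handle:
    //   conf->set("event_cb", &example_event_cb, errstr);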

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src-cpp/KafkaConsumerImpl.cpp
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src-cpp/KafkaConsumerImpl.cpp b/thirdparty/librdkafka-0.11.1/src-cpp/KafkaConsumerImpl.cpp
deleted file mode 100644
index f4e79d3..0000000
--- a/thirdparty/librdkafka-0.11.1/src-cpp/KafkaConsumerImpl.cpp
+++ /dev/null
@@ -1,257 +0,0 @@
-/*
- * librdkafka - Apache Kafka C/C++ library
- *
- * Copyright (c) 2015 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <string>
-#include <vector>
-
-#include "rdkafkacpp_int.h"
-
-RdKafka::KafkaConsumer::~KafkaConsumer () {}
-
-RdKafka::KafkaConsumer *RdKafka::KafkaConsumer::create (RdKafka::Conf *conf,
-                                                        std::string &errstr) {
-  char errbuf[512];
-  RdKafka::ConfImpl *confimpl = dynamic_cast<RdKafka::ConfImpl *>(conf);
-  RdKafka::KafkaConsumerImpl *rkc = new RdKafka::KafkaConsumerImpl();
-  rd_kafka_conf_t *rk_conf = NULL;
-  size_t grlen;
-
-  if (!confimpl->rk_conf_) {
-    errstr = "Requires RdKafka::Conf::CONF_GLOBAL object";
-    delete rkc;
-    return NULL;
-  }
-
-  if (rd_kafka_conf_get(confimpl->rk_conf_, "group.id",
-                        NULL, &grlen) != RD_KAFKA_CONF_OK ||
-      grlen <= 1 /* terminating null only */) {
-    errstr = "\"group.id\" must be configured";
-    delete rkc;
-    return NULL;
-  }
-
-  rkc->set_common_config(confimpl);
-
-  rk_conf = rd_kafka_conf_dup(confimpl->rk_conf_);
-
-  rd_kafka_t *rk;
-  if (!(rk = rd_kafka_new(RD_KAFKA_CONSUMER, rk_conf,
-                          errbuf, sizeof(errbuf)))) {
-    errstr = errbuf;
-    delete rkc;
-    return NULL;
-  }
-
-  rkc->rk_ = rk;
-
-  /* Redirect handle queue to cgrp's queue to provide a single queue point */
-  rd_kafka_poll_set_consumer(rk);
-
-  return rkc;
-}
-
-
-
-
-
-
-
-RdKafka::ErrorCode
-RdKafka::KafkaConsumerImpl::subscribe (const std::vector<std::string> &topics) {
-  rd_kafka_topic_partition_list_t *c_topics;
-  rd_kafka_resp_err_t err;
-
-  c_topics = rd_kafka_topic_partition_list_new((int)topics.size());
-
-  for (unsigned int i = 0 ; i < topics.size() ; i++)
-    rd_kafka_topic_partition_list_add(c_topics, topics[i].c_str(),
-                                      RD_KAFKA_PARTITION_UA);
-
-  err = rd_kafka_subscribe(rk_, c_topics);
-
-  rd_kafka_topic_partition_list_destroy(c_topics);
-
-  return static_cast<RdKafka::ErrorCode>(err);
-}
-
-
-
-RdKafka::ErrorCode
-RdKafka::KafkaConsumerImpl::unsubscribe () {
-  return static_cast<RdKafka::ErrorCode>(rd_kafka_unsubscribe(this->rk_));
-}
-
-RdKafka::Message *RdKafka::KafkaConsumerImpl::consume (int timeout_ms) {
-  rd_kafka_message_t *rkmessage;
-
-  rkmessage = rd_kafka_consumer_poll(this->rk_, timeout_ms);
-
-  if (!rkmessage)
-    return new RdKafka::MessageImpl(NULL, RdKafka::ERR__TIMED_OUT);
-
-  return new RdKafka::MessageImpl(rkmessage);
-
-}
-
-
-
-RdKafka::ErrorCode
-RdKafka::KafkaConsumerImpl::assignment (std::vector<RdKafka::TopicPartition*> &partitions) {
-  rd_kafka_topic_partition_list_t *c_parts;
-  rd_kafka_resp_err_t err;
-
-  if ((err = rd_kafka_assignment(rk_, &c_parts)))
-    return static_cast<RdKafka::ErrorCode>(err);
-
-  partitions.resize(c_parts->cnt);
-
-  for (int i = 0 ; i < c_parts->cnt ; i++)
-    partitions[i] = new RdKafka::TopicPartitionImpl(&c_parts->elems[i]);
-
-  rd_kafka_topic_partition_list_destroy(c_parts);
-
-  return RdKafka::ERR_NO_ERROR;
-}
-
-
-RdKafka::ErrorCode
-RdKafka::KafkaConsumerImpl::subscription (std::vector<std::string> &topics) {
-  rd_kafka_topic_partition_list_t *c_topics;
-  rd_kafka_resp_err_t err;
-
-  if ((err = rd_kafka_subscription(rk_, &c_topics)))
-    return static_cast<RdKafka::ErrorCode>(err);
-
-  topics.resize(c_topics->cnt);
-  for (int i = 0 ; i < c_topics->cnt ; i++)
-    topics[i] = std::string(c_topics->elems[i].topic);
-
-  rd_kafka_topic_partition_list_destroy(c_topics);
-
-  return RdKafka::ERR_NO_ERROR;
-}
-
-
-RdKafka::ErrorCode
-RdKafka::KafkaConsumerImpl::assign (const std::vector<TopicPartition*> &partitions) {
-  rd_kafka_topic_partition_list_t *c_parts;
-  rd_kafka_resp_err_t err;
-
-  c_parts = partitions_to_c_parts(partitions);
-
-  err = rd_kafka_assign(rk_, c_parts);
-
-  rd_kafka_topic_partition_list_destroy(c_parts);
-  return static_cast<RdKafka::ErrorCode>(err);
-}
-
-
-RdKafka::ErrorCode
-RdKafka::KafkaConsumerImpl::unassign () {
-  return static_cast<RdKafka::ErrorCode>(rd_kafka_assign(rk_, NULL));
-}
-
-
-RdKafka::ErrorCode
-RdKafka::KafkaConsumerImpl::committed (std::vector<RdKafka::TopicPartition*> &partitions, int timeout_ms) {
-  rd_kafka_topic_partition_list_t *c_parts;
-  rd_kafka_resp_err_t err;
-
-  c_parts = partitions_to_c_parts(partitions);
-
-  err = rd_kafka_committed(rk_, c_parts, timeout_ms);
-
-  if (!err) {
-    update_partitions_from_c_parts(partitions, c_parts);
-  }
-
-  rd_kafka_topic_partition_list_destroy(c_parts);
-
-  return static_cast<RdKafka::ErrorCode>(err);
-}
-
-
-RdKafka::ErrorCode
-RdKafka::KafkaConsumerImpl::position (std::vector<RdKafka::TopicPartition*> &partitions) {
-  rd_kafka_topic_partition_list_t *c_parts;
-  rd_kafka_resp_err_t err;
-
-  c_parts = partitions_to_c_parts(partitions);
-
-  err = rd_kafka_position(rk_, c_parts);
-
-  if (!err) {
-    update_partitions_from_c_parts(partitions, c_parts);
-  }
-
-  rd_kafka_topic_partition_list_destroy(c_parts);
-
-  return static_cast<RdKafka::ErrorCode>(err);
-}
-
-
-RdKafka::ErrorCode
-RdKafka::KafkaConsumerImpl::seek (const RdKafka::TopicPartition &partition,
-                                  int timeout_ms) {
-  const RdKafka::TopicPartitionImpl *p =
-    dynamic_cast<const RdKafka::TopicPartitionImpl*>(&partition);
-  rd_kafka_topic_t *rkt;
-
-  if (!(rkt = rd_kafka_topic_new(rk_, p->topic_.c_str(), NULL)))
-    return static_cast<RdKafka::ErrorCode>(rd_kafka_last_error());
-
-  /* FIXME: Use a C API that takes a topic_partition_list_t instead */
-  RdKafka::ErrorCode err =
-    static_cast<RdKafka::ErrorCode>
-    (rd_kafka_seek(rkt, p->partition_, p->offset_, timeout_ms));
-
-  rd_kafka_topic_destroy(rkt);
-
-  return err;
-}
-
-
-
-
-
-RdKafka::ErrorCode
-RdKafka::KafkaConsumerImpl::close () {
-  rd_kafka_resp_err_t err;
-  err = rd_kafka_consumer_close(rk_);
-  if (err)
-    return static_cast<RdKafka::ErrorCode>(err);
-
-  while (rd_kafka_outq_len(rk_) > 0)
-    rd_kafka_poll(rk_, 10);
-  rd_kafka_destroy(rk_);
-
-  return static_cast<RdKafka::ErrorCode>(err);
-}
-
-
-
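
For orientation, and not part of the diff: the high-level KafkaConsumer implementation removed above is typically used along the following lines. Broker address, group id and topic name are placeholders, and error handling is omitted.

    // Illustrative sketch (not from the repository): high-level consumer usage.
    std::string errstr;
    RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
    conf->set("metadata.broker.list", "localhost:9092", errstr);
    conf->set("group.id", "example-group", errstr);

    RdKafka::KafkaConsumer *consumer =
        RdKafka::KafkaConsumer::create(conf, errstr);

    std::vector<std::string> topics;
    topics.push_back("example-topic");
    consumer->subscribe(topics);

    while (run) {                        /* 'run' is an application flag */
      RdKafka::Message *msg = consumer->consume(1000 /* timeout ms */);
      if (msg->err() == RdKafka::ERR_NO_ERROR)
        std::cout << "Read " << msg->len() << " byte(s)" << std::endl;
      delete msg;
    }

    consumer->close();
    delete consumer;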

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src-cpp/Makefile
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src-cpp/Makefile b/thirdparty/librdkafka-0.11.1/src-cpp/Makefile
deleted file mode 100644
index 7b84b67..0000000
--- a/thirdparty/librdkafka-0.11.1/src-cpp/Makefile
+++ /dev/null
@@ -1,58 +0,0 @@
-PKGNAME=	librdkafka
-LIBNAME=	librdkafka++
-LIBVER=		1
-
-CXXSRCS=	RdKafka.cpp ConfImpl.cpp HandleImpl.cpp \
-		ConsumerImpl.cpp ProducerImpl.cpp KafkaConsumerImpl.cpp \
-		TopicImpl.cpp TopicPartitionImpl.cpp MessageImpl.cpp \
-		QueueImpl.cpp MetadataImpl.cpp
-
-HDRS=		rdkafkacpp.h
-
-OBJS=		$(CXXSRCS:%.cpp=%.o)
-
-
-
-all: lib check
-
-
-include ../mklove/Makefile.base
-
-# No linker script/symbol hiding for C++ library
-WITH_LDS=n
-
-# OSX and Cygwin requires linking required libraries
-ifeq ($(_UNAME_S),Darwin)
-	FWD_LINKING_REQ=y
-endif
-ifeq ($(_UNAME_S),AIX)
-	FWD_LINKING_REQ=y
-endif
-ifeq ($(shell uname -o 2>/dev/null),Cygwin)
-	FWD_LINKING_REQ=y
-endif
-
-# Ignore previously defined library dependencies for the C library,
-# we'll get those dependencies through the C library linkage.
-LIBS := -L../src -lrdkafka -lstdc++
-
-CHECK_FILES+= $(LIBFILENAME) $(LIBNAME).a
-
-
-file-check: lib
-check: file-check
-
-install: lib-install
-
-clean: lib-clean
-
-ifeq ($(WITH_LDS),y)
-# Enable linker script if supported by platform
-LIB_LDFLAGS+= $(LDFLAG_LINKERSCRIPT)$(LIBNAME).lds
-endif
-
-$(LIBNAME).lds: $(HDRS)
-	@(printf "$(MKL_YELLOW)Generating linker script $@ from $(HDRS)$(MKL_CLR_RESET)\n" ; \
-	  cat ../src/rdkafka.h | ../lds-gen.py > $@)
-
--include $(DEPS)

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src-cpp/MessageImpl.cpp
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src-cpp/MessageImpl.cpp b/thirdparty/librdkafka-0.11.1/src-cpp/MessageImpl.cpp
deleted file mode 100644
index 9562402..0000000
--- a/thirdparty/librdkafka-0.11.1/src-cpp/MessageImpl.cpp
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * librdkafka - Apache Kafka C/C++ library
- *
- * Copyright (c) 2014 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <iostream>
-#include <string>
-#include <list>
-#include <cerrno>
-
-#include "rdkafkacpp_int.h"
-
-
-RdKafka::Message::~Message() {}
-

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src-cpp/MetadataImpl.cpp
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src-cpp/MetadataImpl.cpp b/thirdparty/librdkafka-0.11.1/src-cpp/MetadataImpl.cpp
deleted file mode 100644
index c2869f5..0000000
--- a/thirdparty/librdkafka-0.11.1/src-cpp/MetadataImpl.cpp
+++ /dev/null
@@ -1,151 +0,0 @@
-/*
- * librdkafka - Apache Kafka C/C++ library
- *
- * Copyright (c) 2014 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "rdkafkacpp_int.h"
-
-using namespace RdKafka;
-
-BrokerMetadata::~BrokerMetadata() {};
-PartitionMetadata::~PartitionMetadata() {};
-TopicMetadata::~TopicMetadata() {};
-Metadata::~Metadata() {};
-
-
-/**
- * Metadata: Broker information handler implementation
- */
-class BrokerMetadataImpl : public BrokerMetadata {
- public:
-  BrokerMetadataImpl(const rd_kafka_metadata_broker_t *broker_metadata)
-  :broker_metadata_(broker_metadata),host_(broker_metadata->host) {}
-
-  int32_t      id() const{return broker_metadata_->id;}
-
-  const std::string host() const {return host_;}
-  int port() const {return broker_metadata_->port;}
-
-  virtual ~BrokerMetadataImpl() {}
-
- private:
-  const rd_kafka_metadata_broker_t *broker_metadata_;
-  const std::string host_;
-};
-
-/**
- * Metadata: Partition information handler
- */
-class PartitionMetadataImpl : public PartitionMetadata {
- public:
-  // @TODO too much memory copy? maybe we should create a new vector class that read directly from C arrays?
-  // @TODO use auto_ptr?
-  PartitionMetadataImpl(const rd_kafka_metadata_partition_t *partition_metadata)
-  :partition_metadata_(partition_metadata) {
-    replicas_.reserve(partition_metadata->replica_cnt);
-    for(int i=0;i<partition_metadata->replica_cnt;++i)
-      replicas_.push_back(partition_metadata->replicas[i]);
-
-    isrs_.reserve(partition_metadata->isr_cnt);
-    for(int i=0;i<partition_metadata->isr_cnt;++i)
-      isrs_.push_back(partition_metadata->isrs[i]);
-  }
-
-  int32_t                    id() const {
-    return partition_metadata_->id;
-  }
-  int32_t                    leader() const {
-    return partition_metadata_->leader;
-  }
-  ErrorCode                  err() const {
-    return static_cast<ErrorCode>(partition_metadata_->err);
-  }
-
-  const std::vector<int32_t> *replicas() const {return &replicas_;}
-  const std::vector<int32_t> *isrs() const {return &isrs_;}
-
-  ~PartitionMetadataImpl() {};
-
- private:
-  const rd_kafka_metadata_partition_t *partition_metadata_;
-  std::vector<int32_t> replicas_,isrs_;
-};
-
-/**
- * Metadata: Topic information handler
- */
-class TopicMetadataImpl : public TopicMetadata{
- public:
-  TopicMetadataImpl(const rd_kafka_metadata_topic_t *topic_metadata)
-  :topic_metadata_(topic_metadata),topic_(topic_metadata->topic) {
-    partitions_.reserve(topic_metadata->partition_cnt);
-    for(int i=0;i<topic_metadata->partition_cnt;++i)
-      partitions_.push_back(
-        new PartitionMetadataImpl(&topic_metadata->partitions[i])
-      );
-  }
-
-  ~TopicMetadataImpl(){
-    for(size_t i=0;i<partitions_.size();++i)
-      delete partitions_[i];
-  }
-
-  const std::string topic() const {return topic_;}
-  const std::vector<const PartitionMetadata *> *partitions() const {
-    return &partitions_;
-  }
-  ErrorCode err() const {return static_cast<ErrorCode>(topic_metadata_->err);}
-
- private:
-  const rd_kafka_metadata_topic_t *topic_metadata_;
-  const std::string topic_;
-  std::vector<const PartitionMetadata *> partitions_;
-
-};
-
-MetadataImpl::MetadataImpl(const rd_kafka_metadata_t *metadata)
-:metadata_(metadata)
-{
-  brokers_.reserve(metadata->broker_cnt);
-  for(int i=0;i<metadata->broker_cnt;++i)
-    brokers_.push_back(new BrokerMetadataImpl(&metadata->brokers[i]));
-
-  topics_.reserve(metadata->topic_cnt);
-  for(int i=0;i<metadata->topic_cnt;++i)
-    topics_.push_back(new TopicMetadataImpl(&metadata->topics[i]));
-
-}
-
-MetadataImpl::~MetadataImpl() {
-  for(size_t i=0;i<brokers_.size();++i)
-    delete brokers_[i];
-  for(size_t i=0;i<topics_.size();++i)
-    delete topics_[i];
-
-
-  if(metadata_)
-    rd_kafka_metadata_destroy(metadata_);
-}
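
For reference, and not part of the diff: the Metadata wrappers deleted above are obtained through Handle::metadata() and walked as in this sketch; 'handle' stands for any existing producer or consumer handle and is an assumption of the example.

    // Illustrative sketch (not from the repository): fetching and walking
    // cluster metadata via the C++ wrappers implemented above.
    RdKafka::Metadata *metadata = NULL;
    RdKafka::ErrorCode err =
        handle->metadata(true /* all topics */, NULL, &metadata, 5000);
    if (err == RdKafka::ERR_NO_ERROR) {
      const RdKafka::Metadata::TopicMetadataVector *topics = metadata->topics();
      for (size_t i = 0; i < topics->size(); i++)
        std::cout << (*topics)[i]->topic() << " has "
                  << (*topics)[i]->partitions()->size()
                  << " partition(s)" << std::endl;
    }
    delete metadata;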

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src-cpp/ProducerImpl.cpp
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src-cpp/ProducerImpl.cpp b/thirdparty/librdkafka-0.11.1/src-cpp/ProducerImpl.cpp
deleted file mode 100644
index e880573..0000000
--- a/thirdparty/librdkafka-0.11.1/src-cpp/ProducerImpl.cpp
+++ /dev/null
@@ -1,167 +0,0 @@
-/*
- * librdkafka - Apache Kafka C/C++ library
- *
- * Copyright (c) 2014 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <iostream>
-#include <string>
-#include <list>
-#include <cerrno>
-
-#include "rdkafkacpp_int.h"
-
-
-RdKafka::Producer::~Producer () {
-
-}
-
-static void dr_msg_cb_trampoline (rd_kafka_t *rk,
-                                  const rd_kafka_message_t *
-                                  rkmessage,
-                                  void *opaque) {
-  RdKafka::HandleImpl *handle = static_cast<RdKafka::HandleImpl *>(opaque);
-  RdKafka::MessageImpl message(NULL, rkmessage);
-  handle->dr_cb_->dr_cb(message);
-}
-
-
-
-RdKafka::Producer *RdKafka::Producer::create (RdKafka::Conf *conf,
-                                              std::string &errstr) {
-  char errbuf[512];
-  RdKafka::ConfImpl *confimpl = dynamic_cast<RdKafka::ConfImpl *>(conf);
-  RdKafka::ProducerImpl *rkp = new RdKafka::ProducerImpl();
-  rd_kafka_conf_t *rk_conf = NULL;
-
-  if (confimpl) {
-    if (!confimpl->rk_conf_) {
-      errstr = "Requires RdKafka::Conf::CONF_GLOBAL object";
-      delete rkp;
-      return NULL;
-    }
-
-    rkp->set_common_config(confimpl);
-
-    rk_conf = rd_kafka_conf_dup(confimpl->rk_conf_);
-
-    if (confimpl->dr_cb_) {
-      rd_kafka_conf_set_dr_msg_cb(rk_conf, dr_msg_cb_trampoline);
-      rkp->dr_cb_ = confimpl->dr_cb_;
-    }
-  }
-
-
-  rd_kafka_t *rk;
-  if (!(rk = rd_kafka_new(RD_KAFKA_PRODUCER, rk_conf,
-                          errbuf, sizeof(errbuf)))) {
-    errstr = errbuf;
-    delete rkp;
-    return NULL;
-  }
-
-  rkp->rk_ = rk;
-
-  return rkp;
-}
-
-
-RdKafka::ErrorCode RdKafka::ProducerImpl::produce (RdKafka::Topic *topic,
-                                                   int32_t partition,
-                                                   int msgflags,
-                                                   void *payload, size_t len,
-                                                   const std::string *key,
-                                                   void *msg_opaque) {
-  RdKafka::TopicImpl *topicimpl = dynamic_cast<RdKafka::TopicImpl *>(topic);
-
-  if (rd_kafka_produce(topicimpl->rkt_, partition, msgflags,
-                       payload, len,
-                       key ? key->c_str() : NULL, key ? key->size() : 0,
-                       msg_opaque) == -1)
-    return static_cast<RdKafka::ErrorCode>(rd_kafka_last_error());
-
-  return RdKafka::ERR_NO_ERROR;
-}
-
-
-RdKafka::ErrorCode RdKafka::ProducerImpl::produce (RdKafka::Topic *topic,
-                                                   int32_t partition,
-                                                   int msgflags,
-                                                   void *payload, size_t len,
-                                                   const void *key,
-                                                   size_t key_len,
-                                                   void *msg_opaque) {
-  RdKafka::TopicImpl *topicimpl = dynamic_cast<RdKafka::TopicImpl *>(topic);
-
-  if (rd_kafka_produce(topicimpl->rkt_, partition, msgflags,
-                       payload, len, key, key_len,
-                       msg_opaque) == -1)
-    return static_cast<RdKafka::ErrorCode>(rd_kafka_last_error());
-
-  return RdKafka::ERR_NO_ERROR;
-}
-
-
-RdKafka::ErrorCode
-RdKafka::ProducerImpl::produce (RdKafka::Topic *topic,
-                                int32_t partition,
-                                const std::vector<char> *payload,
-                                const std::vector<char> *key,
-                                void *msg_opaque) {
-  RdKafka::TopicImpl *topicimpl = dynamic_cast<RdKafka::TopicImpl *>(topic);
-
-  if (rd_kafka_produce(topicimpl->rkt_, partition, RD_KAFKA_MSG_F_COPY,
-                       payload ? (void *)&(*payload)[0] : NULL,
-                       payload ? payload->size() : 0,
-                       key ? &(*key)[0] : NULL, key ? key->size() : 0,
-                       msg_opaque) == -1)
-    return static_cast<RdKafka::ErrorCode>(rd_kafka_last_error());
-
-  return RdKafka::ERR_NO_ERROR;
-
-}
-
-
-RdKafka::ErrorCode
-RdKafka::ProducerImpl::produce (const std::string topic_name,
-                                int32_t partition, int msgflags,
-                                void *payload, size_t len,
-                                const void *key, size_t key_len,
-                                int64_t timestamp,
-                                void *msg_opaque) {
-  return
-    static_cast<RdKafka::ErrorCode>
-    (
-     rd_kafka_producev(rk_,
-                       RD_KAFKA_V_TOPIC(topic_name.c_str()),
-                       RD_KAFKA_V_PARTITION(partition),
-                       RD_KAFKA_V_MSGFLAGS(msgflags),
-                       RD_KAFKA_V_VALUE(payload, len),
-                       RD_KAFKA_V_KEY(key, key_len),
-                       RD_KAFKA_V_TIMESTAMP(timestamp),
-                       RD_KAFKA_V_OPAQUE(msg_opaque),
-                       RD_KAFKA_V_END)
-     );
-}
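
For orientation, and not part of the diff: the Topic-object produce() path removed above is typically exercised as in this sketch. Broker and topic names are placeholders and error handling is abbreviated.

    // Illustrative sketch (not from the repository): producing one message
    // through the C++ Producer API implemented above.
    std::string errstr;
    RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
    conf->set("metadata.broker.list", "localhost:9092", errstr);

    RdKafka::Producer *producer = RdKafka::Producer::create(conf, errstr);
    RdKafka::Topic *topic =
        RdKafka::Topic::create(producer, "example-topic", NULL, errstr);

    std::string payload = "hello";
    RdKafka::ErrorCode err =
        producer->produce(topic, RdKafka::Topic::PARTITION_UA,
                          RdKafka::Producer::RK_MSG_COPY,
                          const_cast<char *>(payload.c_str()), payload.size(),
                          NULL /* key */, NULL /* msg_opaque */);
    if (err != RdKafka::ERR_NO_ERROR)
      std::cerr << "produce failed: " << RdKafka::err2str(err) << std::endl;

    producer->poll(0);                   /* serve delivery report callbacks */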

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src-cpp/QueueImpl.cpp
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src-cpp/QueueImpl.cpp b/thirdparty/librdkafka-0.11.1/src-cpp/QueueImpl.cpp
deleted file mode 100644
index c64b1c1..0000000
--- a/thirdparty/librdkafka-0.11.1/src-cpp/QueueImpl.cpp
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * librdkafka - Apache Kafka C/C++ library
- *
- * Copyright (c) 2014 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <cerrno>
-
-#include "rdkafkacpp_int.h"
-
-RdKafka::Queue::~Queue () {
-
-}
-
-RdKafka::Queue *RdKafka::Queue::create (Handle *base) {
-  RdKafka::QueueImpl *queueimpl = new RdKafka::QueueImpl;
-  queueimpl->queue_ = rd_kafka_queue_new(dynamic_cast<HandleImpl*>(base)->rk_);
-  return queueimpl;
-}
-
-RdKafka::ErrorCode
-RdKafka::QueueImpl::forward (Queue *queue) {
-  if (!queue) {
-    rd_kafka_queue_forward(queue_, NULL);
-  } else {
-    QueueImpl *queueimpl = dynamic_cast<QueueImpl *>(queue);
-    rd_kafka_queue_forward(queue_, queueimpl->queue_);
-  }
-  return RdKafka::ERR_NO_ERROR;
-}
-
-RdKafka::Message *RdKafka::QueueImpl::consume (int timeout_ms) {
-  rd_kafka_message_t *rkmessage;
-  rkmessage = rd_kafka_consume_queue(queue_, timeout_ms);
-
-  if (!rkmessage)
-    return new RdKafka::MessageImpl(NULL, RdKafka::ERR__TIMED_OUT);
-
-  return new RdKafka::MessageImpl(rkmessage);
-}
-
-int RdKafka::QueueImpl::poll (int timeout_ms) {
-        return rd_kafka_queue_poll_callback(queue_, timeout_ms);
-}

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src-cpp/README.md
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src-cpp/README.md b/thirdparty/librdkafka-0.11.1/src-cpp/README.md
deleted file mode 100644
index a484589..0000000
--- a/thirdparty/librdkafka-0.11.1/src-cpp/README.md
+++ /dev/null
@@ -1,16 +0,0 @@
-librdkafka C++ interface
-========================
-
-**See rdkafkacpp.h for the public C++ API**
-
-
-
-Maintainer notes for the C++ interface:
-
- * The public C++ interface (rdkafkacpp.h) does not include the
-   public C interface (rdkafka.h) in any way; this means that all
-   constants, flags, etc, must be kept in sync manually between the two
-   header files.
-   A regression test should be implemented that checks this is true.
-
- * The public C++ interface is provided using pure virtual abstract classes.

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src-cpp/RdKafka.cpp
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src-cpp/RdKafka.cpp b/thirdparty/librdkafka-0.11.1/src-cpp/RdKafka.cpp
deleted file mode 100644
index 7b67a7b..0000000
--- a/thirdparty/librdkafka-0.11.1/src-cpp/RdKafka.cpp
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * librdkafka - Apache Kafka C/C++ library
- *
- * Copyright (c) 2014 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <string>
-
-#include "rdkafkacpp_int.h"
-
-int RdKafka::version () {
-  return rd_kafka_version();
-}
-
-std::string RdKafka::version_str () {
-  return std::string(rd_kafka_version_str());
-}
-
-std::string RdKafka::get_debug_contexts() {
-	return std::string(RD_KAFKA_DEBUG_CONTEXTS);
-}
-
-std::string RdKafka::err2str (RdKafka::ErrorCode err) {
-  return std::string(rd_kafka_err2str(static_cast<rd_kafka_resp_err_t>(err)));
-}
-
-int RdKafka::wait_destroyed (int timeout_ms) {
-  return rd_kafka_wait_destroyed(timeout_ms);
-}
-

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src-cpp/TopicImpl.cpp
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src-cpp/TopicImpl.cpp b/thirdparty/librdkafka-0.11.1/src-cpp/TopicImpl.cpp
deleted file mode 100644
index f330513..0000000
--- a/thirdparty/librdkafka-0.11.1/src-cpp/TopicImpl.cpp
+++ /dev/null
@@ -1,124 +0,0 @@
-/*
- * librdkafka - Apache Kafka C/C++ library
- *
- * Copyright (c) 2014 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <iostream>
-#include <string>
-#include <list>
-#include <cerrno>
-
-#include "rdkafkacpp_int.h"
-
-const int32_t RdKafka::Topic::PARTITION_UA = RD_KAFKA_PARTITION_UA;
-
-const int64_t RdKafka::Topic::OFFSET_BEGINNING = RD_KAFKA_OFFSET_BEGINNING;
-
-const int64_t RdKafka::Topic::OFFSET_END = RD_KAFKA_OFFSET_END;
-
-const int64_t RdKafka::Topic::OFFSET_STORED = RD_KAFKA_OFFSET_STORED;
-
-const int64_t RdKafka::Topic::OFFSET_INVALID = RD_KAFKA_OFFSET_INVALID;
-
-RdKafka::Topic::~Topic () {
-
-}
-
-static int32_t partitioner_cb_trampoline (const rd_kafka_topic_t *rkt,
-                                          const void *keydata,
-                                          size_t keylen,
-                                          int32_t partition_cnt,
-                                          void *rkt_opaque,
-                                          void *msg_opaque) {
-  RdKafka::TopicImpl *topicimpl = static_cast<RdKafka::TopicImpl *>(rkt_opaque);
-  std::string key(static_cast<const char *>(keydata), keylen);
-  return topicimpl->partitioner_cb_->partitioner_cb(topicimpl, &key,
-                                                    partition_cnt, msg_opaque);
-}
-
-static int32_t partitioner_kp_cb_trampoline (const rd_kafka_topic_t *rkt,
-                                             const void *keydata,
-                                             size_t keylen,
-                                             int32_t partition_cnt,
-                                             void *rkt_opaque,
-                                             void *msg_opaque) {
-  RdKafka::TopicImpl *topicimpl = static_cast<RdKafka::TopicImpl *>(rkt_opaque);
-  return topicimpl->partitioner_kp_cb_->partitioner_cb(topicimpl,
-                                                       keydata, keylen,
-                                                       partition_cnt,
-                                                       msg_opaque);
-}
-
-
-
-RdKafka::Topic *RdKafka::Topic::create (Handle *base,
-					const std::string &topic_str,
-					Conf *conf,
-					std::string &errstr) {
-  RdKafka::ConfImpl *confimpl = static_cast<RdKafka::ConfImpl *>(conf);
-  rd_kafka_topic_t *rkt;
-  rd_kafka_topic_conf_t *rkt_conf;
-
-  RdKafka::TopicImpl *topic = new RdKafka::TopicImpl();
-
-  if (!confimpl)
-    rkt_conf = rd_kafka_topic_conf_new();
-  else /* Make a copy of conf struct to allow Conf reuse. */
-    rkt_conf = rd_kafka_topic_conf_dup(confimpl->rkt_conf_);
-
-  /* Set topic opaque to the topic so that we can reach our topic object
-   * from whatever callbacks get registered.
-   * The application itself will not need these opaques since their
-   * callbacks are class based. */
-  rd_kafka_topic_conf_set_opaque(rkt_conf, static_cast<void *>(topic));
-
-  if (confimpl) {
-    if (confimpl->partitioner_cb_) {
-      rd_kafka_topic_conf_set_partitioner_cb(rkt_conf,
-                                             partitioner_cb_trampoline);
-      topic->partitioner_cb_ = confimpl->partitioner_cb_;
-    } else if (confimpl->partitioner_kp_cb_) {
-      rd_kafka_topic_conf_set_partitioner_cb(rkt_conf,
-                                             partitioner_kp_cb_trampoline);
-      topic->partitioner_kp_cb_ = confimpl->partitioner_kp_cb_;
-    }
-  }
-
-
-  if (!(rkt = rd_kafka_topic_new(dynamic_cast<HandleImpl*>(base)->rk_,
-				 topic_str.c_str(), rkt_conf))) {
-    errstr = rd_kafka_err2str(rd_kafka_last_error());
-    delete topic;
-    rd_kafka_topic_conf_destroy(rkt_conf);
-    return NULL;
-  }
-
-  topic->rkt_ = rkt;
-
-  return topic;
-
-}
-
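
As an aside, and not part of the diff: the partitioner trampolines deleted above dispatch to an application-supplied RdKafka::PartitionerCb registered on the topic configuration. A minimal sketch (class name and hash are assumptions) could be:

    // Illustrative sketch (not from the repository): a trivial custom
    // partitioner dispatched to by partitioner_cb_trampoline above.
    class ExampleHashPartitionerCb : public RdKafka::PartitionerCb {
     public:
      int32_t partitioner_cb (const RdKafka::Topic *topic,
                              const std::string *key,
                              int32_t partition_cnt,
                              void *msg_opaque) {
        unsigned int h = 5381;           /* djb2-style hash of the key */
        for (size_t i = 0; key && i < key->size(); i++)
          h = ((h << 5) + h) + (unsigned char)(*key)[i];
        return (int32_t)(h % partition_cnt);
      }
    };

    // Registered on the topic configuration before Topic::create():
    //   tconf->set("partitioner_cb", &example_partitioner, errstr);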

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src-cpp/TopicPartitionImpl.cpp
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src-cpp/TopicPartitionImpl.cpp b/thirdparty/librdkafka-0.11.1/src-cpp/TopicPartitionImpl.cpp
deleted file mode 100644
index 71a688c..0000000
--- a/thirdparty/librdkafka-0.11.1/src-cpp/TopicPartitionImpl.cpp
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * librdkafka - Apache Kafka C/C++ library
- *
- * Copyright (c) 2015 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <iostream>
-#include <string>
-#include <vector>
-
-#include "rdkafkacpp_int.h"
-
-RdKafka::TopicPartition::~TopicPartition () {
-}
-
-RdKafka::TopicPartition *
-RdKafka::TopicPartition::create (const std::string &topic, int partition) {
-  return new TopicPartitionImpl(topic, partition);
-}
-
-RdKafka::TopicPartition *
-RdKafka::TopicPartition::create (const std::string &topic, int partition,
-                                 int64_t offset) {
-  return new TopicPartitionImpl(topic, partition, offset);
-}
-
-void
-RdKafka::TopicPartition::destroy (std::vector<TopicPartition*> &partitions) {
-  for (std::vector<TopicPartition*>::iterator it = partitions.begin() ;
-       it != partitions.end(); ++it)
-    delete(*it);
-  partitions.clear();
-}


[49/51] [partial] nifi-minifi-cpp git commit: MINIFICPP-512 - upgrade to librdkafka 0.11.4

Posted by ph...@apache.org.
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/INTRODUCTION.md
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/INTRODUCTION.md b/thirdparty/librdkafka-0.11.1/INTRODUCTION.md
deleted file mode 100644
index eab9c0d..0000000
--- a/thirdparty/librdkafka-0.11.1/INTRODUCTION.md
+++ /dev/null
@@ -1,566 +0,0 @@
-//@file INTRODUCTION.md
-# Introduction to librdkafka - the Apache Kafka C/C++ client library
-
-
-librdkafka is a high performance C implementation of the Apache
-Kafka client, providing a reliable and performant client for production use.
-librdkafka also provides a native C++ interface.
-
-## Contents
-
-The following chapters are available in this document
-
-  * Performance
-    * Performance numbers
-    * High throughput
-    * Low latency
-    * Compression
-  * Message reliability
-  * Usage
-    * Documentation
-    * Initialization
-    * Configuration
-    * Threads and callbacks
-    * Brokers
-    * Producer API
-    * Consumer API
-  * Appendix
-    * Test details
-  
-
-
-
-## Performance
-
-librdkafka is a multi-threaded library designed for use on modern hardware and
-it attempts to keep memory copying to a minimum. The payload of produced or
-consumed messages may pass through without any copying
-(if so desired by the application) putting no limit on message sizes.
-
-librdkafka allows you to decide if high throughput is the name of the game,
-or if a low latency service is required, all through the configuration
-property interface.
-
-The two most important configuration properties for performance tuning are:
-
-  * batch.num.messages - the minimum number of messages to wait for to
-	  accumulate in the local queue before sending off a message set.
-  * queue.buffering.max.ms - how long to wait for batch.num.messages to
-	  fill up in the local queue.
-
-
-### Performance numbers
-
-The following performance numbers stem from tests using the following setup:
-
-  * Intel Quad Core i7 at 3.4GHz, 8GB of memory
-  * Disk performance has been shortcut by setting the brokers' flush
-	configuration properties as follows:
-	* `log.flush.interval.messages=10000000`
-	* `log.flush.interval.ms=100000`
-  * Two brokers running on the same machine as librdkafka.
-  * One topic with two partitions.
-  * Each broker is leader for one partition each.
-  * Using `rdkafka_performance` program available in the `examples` subdir.
-
-
-
-	
-
-**Test results**
-
-  * **Test1**: 2 brokers, 2 partitions, required.acks=2, 100 byte messages: 
-	  **850000 messages/second**, **85 MB/second**
-
-  * **Test2**: 1 broker, 1 partition, required.acks=0, 100 byte messages: 
-	  **710000 messages/second**, **71 MB/second**
-	  
-  * **Test3**: 2 brokers, 2 partitions, required.acks=2, 100 byte messages,
-	  snappy compression:
-	  **300000 messages/second**, **30 MB/second**
-
-  * **Test4**: 2 brokers, 2 partitions, required.acks=2, 100 byte messages,
-	  gzip compression:
-	  **230000 messages/second**, **23 MB/second**
-
-
-
-**Note**: See the *Test details* chapter at the end of this document for
-	information about the commands executed, etc.
-
-**Note**: Consumer performance tests will be announced soon.
-
-
-### High throughput
-
-The key to high throughput is message batching - waiting for a certain number
-of messages to accumulate in the local queue before sending them off in
-one large message set or batch to the peer. This amortizes the messaging
-overhead and eliminates the adverse effect of the round trip time (rtt).
-
-The default settings, batch.num.messages=10000 and queue.buffering.max.ms=1000,
-are suitable for high throughput. This allows librdkafka to wait up to
-1000 ms for up to 10000 messages to accumulate in the local queue before
-sending the accumulated messages to the broker.
-
-These settings are set globally (`rd_kafka_conf_t`) but apply on a
-per topic+partition basis.
-
-
-### Low latency
-
-When low latency messaging is required, "queue.buffering.max.ms" should be
-tuned to the maximum permitted producer-side latency.
-Setting queue.buffering.max.ms to 1 will make sure messages are sent as
-soon as possible. See [How to decrease message latency](https://github.com/edenhill/librdkafka/wiki/How-to-decrease-message-latency)
-for more details.
-
-
-### Compression
-
-Producer message compression is enabled through the "compression.codec"
-configuration property.
-
-Compression is performed on the batch of messages in the local queue, the
-larger the batch the higher the likelihood of a higher compression ratio.
-The local batch queue size is controlled through the "batch.num.messages" and
-"queue.buffering.max.ms" configuration properties as described in the
-**High throughput** chapter above.
-
-
-
-## Message reliability
-
-Message reliability is an important feature of librdkafka - an application
-can rely fully on librdkafka to deliver a message according to the specified
-configuration ("request.required.acks" and "message.send.max.retries", etc).
-
-If the topic configuration property "request.required.acks" is set to wait
-for message commit acknowledgements from brokers (any value but 0, see
-[`CONFIGURATION.md`](https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md)
-for specifics) then librdkafka will hold on to the message until
-all expected acks have been received, gracefully handling the following events:
-     
-  * Broker connection failure
-  * Topic leader change
-  * Produce errors signaled by the broker
-
-This is handled automatically by librdkafka and the application does not need
-to take any action at any of the above events.
-The message will be resent up to "message.send.max.retries" times before
-reporting a failure back to the application.
-
-The delivery report callback is used by librdkafka to signal the status of
-a message back to the application, it will be called once for each message
-to report the status of message delivery:
-
-  * If `error_code` is non-zero the message delivery failed and the error_code
-    indicates the nature of the failure (`rd_kafka_resp_err_t` enum).
-  * If `error_code` is zero the message has been successfully delivered.
-
-See Producer API chapter for more details on delivery report callback usage.
-
-The delivery report callback is optional.
-
-
-
-
-
-
-## Usage
-
-### Documentation
-
-The librdkafka API is documented in the
-[`rdkafka.h`](https://github.com/edenhill/librdkafka/blob/master/src/rdkafka.h)
-header file, the configuration properties are documented in 
-[`CONFIGURATION.md`](https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md)
-
-### Initialization
-
-The application needs to instantiate a top-level object `rd_kafka_t` which is
-the base container, providing global configuration and shared state.
-It is created by calling `rd_kafka_new()`.
-
-It also needs to instantiate one or more topics (`rd_kafka_topic_t`) to be used
-for producing to or consuming from. The topic object holds topic-specific
-configuration and will be internally populated with a mapping of all available
-partitions and their leader brokers.
-It is created by calling `rd_kafka_topic_new()`.
-
-Both `rd_kafka_t` and `rd_kafka_topic_t` come with a configuration API which
-is optional.
-Not using the API will cause librdkafka to use its default values which are
-documented in [`CONFIGURATION.md`](https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md).
-
-**Note**: An application may create multiple `rd_kafka_t` objects and
-	they share no state.
-
-**Note**: An `rd_kafka_topic_t` object may only be used with the `rd_kafka_t`
-	object it was created from.
-
-
-
-### Configuration
-
-To ease integration with the official Apache Kafka software and lower
-the learning curve, librdkafka implements the same configuration
-properties as those found in the official Apache Kafka clients.
-
-Configuration is applied prior to object creation using the
-`rd_kafka_conf_set()` and `rd_kafka_topic_conf_set()` APIs.
-
-**Note**: The `rd_kafka.._conf_t` objects are not reusable after they have been
-	passed to `rd_kafka.._new()`.
-	The application does not need to free any config resources after a
-	`rd_kafka.._new()` call.
-
-#### Example
-
-    rd_kafka_conf_t *conf;
-    char errstr[512];
-    
-    conf = rd_kafka_conf_new();
-    rd_kafka_conf_set(conf, "compression.codec", "snappy", errstr, sizeof(errstr));
-    rd_kafka_conf_set(conf, "batch.num.messages", "100", errstr, sizeof(errstr));
-    
-    rd_kafka_new(RD_KAFKA_PRODUCER, conf);
-
-
-### Threads and callbacks
-
-librdkafka uses multiple threads internally to fully utilize modern hardware.
-The API is completely thread-safe and the calling application may call any
-of the API functions from any of its own threads at any time.
-
-A poll-based API is used to provide signaling back to the application:
-the application should call `rd_kafka_poll()` at regular intervals
-(a minimal poll loop sketch follows the callback lists below).
-The poll API will call the following configured callbacks (optional):
-
-  * message delivery report callback - signals that a message has been
-    delivered or failed delivery, allowing the application to take action
-    and to release any application resources used in the message.
-  * error callback - signals an error. These errors are usually of an
-    informational nature, e.g., failure to connect to a broker, and the
-    application usually does not need to take any action.
-    The type of error is passed as an `rd_kafka_resp_err_t` enum value,
-    covering both remote broker errors and local failures.
-
-
-Optional callbacks not triggered by poll, these may be called from any thread:
-
-  * Logging callback - allows the application to output log messages
-	  generated by librdkafka.
-  * partitioner callback - application-provided message partitioner.
-    The partitioner may be called in any thread at any time and may be
-    called multiple times for the same key.
-    Partitioner function constraints:
-      * MUST NOT call any rd_kafka_*() functions
-      * MUST NOT block or execute for prolonged periods of time.
-      * MUST return a value between 0 and partition_cnt-1, or the
-        special RD_KAFKA_PARTITION_UA value if partitioning
-        could not be performed.
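-
-A minimal poll loop sketch (`run` is an application-defined flag and `rk` is
-the handle created earlier):
-
-    while (run)
-            /* Serves delivery reports, error callbacks, etc.
-             * Blocks for at most 1000 ms waiting for events. */
-            rd_kafka_poll(rk, 1000);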
-
-
-
-### Brokers
-
-librdkafka only needs an initial list of brokers (at least one), called the
-bootstrap brokers.
-It will connect to all the bootstrap brokers, specified by the
-"metadata.broker.list" configuration property or by `rd_kafka_brokers_add()`,
-and query each one for Metadata information which contains the full list of
-brokers, topics, partitions and their leaders in the Kafka cluster.
-
-Broker names are specified as "host[:port]" where the port is optional
-(default 9092) and the host is either a resolvable hostname or an IPv4 or IPv6
-address.
-If the host resolves to multiple addresses librdkafka will round-robin the
-addresses for each connection attempt.
-A DNS record containing all broker addresses can thus be used to provide a
-reliable bootstrap broker.
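-
-A sketch of adding bootstrap brokers programmatically (the broker addresses
-are placeholders):
-
-    /* rd_kafka_brokers_add() returns the number of brokers successfully added. */
-    if (rd_kafka_brokers_add(rk, "broker1.example.com:9092,broker2.example.com:9092") == 0)
-            fprintf(stderr, "%% No valid brokers specified\n");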
-
-### Feature discovery
-
-Apache Kafka broker version 0.10.0 added support for the ApiVersionRequest API
-which allows a client to query a broker for its range of supported API versions.
-
-librdkafka supports this functionality and will query each broker on connect
-for this information (if `api.version.request=true`) and use it to enable or disable
-various protocol features, such as MessageVersion 1 (timestamps), KafkaConsumer, etc.
-
-If the broker fails to respond to the ApiVersionRequest librdkafka will
-assume the broker is too old to support the API and fall back to an older
-broker version's API. These fallback versions are hardcoded in librdkafka
-and are controlled by the `broker.version.fallback` configuration property.
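-
-As an illustrative sketch (the version string is an example value only), both
-behaviours are controlled through the regular configuration API:
-
-    /* Disable ApiVersionRequest and pin the fallback protocol version,
-     * e.g. for clusters still running pre-0.10 brokers. */
-    rd_kafka_conf_set(conf, "api.version.request", "false", errstr, sizeof(errstr));
-    rd_kafka_conf_set(conf, "broker.version.fallback", "0.8.2.2", errstr, sizeof(errstr));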
-
-
-
-### Producer API
-
-After setting up the `rd_kafka_t` object with type `RD_KAFKA_PRODUCER` and one
-or more `rd_kafka_topic_t` objects, librdkafka is ready to accept messages
-to be produced and sent to brokers.
-
-The `rd_kafka_produce()` function takes the following arguments:
-
-  * `rkt` - the topic to produce to, previously created with
-	  `rd_kafka_topic_new()`
-  * `partition` - partition to produce to. If this is set to
-	  `RD_KAFKA_PARTITION_UA` (UnAssigned) then the configured partitioner
-		  function will be used to select a target partition.
-  * `msgflags` - 0, or one of:
-      * `RD_KAFKA_MSG_F_COPY` - librdkafka will immediately make a copy of
-        the payload. Use this when the payload is in non-persistent
-        memory, such as the stack.
-      * `RD_KAFKA_MSG_F_FREE` - let librdkafka free the payload using
-        `free(3)` when it is done with it.
-
-    These two flags are mutually exclusive and neither needs to be set,
-    in which case the payload is neither copied nor freed by librdkafka.
-
-    If the `RD_KAFKA_MSG_F_COPY` flag is not set no data copying will be
-    performed and librdkafka will hold on to the payload pointer until
-    the message has been delivered or fails.
-    The delivery report callback will be called when librdkafka is done
-    with the message to let the application regain ownership of the
-    payload memory.
-    The application must not free the payload in the delivery report
-    callback if `RD_KAFKA_MSG_F_FREE` is set.
-  * `payload`,`len` - the message payload
-  * `key`,`keylen` - an optional message key which can be used for partitioning.
-	  It will be passed to the topic partitioner callback, if any, and
-	  will be attached to the message when sending to the broker.
-  * `msg_opaque` - an optional application-provided per-message opaque pointer
-	  that will be provided in the message delivery callback to let
-	  the application reference a specific message.
-
-
-`rd_kafka_produce()` is a non-blocking API: it will enqueue the message
-on an internal queue and return immediately.
-If the number of queued messages would exceed the "queue.buffering.max.messages"
-configuration property then `rd_kafka_produce()` returns -1 and sets errno
-to `ENOBUFS`, thus providing a backpressure mechanism.
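-
-A rough sketch of a produce call with backpressure handling (the `buf`/`len`
-variables and the topic handle `rkt` are assumed to exist):
-
-    if (rd_kafka_produce(rkt, RD_KAFKA_PARTITION_UA, RD_KAFKA_MSG_F_COPY,
-                         buf, len,
-                         NULL, 0,        /* no key */
-                         NULL) == -1) {  /* no per-message opaque */
-            if (errno == ENOBUFS)
-                    /* Local queue is full: serve delivery reports
-                     * and try again later. */
-                    rd_kafka_poll(rk, 100);
-    }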
-
-
-**Note**: See `examples/rdkafka_performance.c` for a producer implementation.
-
-
-### Simple Consumer API (legacy)
-
-NOTE: For the high-level KafkaConsumer interface see `rd_kafka_subscribe()` (rdkafka.h) or `KafkaConsumer` (rdkafkacpp.h).
-
-The consumer API is a bit more stateful than the producer API.
-After creating `rd_kafka_t` with type `RD_KAFKA_CONSUMER` and
-`rd_kafka_topic_t` instances the application must also start the consumer
-for a given partition by calling `rd_kafka_consume_start()`.
-
-`rd_kafka_consume_start()` arguments:
-
-  * `rkt` - the topic to start consuming from, previously created with
-    	  `rd_kafka_topic_new()`.
-  * `partition` - partition to consume from.
-  * `offset` - message offset to start consuming from. This may either be an
-    absolute message offset or one of the three special offsets:
-    `RD_KAFKA_OFFSET_BEGINNING` to start consuming from the beginning
-    of the partition's queue (oldest message),
-    `RD_KAFKA_OFFSET_END` to start consuming at the next message to be
-    produced to the partition, or
-    `RD_KAFKA_OFFSET_STORED` to use the offset store.
-
-After a topic+partition consumer has been started librdkafka will attempt
-to keep "queued.min.messages" messages in the local queue by repeatedly
-fetching batches of messages from the broker.
-
-This local message queue is then served to the application through three
-different consume APIs:
-
-  * `rd_kafka_consume()` - consumes a single message
-  * `rd_kafka_consume_batch()` - consumes one or more messages
-  * `rd_kafka_consume_callback()` - consumes all messages in the local
-    queue and calls a callback function for each one.
-
-These three APIs are listed above in ascending order of performance,
-`rd_kafka_consume()` being the slowest and `rd_kafka_consume_callback()` being
-the fastest. The different consume variants are provided to cater for different
-application needs.
-
-A consumed message, as provided or returned by each of the consume functions,
-is represented by the `rd_kafka_message_t` type.
-
-`rd_kafka_message_t` members:
-
-  * `err` - Error signaling back to the application. If this field is non-zero
-    the `payload` field should be considered an error message and
-    `err` is an error code (`rd_kafka_resp_err_t`).
-    If `err` is zero then the message is a proper fetched message
-    and `payload` et al. contain the message payload data.
-  * `rkt`,`partition` - Topic and partition for this message or error.
-  * `payload`,`len` - Message payload data or error message (err!=0).
-  * `key`,`key_len` - Optional message key as specified by the producer.
-  * `offset` - Message offset.
-
-Both the `payload` and `key` memory, as well as the message as a whole, are
-owned by librdkafka and must not be used after an `rd_kafka_message_destroy()`
-call. librdkafka will share the same messageset receive buffer memory for all
-message payloads of that messageset to avoid excessive copying, which means
-that if the application decides to hang on to a single `rd_kafka_message_t`
-it will prevent the backing memory from being released for all other messages
-from the same messageset.
-
-When the application is done consuming messages from a topic+partition it
-should call `rd_kafka_consume_stop()` to stop the consumer. This will also
-purge any messages currently in the local queue.
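-
-A simplified consume loop sketch (partition 0 and the `run` flag are
-illustrative):
-
-    rd_kafka_consume_start(rkt, 0, RD_KAFKA_OFFSET_BEGINNING);
-
-    while (run) {
-            rd_kafka_message_t *rkmessage;
-
-            /* Waits at most 1000 ms for a message from the local queue. */
-            rkmessage = rd_kafka_consume(rkt, 0, 1000);
-            if (!rkmessage)
-                    continue; /* timeout */
-
-            if (rkmessage->err)
-                    fprintf(stderr, "%% Consume error: %s\n",
-                            rd_kafka_err2str(rkmessage->err));
-            else
-                    printf("Message at offset %lld, %d bytes\n",
-                           (long long)rkmessage->offset, (int)rkmessage->len);
-
-            rd_kafka_message_destroy(rkmessage);
-    }
-
-    rd_kafka_consume_stop(rkt, 0);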
-
-
-**Note**: See `examples/rdkafka_performance.c` for a consumer implementation.
-
-
-#### Offset management
-
-Broker-based offset management is available for broker version >= 0.9.0
-when using the high-level KafkaConsumer interface (see
-rdkafka.h or rdkafkacpp.h).
-
-Offset management is also available through a local offset file store, where the
-offset is periodically written to a local file for each topic+partition
-according to the following topic configuration properties:
-
-  * `auto.commit.enable`
-  * `auto.commit.interval.ms`
-  * `offset.store.path`
-  * `offset.store.sync.interval.ms`
-
-There is currently no support for offset management with ZooKeeper.
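-
-A small sketch of enabling the local file offset store via the topic
-configuration API (the store path is a placeholder):
-
-    rd_kafka_topic_conf_t *topic_conf = rd_kafka_topic_conf_new();
-
-    rd_kafka_topic_conf_set(topic_conf, "auto.commit.enable", "true",
-                            errstr, sizeof(errstr));
-    rd_kafka_topic_conf_set(topic_conf, "offset.store.path", "/var/lib/myapp/offsets",
-                            errstr, sizeof(errstr));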
-
-
-
-#### Consumer groups
-
-Broker-based consumer groups (requiring Apache Kafka broker >= 0.9) are
-supported; see KafkaConsumer in rdkafka.h or rdkafkacpp.h.
-
-
-### Topics
-
-#### Topic auto creation
-
-Topic auto creation is supported by librdkafka.
-The broker needs to be configured with "auto.create.topics.enable=true".
-
-
-
-### Metadata
-
-#### < 0.9.3
-Prior to the 0.9.3 release librdkafka's metadata handling
-was chatty and excessive, which usually isn't a problem in small
-to medium-sized clusters, but in large clusters with a large number
-of librdkafka clients the metadata requests could hog broker CPU and bandwidth.
-
-#### >= 0.9.3
-
-The remaining Metadata sections describe the current behaviour.
-
-**Note:** "Known topics" in the following section means topics for
-          locally created `rd_kafka_topic_t` objects.
-
-
-#### Query reasons
-
-There are four reasons to query metadata:
-
- * brokers - update/populate cluster broker list, so the client can
-             find and connect to any new brokers added.
-
- * specific topic - find leader or partition count for specific topic
-
- * known topics - same, but for all locally known topics.
-
- * all topics - get topic names for consumer group wildcard subscription
-                matching
-
-The above list is sorted so that each subsequent entry contains the
-information of the entries above it, e.g., 'known topics' contains enough
-information to also satisfy 'specific topic' and 'brokers'.
-
-
-#### Caching strategy
-
-The prevalent cache timeout is `metadata.max.age.ms`: any cached entry
-will remain authoritative for this long or until a relevant broker error
-is returned.
-
-
- * brokers - eternally cached; the broker list is additive.
-
- * topics - cached for `metadata.max.age.ms`
-
-
-
-
-## Appendix
-
-### Test details
-
-#### Test1: Produce to two brokers, two partitions, required.acks=2, 100 byte messages
-
-Each broker is leader for one of the two partitions.
-The random partitioner is used (default) and each broker and partition is
-assigned approximately 250000 messages.
-
-**Command:**
-
-    # examples/rdkafka_performance -P -t test2 -s 100 -c 500000 -m "_____________Test1:TwoBrokers:500kmsgs:100bytes" -S 1 -a 2
-	....
-    % 500000 messages and 50000000 bytes sent in 587ms: 851531 msgs/s and 85.15 Mb/s, 0 messages failed, no compression
-
-**Result:**
-
-Message transfer rate is approximately **850000 messages per second**,
-**85 megabytes per second**.
-
-
-
-#### Test2: Produce to one broker, one partition, required.acks=0, 100 byte messages
-
-**Command:**
-
-    # examples/rdkafka_performance -P -t test2 -s 100 -c 500000 -m "_____________Test2:OneBrokers:500kmsgs:100bytes" -S 1 -a 0 -p 1
-	....
-	% 500000 messages and 50000000 bytes sent in 698ms: 715994 msgs/s and 71.60 Mb/s, 0 messages failed, no compression
-
-**Result:**
-
-Message transfer rate is approximately **710000 messages per second**,
-**71 megabytes per second**.
-
-
-
-#### Test3: Produce to two brokers, two partitions, required.acks=2, 100 byte messages, snappy compression
-
-**Command:**
-
-	# examples/rdkafka_performance -P -t test2 -s 100 -c 500000 -m "_____________Test3:TwoBrokers:500kmsgs:100bytes:snappy" -S 1 -a 2 -z snappy
-	....
-	% 500000 messages and 50000000 bytes sent in 1672ms: 298915 msgs/s and 29.89 Mb/s, 0 messages failed, snappy compression
-
-**Result:**
-
-Message transfer rate is approximately **300000 messages per second**,
-**30 megabytes per second**.
-
-
-#### Test4: Produce to two brokers, two partitions, required.acks=2, 100 byte messages, gzip compression
-
-**Command:**
-
-	# examples/rdkafka_performance -P -t test2 -s 100 -c 500000 -m "_____________Test3:TwoBrokers:500kmsgs:100bytes:gzip" -S 1 -a 2 -z gzip
-	....
-	% 500000 messages and 50000000 bytes sent in 2111ms: 236812 msgs/s and 23.68 Mb/s, 0 messages failed, gzip compression
-
-**Result:**
-
-Message transfer rate is approximately **230000 messages per second**,
-**23 megabytes per second**.
-

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/LICENSE
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/LICENSE b/thirdparty/librdkafka-0.11.1/LICENSE
deleted file mode 100644
index ba78cc2..0000000
--- a/thirdparty/librdkafka-0.11.1/LICENSE
+++ /dev/null
@@ -1,25 +0,0 @@
-librdkafka - Apache Kafka C driver library
-
-Copyright (c) 2012, Magnus Edenhill
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met: 
-
-1. Redistributions of source code must retain the above copyright notice,
-   this list of conditions and the following disclaimer. 
-2. Redistributions in binary form must reproduce the above copyright notice,
-   this list of conditions and the following disclaimer in the documentation
-   and/or other materials provided with the distribution. 
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/LICENSE.crc32c
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/LICENSE.crc32c b/thirdparty/librdkafka-0.11.1/LICENSE.crc32c
deleted file mode 100644
index 482a345..0000000
--- a/thirdparty/librdkafka-0.11.1/LICENSE.crc32c
+++ /dev/null
@@ -1,28 +0,0 @@
-# For src/crc32c.c copied (with modifications) from
-# http://stackoverflow.com/a/17646775/1821055
-
-/* crc32c.c -- compute CRC-32C using the Intel crc32 instruction
- * Copyright (C) 2013 Mark Adler
- * Version 1.1  1 Aug 2013  Mark Adler
- */
-
-/*
-  This software is provided 'as-is', without any express or implied
-  warranty.  In no event will the author be held liable for any damages
-  arising from the use of this software.
-
-  Permission is granted to anyone to use this software for any purpose,
-  including commercial applications, and to alter it and redistribute it
-  freely, subject to the following restrictions:
-
-  1. The origin of this software must not be misrepresented; you must not
-     claim that you wrote the original software. If you use this software
-     in a product, an acknowledgment in the product documentation would be
-     appreciated but is not required.
-  2. Altered source versions must be plainly marked as such, and must not be
-     misrepresented as being the original software.
-  3. This notice may not be removed or altered from any source distribution.
-
-  Mark Adler
-  madler@alumni.caltech.edu
- */

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/LICENSE.lz4
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/LICENSE.lz4 b/thirdparty/librdkafka-0.11.1/LICENSE.lz4
deleted file mode 100644
index 353dfb4..0000000
--- a/thirdparty/librdkafka-0.11.1/LICENSE.lz4
+++ /dev/null
@@ -1,26 +0,0 @@
-src/xxhash.[ch] src/lz4*.[ch]: git@github.com:lz4/lz4.git e2827775ee80d2ef985858727575df31fc60f1f3
-
-LZ4 Library
-Copyright (c) 2011-2016, Yann Collet
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without modification,
-are permitted provided that the following conditions are met:
-
-* Redistributions of source code must retain the above copyright notice, this
-  list of conditions and the following disclaimer.
-
-* Redistributions in binary form must reproduce the above copyright notice, this
-  list of conditions and the following disclaimer in the documentation and/or
-  other materials provided with the distribution.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
-ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
-ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/LICENSE.pycrc
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/LICENSE.pycrc b/thirdparty/librdkafka-0.11.1/LICENSE.pycrc
deleted file mode 100644
index 71baded..0000000
--- a/thirdparty/librdkafka-0.11.1/LICENSE.pycrc
+++ /dev/null
@@ -1,23 +0,0 @@
-The following license applies to the files rdcrc32.c and rdcrc32.h which
-have been generated by the pycrc tool.
-============================================================================
-
-Copyright (c) 2006-2012, Thomas Pircher <te...@gmx.net>
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/LICENSE.queue
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/LICENSE.queue b/thirdparty/librdkafka-0.11.1/LICENSE.queue
deleted file mode 100644
index 14bbf93..0000000
--- a/thirdparty/librdkafka-0.11.1/LICENSE.queue
+++ /dev/null
@@ -1,31 +0,0 @@
-For sys/queue.h:
-
- * Copyright (c) 1991, 1993
- *	The Regents of the University of California.  All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 4. Neither the name of the University nor the names of its contributors
- *    may be used to endorse or promote products derived from this software
- *    without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- *	@(#)queue.h	8.5 (Berkeley) 8/20/94
- * $FreeBSD$
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/LICENSE.regexp
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/LICENSE.regexp b/thirdparty/librdkafka-0.11.1/LICENSE.regexp
deleted file mode 100644
index 5fa0b10..0000000
--- a/thirdparty/librdkafka-0.11.1/LICENSE.regexp
+++ /dev/null
@@ -1,5 +0,0 @@
-regexp.c and regexp.h from https://github.com/ccxvii/minilibs sha 875c33568b5a4aa4fb3dd0c52ea98f7f0e5ca684
-
-"
-These libraries are in the public domain (or the equivalent where that is not possible). You can do anything you want with them. You have no legal obligation to do anything else, although I appreciate attribution.
-"

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/LICENSE.snappy
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/LICENSE.snappy b/thirdparty/librdkafka-0.11.1/LICENSE.snappy
deleted file mode 100644
index baa6cfe..0000000
--- a/thirdparty/librdkafka-0.11.1/LICENSE.snappy
+++ /dev/null
@@ -1,36 +0,0 @@
-######################################################################
-# LICENSE.snappy covers files: snappy.c, snappy.h, snappy_compat.h   #
-# originally retrieved from http://github.com/andikleen/snappy-c     #
-# git revision 8015f2d28739b9a6076ebaa6c53fe27bc238d219              #
-######################################################################
-
-The snappy-c code is under the same license as the original snappy source
-
-Copyright 2011 Intel Corporation All Rights Reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
-    * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
-    * Neither the name of Intel Corporation nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/LICENSE.tinycthread
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/LICENSE.tinycthread b/thirdparty/librdkafka-0.11.1/LICENSE.tinycthread
deleted file mode 100644
index 0ceadef..0000000
--- a/thirdparty/librdkafka-0.11.1/LICENSE.tinycthread
+++ /dev/null
@@ -1,26 +0,0 @@
-From https://github.com/tinycthread/tinycthread/README.txt c57166cd510ffb5022dd5f127489b131b61441b9
-
-License
--------
-
-Copyright (c) 2012 Marcus Geelnard
-              2013-2014 Evan Nemerson
-
-This software is provided 'as-is', without any express or implied
-warranty. In no event will the authors be held liable for any damages
-arising from the use of this software.
-
-Permission is granted to anyone to use this software for any purpose,
-including commercial applications, and to alter it and redistribute it
-freely, subject to the following restrictions:
-
-    1. The origin of this software must not be misrepresented; you must not
-    claim that you wrote the original software. If you use this software
-    in a product, an acknowledgment in the product documentation would be
-    appreciated but is not required.
-
-    2. Altered source versions must be plainly marked as such, and must not be
-    misrepresented as being the original software.
-
-    3. This notice may not be removed or altered from any source
-    distribution.

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/LICENSE.wingetopt
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/LICENSE.wingetopt b/thirdparty/librdkafka-0.11.1/LICENSE.wingetopt
deleted file mode 100644
index 4c28701..0000000
--- a/thirdparty/librdkafka-0.11.1/LICENSE.wingetopt
+++ /dev/null
@@ -1,49 +0,0 @@
-For the files wingetopt.c wingetopt.h downloaded from https://github.com/alex85k/wingetopt
-
-/*
- * Copyright (c) 2002 Todd C. Miller <To...@courtesan.com>
- *
- * Permission to use, copy, modify, and distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- *
- * Sponsored in part by the Defense Advanced Research Projects
- * Agency (DARPA) and Air Force Research Laboratory, Air Force
- * Materiel Command, USAF, under agreement number F39502-99-1-0512.
- */
-/*-
- * Copyright (c) 2000 The NetBSD Foundation, Inc.
- * All rights reserved.
- *
- * This code is derived from software contributed to The NetBSD Foundation
- * by Dieter Baron and Thomas Klausner.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
- * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/LICENSES.txt
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/LICENSES.txt b/thirdparty/librdkafka-0.11.1/LICENSES.txt
deleted file mode 100644
index ea10b97..0000000
--- a/thirdparty/librdkafka-0.11.1/LICENSES.txt
+++ /dev/null
@@ -1,284 +0,0 @@
-LICENSE
---------------------------------------------------------------
-librdkafka - Apache Kafka C driver library
-
-Copyright (c) 2012, Magnus Edenhill
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met: 
-
-1. Redistributions of source code must retain the above copyright notice,
-   this list of conditions and the following disclaimer. 
-2. Redistributions in binary form must reproduce the above copyright notice,
-   this list of conditions and the following disclaimer in the documentation
-   and/or other materials provided with the distribution. 
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-
-LICENSE.crc32c
---------------------------------------------------------------
-# For src/crc32c.c copied (with modifications) from
-# http://stackoverflow.com/a/17646775/1821055
-
-/* crc32c.c -- compute CRC-32C using the Intel crc32 instruction
- * Copyright (C) 2013 Mark Adler
- * Version 1.1  1 Aug 2013  Mark Adler
- */
-
-/*
-  This software is provided 'as-is', without any express or implied
-  warranty.  In no event will the author be held liable for any damages
-  arising from the use of this software.
-
-  Permission is granted to anyone to use this software for any purpose,
-  including commercial applications, and to alter it and redistribute it
-  freely, subject to the following restrictions:
-
-  1. The origin of this software must not be misrepresented; you must not
-     claim that you wrote the original software. If you use this software
-     in a product, an acknowledgment in the product documentation would be
-     appreciated but is not required.
-  2. Altered source versions must be plainly marked as such, and must not be
-     misrepresented as being the original software.
-  3. This notice may not be removed or altered from any source distribution.
-
-  Mark Adler
-  madler@alumni.caltech.edu
- */
-
-
-LICENSE.lz4
---------------------------------------------------------------
-src/xxhash.[ch] src/lz4*.[ch]: git@github.com:lz4/lz4.git e2827775ee80d2ef985858727575df31fc60f1f3
-
-LZ4 Library
-Copyright (c) 2011-2016, Yann Collet
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without modification,
-are permitted provided that the following conditions are met:
-
-* Redistributions of source code must retain the above copyright notice, this
-  list of conditions and the following disclaimer.
-
-* Redistributions in binary form must reproduce the above copyright notice, this
-  list of conditions and the following disclaimer in the documentation and/or
-  other materials provided with the distribution.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
-ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
-ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-LICENSE.pycrc
---------------------------------------------------------------
-The following license applies to the files rdcrc32.c and rdcrc32.h which
-have been generated by the pycrc tool.
-============================================================================
-
-Copyright (c) 2006-2012, Thomas Pircher <te...@gmx.net>
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
-
-LICENSE.queue
---------------------------------------------------------------
-For sys/queue.h:
-
- * Copyright (c) 1991, 1993
- *	The Regents of the University of California.  All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 4. Neither the name of the University nor the names of its contributors
- *    may be used to endorse or promote products derived from this software
- *    without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- *	@(#)queue.h	8.5 (Berkeley) 8/20/94
- * $FreeBSD$
-
-LICENSE.regexp
---------------------------------------------------------------
-regexp.c and regexp.h from https://github.com/ccxvii/minilibs sha 875c33568b5a4aa4fb3dd0c52ea98f7f0e5ca684
-
-"
-These libraries are in the public domain (or the equivalent where that is not possible). You can do anything you want with them. You have no legal obligation to do anything else, although I appreciate attribution.
-"
-
-
-LICENSE.snappy
---------------------------------------------------------------
-######################################################################
-# LICENSE.snappy covers files: snappy.c, snappy.h, snappy_compat.h   #
-# originally retrieved from http://github.com/andikleen/snappy-c     #
-# git revision 8015f2d28739b9a6076ebaa6c53fe27bc238d219              #
-######################################################################
-
-The snappy-c code is under the same license as the original snappy source
-
-Copyright 2011 Intel Corporation All Rights Reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
-    * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
-    * Neither the name of Intel Corporation nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-
-LICENSE.tinycthread
---------------------------------------------------------------
-From https://github.com/tinycthread/tinycthread/README.txt c57166cd510ffb5022dd5f127489b131b61441b9
-
-License
--------
-
-Copyright (c) 2012 Marcus Geelnard
-              2013-2014 Evan Nemerson
-
-This software is provided 'as-is', without any express or implied
-warranty. In no event will the authors be held liable for any damages
-arising from the use of this software.
-
-Permission is granted to anyone to use this software for any purpose,
-including commercial applications, and to alter it and redistribute it
-freely, subject to the following restrictions:
-
-    1. The origin of this software must not be misrepresented; you must not
-    claim that you wrote the original software. If you use this software
-    in a product, an acknowledgment in the product documentation would be
-    appreciated but is not required.
-
-    2. Altered source versions must be plainly marked as such, and must not be
-    misrepresented as being the original software.
-
-    3. This notice may not be removed or altered from any source
-    distribution.
-
-
-LICENSE.wingetopt
---------------------------------------------------------------
-For the files wingetopt.c wingetopt.h downloaded from https://github.com/alex85k/wingetopt
-
-/*
- * Copyright (c) 2002 Todd C. Miller <To...@courtesan.com>
- *
- * Permission to use, copy, modify, and distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- *
- * Sponsored in part by the Defense Advanced Research Projects
- * Agency (DARPA) and Air Force Research Laboratory, Air Force
- * Materiel Command, USAF, under agreement number F39502-99-1-0512.
- */
-/*-
- * Copyright (c) 2000 The NetBSD Foundation, Inc.
- * All rights reserved.
- *
- * This code is derived from software contributed to The NetBSD Foundation
- * by Dieter Baron and Thomas Klausner.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
- * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/Makefile
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/Makefile b/thirdparty/librdkafka-0.11.1/Makefile
deleted file mode 100755
index e428c83..0000000
--- a/thirdparty/librdkafka-0.11.1/Makefile
+++ /dev/null
@@ -1,68 +0,0 @@
-LIBSUBDIRS=	src src-cpp
-
-CHECK_FILES+=	CONFIGURATION.md \
-		examples/rdkafka_example examples/rdkafka_performance \
-		examples/rdkafka_example_cpp
-
-PACKAGE_NAME?=	librdkafka
-VERSION?=	$(shell python packaging/get_version.py src/rdkafka.h)
-
-# Jenkins CI integration
-BUILD_NUMBER ?= 1
-
-.PHONY:
-
-all: mklove-check libs CONFIGURATION.md check
-
-include mklove/Makefile.base
-
-libs:
-	@(for d in $(LIBSUBDIRS); do $(MAKE) -C $$d || exit $?; done)
-
-CONFIGURATION.md: src/rdkafka.h examples
-	@printf "$(MKL_YELLOW)Updating$(MKL_CLR_RESET)\n"
-	@echo '//@file' > CONFIGURATION.md.tmp
-	@(examples/rdkafka_performance -X list >> CONFIGURATION.md.tmp; \
-		cmp CONFIGURATION.md CONFIGURATION.md.tmp || \
-		mv CONFIGURATION.md.tmp CONFIGURATION.md; \
-		rm -f CONFIGURATION.md.tmp)
-
-file-check: CONFIGURATION.md LICENSES.txt examples
-check: file-check
-	@(for d in $(LIBSUBDIRS); do $(MAKE) -C $$d $@ || exit $?; done)
-
-install:
-	@(for d in $(LIBSUBDIRS); do $(MAKE) -C $$d $@ || exit $?; done)
-
-examples tests: .PHONY libs
-	$(MAKE) -C $@
-
-docs:
-	doxygen Doxyfile
-	@echo "Documentation generated in staging-docs"
-
-clean-docs:
-	rm -rf staging-docs
-
-clean:
-	@$(MAKE) -C tests $@
-	@$(MAKE) -C examples $@
-	@(for d in $(LIBSUBDIRS); do $(MAKE) -C $$d $@ ; done)
-
-distclean: clean
-	./configure --clean
-	rm -f config.log config.log.old
-
-archive:
-	git archive --prefix=$(PACKAGE_NAME)-$(VERSION)/ \
-		-o $(PACKAGE_NAME)-$(VERSION).tar.gz HEAD
-	git archive --prefix=$(PACKAGE_NAME)-$(VERSION)/ \
-		-o $(PACKAGE_NAME)-$(VERSION).zip HEAD
-
-rpm: distclean
-	$(MAKE) -C packaging/rpm
-
-LICENSES.txt: .PHONY
-	@(for i in LICENSE LICENSE.*[^~] ; do (echo "$$i" ; echo "--------------------------------------------------------------" ; cat $$i ; echo "" ; echo "") ; done) > $@.tmp
-	@cmp $@ $@.tmp || mv $@.tmp $@ ; rm -f $@.tmp
-

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/README.md
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/README.md b/thirdparty/librdkafka-0.11.1/README.md
deleted file mode 100644
index 8e4a55f..0000000
--- a/thirdparty/librdkafka-0.11.1/README.md
+++ /dev/null
@@ -1,160 +0,0 @@
-librdkafka - the Apache Kafka C/C++ client library
-==================================================
-
-Copyright (c) 2012-2016, [Magnus Edenhill](http://www.edenhill.se/).
-
-[https://github.com/edenhill/librdkafka](https://github.com/edenhill/librdkafka)
-
-[![Gitter chat](https://badges.gitter.im/edenhill/librdkafka.png)](https://gitter.im/edenhill/librdkafka) [![Build status](https://doozer.io/badge/edenhill/librdkafka/buildstatus/master)](https://doozer.io/user/edenhill/librdkafka)
-
-
-**librdkafka** is a C library implementation of the
-[Apache Kafka](http://kafka.apache.org/) protocol, containing both
-Producer and Consumer support. It was designed with message delivery reliability
-and high performance in mind, current figures exceed 1 million msgs/second for
-the producer and 3 million msgs/second for the consumer.
-
-**librdkafka** is licensed under the 2-clause BSD license.
-
-For an introduction to the performance and usage of librdkafka, see
-[INTRODUCTION.md](https://github.com/edenhill/librdkafka/blob/master/INTRODUCTION.md)
-
-See the [wiki](https://github.com/edenhill/librdkafka/wiki) for a FAQ.
-
-**NOTE**: The `master` branch is actively developed, use latest release for production use.
-
-
-# Overview #
-  * High-level producer
-  * High-level balanced KafkaConsumer (requires broker >= 0.9)
-  * Simple (legacy) consumer
-  * Compression: snappy, gzip, lz4
-  * [SSL](https://github.com/edenhill/librdkafka/wiki/Using-SSL-with-librdkafka) support
-  * [SASL](https://github.com/edenhill/librdkafka/wiki/Using-SASL-with-librdkafka) (GSSAPI/Kerberos/SSPI, PLAIN, SCRAM) support
-  * Broker version support: >=0.8 (see [Broker version compatibility](https://github.com/edenhill/librdkafka/wiki/Broker-version-compatibility))
-  * Stable C & C++ APIs (ABI safety guaranteed for C)
-  * [Statistics](https://github.com/edenhill/librdkafka/wiki/Statistics) metrics
-  * Debian package: librdkafka1 and librdkafka-dev in Debian and Ubuntu
-  * RPM package: librdkafka and librdkafka-devel
-  * Gentoo package: dev-libs/librdkafka
-  * Portable: runs on Linux, OSX, Win32, Solaris, FreeBSD, ...
-
-
-# Language bindings #
-
-  * C#/.NET: [confluent-kafka-dotnet](https://github.com/confluentinc/confluent-kafka-dotnet) (based on [rdkafka-dotnet](https://github.com/ah-/rdkafka-dotnet))
-  * C++: [cppkafka](https://github.com/mfontanini/cppkafka)
-  * D (C-like): [librdkafka](https://github.com/DlangApache/librdkafka/)
-  * D (C++-like): [librdkafkad](https://github.com/tamediadigital/librdkafka-d)
-  * Erlang: [erlkaf](https://github.com/silviucpp/erlkaf)
-  * Go: [confluent-kafka-go](https://github.com/confluentinc/confluent-kafka-go)
-  * Haskell (kafka, conduit, avro, schema registry): [hw-kafka](https://github.com/haskell-works/hw-kafka)
-  * Haskell: [haskakafka](https://github.com/cosbynator/haskakafka)
-  * Haskell: [haskell-kafka](https://github.com/yanatan16/haskell-kafka)
-  * Lua: [luardkafka](https://github.com/mistsv/luardkafka)
-  * Node.js: [node-rdkafka](https://github.com/Blizzard/node-rdkafka)
-  * Node.js: [node-kafka](https://github.com/sutoiku/node-kafka)
-  * Node.js: [kafka-native](https://github.com/jut-io/node-kafka-native)
-  * OCaml: [ocaml-kafka](https://github.com/didier-wenzek/ocaml-kafka)
-  * PHP: [phpkafka](https://github.com/EVODelavega/phpkafka)
-  * PHP: [php-rdkafka](https://github.com/arnaud-lb/php-rdkafka)
-  * Python: [confluent-kafka-python](https://github.com/confluentinc/confluent-kafka-python)
-  * Python: [PyKafka](https://github.com/Parsely/pykafka)
-  * Ruby: [Hermann](https://github.com/reiseburo/hermann)
-  * Rust: [rust-rdkafka](https://github.com/fede1024/rust-rdkafka)
-  * Tcl: [KafkaTcl](https://github.com/flightaware/kafkatcl)
-  * Swift: [Perfect-Kafka](https://github.com/PerfectlySoft/Perfect-Kafka)
-
-# Users of librdkafka #
-
-  * [kafkacat](https://github.com/edenhill/kafkacat) - Apache Kafka swiss army knife
-  * [Wikimedia's varnishkafka](https://github.com/wikimedia/varnishkafka) - Varnish cache web log producer
-  * [Wikimedia's kafkatee](https://github.com/wikimedia/analytics-kafkatee) - Kafka multi consumer with filtering and fanout
-  * [rsyslog](http://www.rsyslog.com)
-  * [syslog-ng](http://syslog-ng.org)
-  * [collectd](http://collectd.org)
-  * [logkafka](https://github.com/Qihoo360/logkafka) - Collect logs and send to Kafka
-  * [redBorder](http://www.redborder.net)
-  * [Headweb](http://www.headweb.com/)
-  * [Produban's log2kafka](https://github.com/Produban/log2kafka) - Web log producer
-  * [fuse_kafka](https://github.com/yazgoo/fuse_kafka) - FUSE file system layer
-  * [node-kafkacat](https://github.com/Rafflecopter/node-kafkacat)
-  * [OVH](http://ovh.com) - [AntiDDOS](http://www.slideshare.net/hugfrance/hugfr-6-oct2014ovhantiddos)
-  * [otto.de](http://otto.de)'s [trackdrd](https://github.com/otto-de/trackrdrd) - Varnish log reader
-  * [Microwish](https://github.com/microwish) has a range of Kafka utilities for log aggregation, HDFS integration, etc.
-  * [aidp](https://github.com/weiboad/aidp) - kafka consumer embedded Lua scripting language in data process framework 
-  * large unnamed financial institution
-  * *Let [me](mailto:rdkafka@edenhill.se) know if you are using librdkafka*
-
-
-
-# Usage
-
-## Requirements
-	The GNU toolchain
-	GNU make
-   	pthreads
-	zlib (optional, for gzip compression support)
-	libssl-dev (optional, for SSL and SASL SCRAM support)
-	libsasl2-dev (optional, for SASL GSSAPI support)
-
-## Instructions
-
-### Building
-
-      ./configure
-      make
-      sudo make install
-
-
-**NOTE**: See [README.win32](README.win32) for instructions how to build
-          on Windows with Microsoft Visual Studio.
-
-### Usage in code
-
-See [examples/rdkafka_example.c](https://github.com/edenhill/librdkafka/blob/master/examples/rdkafka_example.c) for an example producer and consumer.
-
-Link your program with `-lrdkafka -lz -lpthread -lrt`.
-
-
-## Documentation
-
-The public APIs are documented in their respective header files:
- * The **C** API is documented in [src/rdkafka.h](src/rdkafka.h)
- * The **C++** API is documented in [src-cpp/rdkafkacpp.h](src-cpp/rdkafkacpp.h)
-
-To generate Doxygen documents for the API, type:
-
-    make docs
-
-
-Configuration properties are documented in
-[CONFIGURATION.md](https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md)
-
-For a librdkafka introduction, see
-[INTRODUCTION.md](https://github.com/edenhill/librdkafka/blob/master/INTRODUCTION.md)
-
-
-## Examples
-
-See the `examples/` sub-directory.
-
-
-## Tests
-
-See the `tests/` sub-directory.
-
-
-## Support
-
-File bug reports, feature requests and questions using
-[GitHub Issues](https://github.com/edenhill/librdkafka/issues)
-
-
-Questions and discussions are also welcome on irc.freenode.org, #apache-kafka,
-nickname Snaps.
-
-
-### Commercial support
-
-Commercial support is available from [Edenhill services](http://www.edenhill.se)

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/README.win32
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/README.win32 b/thirdparty/librdkafka-0.11.1/README.win32
deleted file mode 100644
index de9b5e4..0000000
--- a/thirdparty/librdkafka-0.11.1/README.win32
+++ /dev/null
@@ -1,28 +0,0 @@
-
-Native win32 build instructions using Microsoft Visual Studio 2013 (MSVC).
-
-Requirements:
- * zlib is installed automatically from NuGet,
-   but probably requires the NuGet VS extension.
- * OpenSSL-win32 must be installed in C:\OpenSSL-win32.
-   Download and install the latest v1.0.2 non-light package from:
-   https://slproweb.com/products/Win32OpenSSL.html
-   (This would be using NuGet too but the current
-    OpenSSL packages are outdated and with broken
-    dependencies, so no luck)
-
-The Visual Studio solution file for librdkafka resides in win32/librdkafka.sln
-
-Artifacts:
- - C library
- - C++ library
- - rdkafka_example
- - tests
-
- Missing:
-  - remaining tools (rdkafka_performance, etc)
-  - SASL support (no official Cyrus libsasl2 DLLs available)
-
-If you build librdkafka with an external tool (ie CMake) you can get rid of the 
-__declspec(dllexport) / __declspec(dllimport) decorations by adding a define
--DLIBRDKAFKA_STATICLIB to your CFLAGS

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/config.h.in
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/config.h.in b/thirdparty/librdkafka-0.11.1/config.h.in
deleted file mode 100644
index bc3e6ee..0000000
--- a/thirdparty/librdkafka-0.11.1/config.h.in
+++ /dev/null
@@ -1,39 +0,0 @@
-#cmakedefine01 WITHOUT_OPTIMIZATION
-#cmakedefine01 ENABLE_DEVEL
-#cmakedefine01 ENABLE_REFCNT_DEBUG
-#cmakedefine01 ENABLE_SHAREDPTR_DEBUG
-
-#cmakedefine01 HAVE_ATOMICS_32
-#cmakedefine01 HAVE_ATOMICS_32_SYNC
-
-#if (HAVE_ATOMICS_32)
-# if (HAVE_ATOMICS_32_SYNC)
-#  define ATOMIC_OP32(OP1,OP2,PTR,VAL) __sync_ ## OP1 ## _and_ ## OP2(PTR, VAL)
-# else
-#  define ATOMIC_OP32(OP1,OP2,PTR,VAL) __atomic_ ## OP1 ## _ ## OP2(PTR, VAL, __ATOMIC_SEQ_CST)
-# endif
-#endif
-
-#cmakedefine01 HAVE_ATOMICS_64
-#cmakedefine01 HAVE_ATOMICS_64_SYNC
-
-#if (HAVE_ATOMICS_64)
-# if (HAVE_ATOMICS_64_SYNC)
-#  define ATOMIC_OP64(OP1,OP2,PTR,VAL) __sync_ ## OP1 ## _and_ ## OP2(PTR, VAL)
-# else
-#  define ATOMIC_OP64(OP1,OP2,PTR,VAL) __atomic_ ## OP1 ## _ ## OP2(PTR, VAL, __ATOMIC_SEQ_CST)
-# endif
-#endif
-
-
-#cmakedefine01 WITH_ZLIB
-#cmakedefine01 WITH_LIBDL
-#cmakedefine01 WITH_PLUGINS
-#define WITH_SNAPPY 1
-#define WITH_SOCKEM 1
-#cmakedefine01 WITH_SSL
-#cmakedefine01 WITH_SASL
-#cmakedefine01 WITH_SASL_SCRAM
-#cmakedefine01 WITH_SASL_CYRUS
-#cmakedefine01 HAVE_REGEX
-#cmakedefine01 HAVE_STRNDUP

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/configure
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/configure b/thirdparty/librdkafka-0.11.1/configure
deleted file mode 100755
index a76452a..0000000
--- a/thirdparty/librdkafka-0.11.1/configure
+++ /dev/null
@@ -1,214 +0,0 @@
-#!/usr/bin/env bash
-#
-
-BASHVER=$(expr ${BASH_VERSINFO[0]} \* 1000 + ${BASH_VERSINFO[1]})
-
-if [ "$BASHVER" -lt 3002 ]; then
-    echo "ERROR: mklove requires bash version 3.2 or later but you are using $BASH_VERSION ($BASHVER)"
-    echo "       See https://github.com/edenhill/mklove/issues/15"
-    exit 1
-fi
-
-MKL_CONFIGURE_ARGS="$0 $*"
-
-# Load base module
-source mklove/modules/configure.base
-
-# Read some special command line options right away that must be known prior to
-# sourcing modules.
-mkl_in_list "$*" "--no-download" && MKL_NO_DOWNLOAD=1
-# Disable downloads when --help is used to avoid blocking calls.
-mkl_in_list "$*" "--help" && MKL_NO_DOWNLOAD=1
-mkl_in_list "$*" "--debug" && MKL_DEBUG=1
-
-# This is the earliest possible time to check for color support in the
-# terminal, because mkl_check_terminal_color_support uses mkl_dbg, which
-# needs to know whether MKL_DEBUG is set.
-mkl_check_terminal_color_support
-
-# Delete temporary Makefile and header files on exit.
-trap "{ rm -f $MKL_OUTMK $MKL_OUTH; }" EXIT
-
-
-
-##
-## Load builtin modules
-##
-
-# Builtin options, etc.
-mkl_require builtin
-
-# Host/target support
-mkl_require host
-
-# Compiler detection
-mkl_require cc
-
-
-# Load application provided modules (in current directory), if any.
-for fname in configure.* ; do
-    if [[ $fname = 'configure.*' ]]; then
-        continue
-    fi
-
-    # Skip temporary files
-    if [[ $fname = *~ ]]; then
-        continue
-    fi
-
-    mkl_require $fname
-done
-
-
-
-
-##
-## Argument parsing (options)
-##
-##
-
-_SAVE_ARGS="$*"
-
-# Parse arguments
-while [[ ! -z $@ ]]; do
-    if [[ $1 != --* ]]; then
-        mkl_err "Unknown non-option argument: $1"
-        mkl_usage
-        exit 1
-    fi
-
-    opt=${1#--}
-    shift
-
-    if [[ $opt = *=* ]]; then
-        name="${opt%=*}"
-        arg="${opt#*=}"
-        eqarg=1
-    else
-        name="$opt"
-        arg=""
-        eqarg=0
-    fi
-
-    safeopt="$(mkl_env_esc $name)"
-
-    if ! mkl_func_exists opt_$safeopt ; then
-        mkl_err "Unknown option $opt"
-        mkl_usage
-        exit 1
-    fi
-
-    # Check if this option needs an argument.
-    reqarg=$(mkl_meta_get "MKL_OPT_ARGS" "$(mkl_env_esc $name)")
-    if [[ ! -z $reqarg ]]; then
-        if [[ $eqarg == 0 && -z $arg ]]; then
-            arg=$1
-            shift
-
-            if [[ -z $arg ]]; then
-                mkl_err "Missing argument to option --$name $reqarg"
-                exit 1
-            fi
-        fi
-    else
-        if [[ ! -z $arg ]]; then
-            mkl_err "Option --$name expects no argument"
-            exit 1
-        fi
-        arg=y
-    fi
-
-    case $name in
-        re|reconfigure)
-            oldcmd=$(head -1 config.log | grep '^# configure exec: ' | \
-                sed -e 's/^\# configure exec: [^ ]*configure//')
-            echo "Reconfiguring: $0 $oldcmd"
-            exec $0 $oldcmd
-            ;;
-
-        list-modules)
-            echo "Modules loaded:"
-            for mod in $MKL_MODULES ; do
-                echo "  $mod"
-            done
-            exit 0
-            ;;
-
-        list-checks)
-            echo "Check functions in calling order:"
-            for mf in $MKL_CHECKS ; do
-                mod=${mf%:*}
-                func=${mf#*:}
-                echo -e "${MKL_GREEN}From module $mod:$MKL_CLR_RESET"
-                declare -f $func
-                echo ""
-            done
-            exit 0
-            ;;
-
-        update-modules)
-            fails=0
-            echo "Updating modules"
-            for mod in $MKL_MODULES ; do
-                echo -n "Updating $mod..."
-                if mkl_module_download "$mod" > /dev/null ; then
-                    echo -e "${MKL_GREEN}ok${MKL_CLR_RESET}"
-                else
-                    echo -e "${MKL_RED}failed${MKL_CLR_RESET}"
-                    fails=$(expr $fails + 1)
-                fi
-            done
-            exit $fails
-            ;;
-
-        help)
-            mkl_usage
-            exit 0
-            ;;
-
-        *)
-            opt_$safeopt $arg || exit 1
-            mkl_var_append MKL_OPTS_SET "$safeopt"
-            ;;
-    esac
-done
-
-if [[ ! -z $MKL_CLEAN ]]; then
-    mkl_clean
-    exit 0
-fi
-
-# Move away previous log file
-[[ -f $MKL_OUTDBG ]] && mv $MKL_OUTDBG ${MKL_OUTDBG}.old
-
-
-# Create output files
-echo "# configure exec: $0 $_SAVE_ARGS" >> $MKL_OUTDBG
-echo "# On $(date)" >> $MKL_OUTDBG
-
-rm -f $MKL_OUTMK $MKL_OUTH
-
-
-# Load cache file
-mkl_cache_read
-
-# Run checks
-mkl_checks_run
-
-# Check accumulated failures; this will not return on failure.
-mkl_check_fails
-
-# Generate outputs
-mkl_generate
-
-# Summarize what happened
-mkl_summary
-
-# Write cache file
-mkl_cache_write
-
-
-echo ""
-echo "Now type 'make' to build"
-trap - EXIT
-exit 0

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/configure.librdkafka
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/configure.librdkafka b/thirdparty/librdkafka-0.11.1/configure.librdkafka
deleted file mode 100644
index e832e3c..0000000
--- a/thirdparty/librdkafka-0.11.1/configure.librdkafka
+++ /dev/null
@@ -1,204 +0,0 @@
-#!/bin/bash
-#
-
-mkl_meta_set "description" "name"      "librdkafka"
-mkl_meta_set "description" "oneline"   "The Apache Kafka C/C++ library"
-mkl_meta_set "description" "long"      "Full Apache Kafka protocol support, including producer and consumer"
-mkl_meta_set "description" "copyright" "Copyright (c) 2012-2015 Magnus Edenhill"
-
-# Enable generation of pkg-config .pc file
-mkl_mkvar_set "" GEN_PKG_CONFIG y
-
-
-mkl_require cxx
-mkl_require lib
-mkl_require pic
-mkl_require atomics
-mkl_require good_cflags
-mkl_require socket
-
-# Generate version variables from the rdkafka.h hex version define
-# so we can use it as a string version when generating a pkg-config file.
-
-verdef=$(grep '^#define  *RD_KAFKA_VERSION  *0x' src/rdkafka.h | sed 's/^#define  *RD_KAFKA_VERSION  *\(0x[a-f0-9]*\)\.*$/\1/')
-mkl_require parseversion hex2str "%d.%d.%d" "$verdef" RDKAFKA_VERSION_STR
-
-mkl_toggle_option "Development" ENABLE_DEVEL "--enable-devel" "Enable development asserts, checks, etc" "n"
-mkl_toggle_option "Development" ENABLE_VALGRIND "--enable-valgrind" "Enable in-code valgrind suppressions" "n"
-
-mkl_toggle_option "Development" ENABLE_REFCNT_DEBUG "--enable-refcnt-debug" "Enable refcnt debugging" "n"
-
-mkl_toggle_option "Development" ENABLE_SHAREDPTR_DEBUG "--enable-sharedptr-debug" "Enable sharedptr debugging" "n"
-
-mkl_toggle_option "Feature" ENABLE_LZ4_EXT "--enable-lz4" "Enable external LZ4 library support" "y"
-
-mkl_toggle_option "Feature" ENABLE_SSL "--enable-ssl" "Enable SSL support" "y"
-mkl_toggle_option "Feature" ENABLE_SASL "--enable-sasl" "Enable SASL support with Cyrus libsasl2" "y"
-
-
-function checks {
-
-    # required libs
-    mkl_lib_check "libpthread" "" fail CC "-lpthread" \
-                  "#include <pthread.h>"
-
-    # optional libs
-    mkl_lib_check "zlib" "WITH_ZLIB" disable CC "-lz" \
-                  "#include <zlib.h>"
-    mkl_lib_check "libcrypto" "" disable CC "-lcrypto"
-
-    if [[ "$ENABLE_LZ4_EXT" == "y" ]]; then
-        mkl_lib_check --static=-llz4 "liblz4" "WITH_LZ4_EXT" disable CC "-llz4" \
-                      "#include <lz4frame.h>"
-    fi
-
-    # Snappy support is built-in
-    mkl_allvar_set WITH_SNAPPY WITH_SNAPPY y
-
-    # Enable sockem (tests)
-    mkl_allvar_set WITH_SOCKEM WITH_SOCKEM y
-
-    if [[ "$ENABLE_SSL" == "y" ]]; then
-	mkl_meta_set "libssl" "deb" "libssl-dev"
-        if [[ $MKL_DISTRO == "osx" ]]; then
-            # Add brew's OpenSSL pkg-config path on OSX
-            export PKG_CONFIG_PATH="$PKG_CONFIG_PATH:/usr/local/opt/openssl/lib/pkgconfig"
-        fi
-	mkl_lib_check "libssl" "WITH_SSL" disable CC "-lssl" \
-                      "#include <openssl/ssl.h>"
-    fi
-
-    if [[ "$ENABLE_SASL" == "y" ]]; then
-        mkl_meta_set "libsasl2" "deb" "libsasl2-dev"
-        if ! mkl_lib_check "libsasl2" "WITH_SASL_CYRUS" disable CC "-lsasl2" "#include <sasl/sasl.h>" ; then
-	    mkl_lib_check "libsasl" "WITH_SASL_CYRUS" disable CC "-lsasl" \
-                          "#include <sasl/sasl.h>"
-        fi
-    fi
-
-    if [[ "$WITH_SSL" == "y" ]]; then
-        # SASL SCRAM requires base64 encoding from OpenSSL
-        mkl_allvar_set WITH_SASL_SCRAM WITH_SASL_SCRAM y
-    fi
-
-    # CRC32C: check for crc32 instruction support.
-    #         This is also checked during runtime using cpuid.
-    mkl_compile_check crc32chw WITH_CRC32C_HW disable CC "" \
-                      "
-#include <inttypes.h>
-#include <stdio.h>
-#define LONGx1 \"8192\"
-#define LONGx2 \"16384\"
-void foo (void) {
-   const char *n = \"abcdefghijklmnopqrstuvwxyz0123456789\";
-   uint64_t c0 = 0, c1 = 1, c2 = 2;
-   uint64_t s;
-   uint32_t eax = 1, ecx;
-   __asm__(\"cpuid\"
-           : \"=c\"(ecx)
-           : \"a\"(eax)
-           : \"%ebx\", \"%edx\");
-   __asm__(\"crc32b\t\" \"(%1), %0\"
-           : \"=r\"(c0)
-           : \"r\"(n), \"0\"(c0));
-   __asm__(\"crc32q\t\" \"(%3), %0\n\t\"
-           \"crc32q\t\" LONGx1 \"(%3), %1\n\t\"
-           \"crc32q\t\" LONGx2 \"(%3), %2\"
-           : \"=r\"(c0), \"=r\"(c1), \"=r\"(c2)
-           : \"r\"(n), \"0\"(c0), \"1\"(c1), \"2\"(c2));
-  s = c0 + c1 + c2;
-  printf(\"avoiding unused code removal by printing %d, %d, %d\n\", (int)s, (int)eax, (int)ecx);
-}
-"
-
-
-    # Check for libc regex
-    mkl_compile_check "regex" "HAVE_REGEX" disable CC "" \
-"
-#include <stddef.h>
-#include <regex.h>
-void foo (void) {
-   regcomp(NULL, NULL, 0);
-   regexec(NULL, NULL, 0, NULL, 0);
-   regerror(0, NULL, NULL, 0);
-   regfree(NULL);
-}"
-
-
-    # -lrt is needed on linux for clock_gettime: link it if it exists.
-    mkl_lib_check "librt" "" cont CC "-lrt"
-
-    # Older g++ (<=4.1?) gives invalid warnings for the C++ code.
-    mkl_mkvar_append CXXFLAGS CXXFLAGS "-Wno-non-virtual-dtor"
-
-    # Required on SunOS
-    if [[ $MKL_DISTRO == "SunOS" ]]; then
-	mkl_mkvar_append CPPFLAGS CPPFLAGS "-D_POSIX_PTHREAD_SEMANTICS -D_REENTRANT -D__EXTENSIONS__"
-	# Source defines _POSIX_C_SOURCE to 200809L for Solaris, and this is
-	# incompatible on that platform with compilers < c99.
-	mkl_mkvar_append CFLAGS CFLAGS "-std=c99"
-    fi
-
-    # Check if strndup() is available (isn't on Solaris 10)
-    mkl_compile_check "strndup" "HAVE_STRNDUP" disable CC "" \
-"#include <string.h>
-int foo (void) {
-   return strndup(\"hi\", 2) ? 0 : 1;
-}"
-
-    # Check if strerror_r() is available.
-    # The check for GNU vs XSI is done in rdposix.h since
-    # we can't rely on all defines to be set here (_GNU_SOURCE).
-    mkl_compile_check "strerror_r" "HAVE_STRERROR_R" disable CC "" \
-"#include <string.h>
-const char *foo (void) {
-   static char buf[64];
-   strerror_r(1, buf, sizeof(buf));
-   return buf;
-}"
-
-    # Check if dlopen() is available
-    mkl_lib_check "libdl" "WITH_LIBDL" disable CC "-ldl" \
-"
-#include <stdlib.h>
-#include <dlfcn.h>
-void foo (void) {
-   void *h = dlopen(\"__bad_lib\", 0);
-   void *p = dlsym(h, \"sym\");
-   if (p)
-     p = NULL;
-   dlclose(h);
-}"
-
-    if [[ $WITH_LIBDL == "y" ]]; then
-        mkl_allvar_set WITH_PLUGINS WITH_PLUGINS y
-    fi
-
-    # Figure out what tool to use for dumping public symbols.
-    # We rely on configure.cc setting up $NM if it exists.
-    if mkl_env_check "nm" "" cont "NM" ; then
-	# nm by future mk var
-	if [[ $MKL_DISTRO == "osx" || $MKL_DISTRO == "AIX" ]]; then
-	    mkl_mkvar_set SYMDUMPER SYMDUMPER '$(NM) -g'
-	else
-	    mkl_mkvar_set SYMDUMPER SYMDUMPER '$(NM) -D'
-	fi
-    else
-	# Fake symdumper
-	mkl_mkvar_set SYMDUMPER SYMDUMPER 'echo'
-    fi
-
-    # The linker-script generator (lds-gen.py) requires python
-    if [[ $WITH_LDS == y ]]; then
-        if ! mkl_command_check python "HAVE_PYTHON" "disable" "python -V"; then
-            mkl_err "disabling linker-script since python is not available"
-            mkl_mkvar_set WITH_LDS WITH_LDS "n"
-        fi
-    fi
-
-    if [[ "$ENABLE_VALGRIND" == "y" ]]; then
-	mkl_compile_check valgrind WITH_VALGRIND disable CC "" \
-			  "#include <valgrind/memcheck.h>"
-    fi
-}
-

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/dev-conf.sh
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/dev-conf.sh b/thirdparty/librdkafka-0.11.1/dev-conf.sh
deleted file mode 100755
index b9b93f4..0000000
--- a/thirdparty/librdkafka-0.11.1/dev-conf.sh
+++ /dev/null
@@ -1,15 +0,0 @@
-#!/bin/bash
-#
-# Configure librdkafka for development
-
-set -e
-./configure --clean
-#export CFLAGS='-std=c99 -pedantic -Wshadow'
-#export CXXFLAGS='-std=c++98 -pedantic'
-
-FSAN="-fsanitize=address"
-export CPPFLAGS="$CPPFLAGS $FSAN"
-export LDFLAGS="$LDFLAGS $FSAN"
-./configure --enable-devel --enable-werror
-#--disable-optimization
-#            --enable-sharedptr-debug #--enable-refcnt-debug

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/examples/.gitignore
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/examples/.gitignore b/thirdparty/librdkafka-0.11.1/examples/.gitignore
deleted file mode 100644
index c06a6cb..0000000
--- a/thirdparty/librdkafka-0.11.1/examples/.gitignore
+++ /dev/null
@@ -1,7 +0,0 @@
-rdkafka_example
-rdkafka_performance
-rdkafka_example_cpp
-rdkafka_consumer_example
-rdkafka_consumer_example_cpp
-kafkatest_verifiable_client
-rdkafka_simple_producer

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/examples/CMakeLists.txt
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/examples/CMakeLists.txt b/thirdparty/librdkafka-0.11.1/examples/CMakeLists.txt
deleted file mode 100644
index 2ae7784..0000000
--- a/thirdparty/librdkafka-0.11.1/examples/CMakeLists.txt
+++ /dev/null
@@ -1,20 +0,0 @@
-add_executable(rdkafka_example rdkafka_example.c)
-target_link_libraries(rdkafka_example PUBLIC rdkafka)
-
-add_executable(rdkafka_simple_producer rdkafka_simple_producer.c)
-target_link_libraries(rdkafka_simple_producer PUBLIC rdkafka)
-
-add_executable(rdkafka_consumer_example rdkafka_consumer_example.c)
-target_link_libraries(rdkafka_consumer_example PUBLIC rdkafka)
-
-add_executable(rdkafka_performance rdkafka_performance.c)
-target_link_libraries(rdkafka_performance PUBLIC rdkafka)
-
-add_executable(rdkafka_example_cpp rdkafka_example.cpp)
-target_link_libraries(rdkafka_example_cpp PUBLIC rdkafka++)
-
-add_executable(kafkatest_verifiable_client kafkatest_verifiable_client.cpp)
-target_link_libraries(kafkatest_verifiable_client PUBLIC rdkafka++)
-
-add_executable(rdkafka_consumer_example_cpp rdkafka_consumer_example.cpp)
-target_link_libraries(rdkafka_consumer_example_cpp PUBLIC rdkafka++)

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/examples/Makefile
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/examples/Makefile b/thirdparty/librdkafka-0.11.1/examples/Makefile
deleted file mode 100644
index 5a33a52..0000000
--- a/thirdparty/librdkafka-0.11.1/examples/Makefile
+++ /dev/null
@@ -1,92 +0,0 @@
-EXAMPLES ?= rdkafka_example rdkafka_performance rdkafka_example_cpp \
-	rdkafka_consumer_example rdkafka_consumer_example_cpp \
-	kafkatest_verifiable_client rdkafka_simple_producer
-
-all: $(EXAMPLES)
-
-include ../mklove/Makefile.base
-
-CFLAGS += -I../src
-CXXFLAGS += -I../src-cpp
-
-# librdkafka must be compiled with -gstrict-dwarf, but rdkafka_example must not,
-# due to some clang bug on OSX 10.9
-CPPFLAGS := $(subst strict-dwarf,,$(CPPFLAGS))
-
-rdkafka_example: ../src/librdkafka.a rdkafka_example.c
-	$(CC) $(CPPFLAGS) $(CFLAGS) rdkafka_example.c -o $@ $(LDFLAGS) \
-		../src/librdkafka.a $(LIBS)
-	@echo "# $@ is ready"
-	@echo "#"
-	@echo "# Run producer (write messages on stdin)"
-	@echo "./$@ -P -t <topic> -p <partition>"
-	@echo ""
-	@echo "# or consumer"
-	@echo "./$@ -C -t <topic> -p <partition>"
-	@echo ""
-	@echo "#"
-	@echo "# More usage options:"
-	@echo "./$@ -h"
-
-rdkafka_simple_producer: ../src/librdkafka.a rdkafka_simple_producer.c
-	$(CC) $(CPPFLAGS) $(CFLAGS) $@.c -o $@ $(LDFLAGS) \
-		../src/librdkafka.a $(LIBS)
-
-rdkafka_consumer_example: ../src/librdkafka.a rdkafka_consumer_example.c
-	$(CC) $(CPPFLAGS) $(CFLAGS) rdkafka_consumer_example.c -o $@ $(LDFLAGS) \
-		../src/librdkafka.a $(LIBS)
-	@echo "# $@ is ready"
-	@echo "#"
-	@echo "./$@ <topic[:part]> <topic2[:part]> .."
-	@echo ""
-	@echo "#"
-	@echo "# More usage options:"
-	@echo "./$@ -h"
-
-rdkafka_performance: ../src/librdkafka.a rdkafka_performance.c
-	$(CC) $(CPPFLAGS) $(CFLAGS) rdkafka_performance.c -o $@ $(LDFLAGS) \
-		../src/librdkafka.a $(LIBS)
-	@echo "# $@ is ready"
-	@echo "#"
-	@echo "# Run producer"
-	@echo "./$@ -P -t <topic> -p <partition> -s <msgsize>"
-	@echo ""
-	@echo "# or consumer"
-	@echo "./$@ -C -t <topic> -p <partition>"
-	@echo ""
-	@echo "#"
-	@echo "# More usage options:"
-	@echo "./$@ -h"
-
-
-rdkafka_example_cpp: ../src-cpp/librdkafka++.a ../src/librdkafka.a rdkafka_example.cpp
-	$(CXX) $(CPPFLAGS) $(CXXFLAGS) rdkafka_example.cpp -o $@ $(LDFLAGS) \
-		../src-cpp/librdkafka++.a ../src/librdkafka.a $(LIBS) -lstdc++
-
-kafkatest_verifiable_client: ../src-cpp/librdkafka++.a ../src/librdkafka.a kafkatest_verifiable_client.cpp
-	$(CXX) $(CPPFLAGS) $(CXXFLAGS) kafkatest_verifiable_client.cpp -o $@ $(LDFLAGS) \
-		../src-cpp/librdkafka++.a ../src/librdkafka.a $(LIBS) -lstdc++
-
-
-rdkafka_consumer_example_cpp: ../src-cpp/librdkafka++.a ../src/librdkafka.a rdkafka_consumer_example.cpp
-	$(CXX) $(CPPFLAGS) $(CXXFLAGS) rdkafka_consumer_example.cpp -o $@ $(LDFLAGS) \
-		../src-cpp/librdkafka++.a ../src/librdkafka.a $(LIBS) -lstdc++
-
-rdkafka_zookeeper_example: ../src/librdkafka.a rdkafka_zookeeper_example.c
-	$(CC) $(CPPFLAGS) $(CFLAGS) -I/usr/include/zookeeper rdkafka_zookeeper_example.c -o $@ $(LDFLAGS) \
-		../src/librdkafka.a $(LIBS) -lzookeeper_mt -ljansson
-	@echo "# $@ is ready"
-	@echo "#"
-	@echo "# Run producer (write messages on stdin)"
-	@echo "./$@ -P -t <topic> -p <partition>"
-	@echo ""
-	@echo "# or consumer"
-	@echo "./$@ -C -t <topic> -p <partition>"
-	@echo ""
-	@echo "#"
-	@echo "# More usage options:"
-	@echo "./$@ -h"
-
-clean:
-	rm -f $(EXAMPLES)
-

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/examples/globals.json
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/examples/globals.json b/thirdparty/librdkafka-0.11.1/examples/globals.json
deleted file mode 100644
index 527e126..0000000
--- a/thirdparty/librdkafka-0.11.1/examples/globals.json
+++ /dev/null
@@ -1,11 +0,0 @@
-{"VerifiableConsumer":
- {
-     "class": "kafkatest.services.verifiable_client.VerifiableClientApp",
-     "exec_cmd": "/vagrant/tests/c/kafkatest_verifiable_client --consumer --debug cgrp,topic,protocol,broker"
- },
- "VerifiableProducer":
- {
-     "class": "kafkatest.services.verifiable_client.VerifiableClientApp",
-     "exec_cmd": "/vagrant/tests/c/kafkatest_verifiable_client --producer --debug topic,broker"
- }
-}


[25/51] [partial] nifi-minifi-cpp git commit: MINIFICPP-512 - upgrade to librdkafka 0.11.4

Posted by ph...@apache.org.
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdkafka_msgset_reader.c
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdkafka_msgset_reader.c b/thirdparty/librdkafka-0.11.1/src/rdkafka_msgset_reader.c
deleted file mode 100644
index a073819..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdkafka_msgset_reader.c
+++ /dev/null
@@ -1,1090 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2017 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/**
- * @name MessageSet reader interface
- *
- * Parses FetchResponse for Messages
- *
- *
- * @remark
- * The broker may send partial messages; when this happens we bail out
- * silently and keep the messages that we successfully parsed.
- *
- * "A Guide To The Kafka Protocol" states:
- *   "As an optimization the server is allowed to
- *    return a partial message at the end of the
- *    message set.
- *    Clients should handle this case."
- *
- * We're handling it by not passing the error upstream.
- * This is why most err_parse: goto labels (that are called from buf parsing
- * macros) suppress the error message and why log_decode_errors is off
- * unless PROTOCOL debugging is enabled.
- *
- * When a FetchResponse contains multiple partitions, each partition's
- * MessageSet may be partial, regardless of the other partitions.
- * To make sure the next partition can be parsed, each partition parse
- * uses its own sub-slice of only that partition's MessageSetSize length.
- */
-
-#include "rd.h"
-#include "rdkafka_int.h"
-#include "rdkafka_msg.h"
-#include "rdkafka_msgset.h"
-#include "rdkafka_topic.h"
-#include "rdkafka_partition.h"
-#include "rdkafka_lz4.h"
-
-#include "rdvarint.h"
-#include "crc32c.h"
-
-#if WITH_ZLIB
-#include "rdgz.h"
-#endif
-#if WITH_SNAPPY
-#include "snappy.h"
-#endif
-
-
-
-struct msgset_v2_hdr {
-        int64_t BaseOffset;
-        int32_t Length;
-        int32_t PartitionLeaderEpoch;
-        int8_t  MagicByte;
-        int32_t Crc;
-        int16_t Attributes;
-        int32_t LastOffsetDelta;
-        int64_t BaseTimestamp;
-        int64_t MaxTimestamp;
-        int64_t PID;
-        int16_t ProducerEpoch;
-        int32_t BaseSequence;
-        int32_t RecordCount;
-};
-
-
-typedef struct rd_kafka_msgset_reader_s {
-        rd_kafka_buf_t *msetr_rkbuf;     /**< Response read buffer */
-
-        int     msetr_relative_offsets;  /**< Bool: using relative offsets */
-
-        /**< Outer/wrapper Message fields. */
-        struct {
-                int64_t offset;      /**< Relative_offsets: outer message's
-                                      *   Offset (last offset) */
-                rd_kafka_timestamp_type_t tstype; /**< Compressed
-                                                   *   MessageSet's
-                                                   *   timestamp type. */
-                int64_t timestamp;                /**< ... timestamp*/
-        } msetr_outer;
-
-        struct msgset_v2_hdr   *msetr_v2_hdr;    /**< MessageSet v2 header */
-
-        const struct rd_kafka_toppar_ver *msetr_tver; /**< Toppar op version of
-                                                       *   request. */
-
-        rd_kafka_broker_t *msetr_rkb;    /* @warning Not a refcounted
-                                          *          reference! */
-        rd_kafka_toppar_t *msetr_rktp;   /* @warning Not a refcounted
-                                          *          reference! */
-
-        int          msetr_msgcnt;      /**< Number of messages in rkq */
-        rd_kafka_q_t msetr_rkq;         /**< Temp Message and error queue */
-        rd_kafka_q_t *msetr_par_rkq;    /**< Parent message and error queue,
-                                         *   the temp msetr_rkq will be moved
-                                         *   to this queue when parsing
-                                         *   is done.
-                                         *   Refcount is not increased. */
-} rd_kafka_msgset_reader_t;
-
-
-
-/* Forward declarations */
-static rd_kafka_resp_err_t
-rd_kafka_msgset_reader_run (rd_kafka_msgset_reader_t *msetr);
-static rd_kafka_resp_err_t
-rd_kafka_msgset_reader_msgs_v2 (rd_kafka_msgset_reader_t *msetr);
-
-
-/**
- * @brief Set up a MessageSet reader but don't start reading messages.
- */
-static void
-rd_kafka_msgset_reader_init (rd_kafka_msgset_reader_t *msetr,
-                             rd_kafka_buf_t *rkbuf,
-                             rd_kafka_toppar_t *rktp,
-                             const struct rd_kafka_toppar_ver *tver,
-                             rd_kafka_q_t *par_rkq) {
-
-        memset(msetr, 0, sizeof(*msetr));
-
-        msetr->msetr_rkb        = rkbuf->rkbuf_rkb;
-        msetr->msetr_rktp       = rktp;
-        msetr->msetr_tver       = tver;
-        msetr->msetr_rkbuf      = rkbuf;
-
-        /* All parsed messages are put on this temporary op
-         * queue first and then moved in one go to the real op queue. */
-        rd_kafka_q_init(&msetr->msetr_rkq, msetr->msetr_rkb->rkb_rk);
-
-        /* Make sure enqueued ops get the correct serve/opaque reflecting the
-         * original queue. */
-        msetr->msetr_rkq.rkq_serve  = par_rkq->rkq_serve;
-        msetr->msetr_rkq.rkq_opaque = par_rkq->rkq_opaque;
-
-        /* Keep (non-refcounted) reference to parent queue for
-         * moving the messages and events in msetr_rkq to when
-         * parsing is done. */
-        msetr->msetr_par_rkq = par_rkq;
-}
-
-
-
-
-
-/**
- * @brief Decompress MessageSet, pass the uncompressed MessageSet to
- *        the MessageSet reader.
- */
-static rd_kafka_resp_err_t
-rd_kafka_msgset_reader_decompress (rd_kafka_msgset_reader_t *msetr,
-                                   int MsgVersion, int Attributes,
-                                   int64_t Timestamp, int64_t Offset,
-                                   const void *compressed,
-                                   size_t compressed_size) {
-        struct iovec iov = { .iov_base = NULL, .iov_len = 0 };
-        rd_kafka_toppar_t *rktp = msetr->msetr_rktp;
-        int codec = Attributes & RD_KAFKA_MSG_ATTR_COMPRESSION_MASK;
-        rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;
-        rd_kafka_buf_t *rkbufz;
-
-        switch (codec)
-        {
-#if WITH_ZLIB
-        case RD_KAFKA_COMPRESSION_GZIP:
-        {
-                uint64_t outlenx = 0;
-
-                /* Decompress Message payload */
-                iov.iov_base = rd_gz_decompress(compressed, (int)compressed_size,
-                                                &outlenx);
-                if (unlikely(!iov.iov_base)) {
-                        rd_rkb_dbg(msetr->msetr_rkb, MSG, "GZIP",
-                                   "Failed to decompress Gzip "
-                                   "message at offset %"PRId64
-                                   " of %"PRIusz" bytes: "
-                                   "ignoring message",
-                                   Offset, compressed_size);
-                        err = RD_KAFKA_RESP_ERR__BAD_COMPRESSION;
-                        goto err;
-                }
-
-                iov.iov_len = (size_t)outlenx;
-        }
-        break;
-#endif
-
-#if WITH_SNAPPY
-        case RD_KAFKA_COMPRESSION_SNAPPY:
-        {
-                const char *inbuf = compressed;
-                size_t inlen = compressed_size;
-                int r;
-                static const unsigned char snappy_java_magic[] =
-                        { 0x82, 'S','N','A','P','P','Y', 0 };
-                static const size_t snappy_java_hdrlen = 8+4+4;
-
-                /* snappy-java adds its own header (SnappyCodec)
-                 * which is not compatible with the official Snappy
-                 * implementation.
-                 *   8: magic, 4: version, 4: compatible
-                 * followed by any number of chunks:
-                 *   4: length
-                 * ...: snappy-compressed data. */
-                if (likely(inlen > snappy_java_hdrlen + 4 &&
-                           !memcmp(inbuf, snappy_java_magic, 8))) {
-                        /* snappy-java framing */
-                        char errstr[128];
-
-                        inbuf  = inbuf + snappy_java_hdrlen;
-                        inlen -= snappy_java_hdrlen;
-                        iov.iov_base = rd_kafka_snappy_java_uncompress(
-                                inbuf, inlen,
-                                &iov.iov_len,
-                                errstr, sizeof(errstr));
-
-                        if (unlikely(!iov.iov_base)) {
-                                rd_rkb_dbg(msetr->msetr_rkb, MSG, "SNAPPY",
-                                           "%s [%"PRId32"]: "
-                                           "Snappy decompression for message "
-                                           "at offset %"PRId64" failed: %s: "
-                                           "ignoring message",
-                                           rktp->rktp_rkt->rkt_topic->str,
-                                           rktp->rktp_partition,
-                                           Offset, errstr);
-                                err = RD_KAFKA_RESP_ERR__BAD_COMPRESSION;
-                                goto err;
-                        }
-
-
-                } else {
-                        /* No framing */
-
-                        /* Acquire uncompressed length */
-                        if (unlikely(!rd_kafka_snappy_uncompressed_length(
-                                             inbuf, inlen, &iov.iov_len))) {
-                                rd_rkb_dbg(msetr->msetr_rkb, MSG, "SNAPPY",
-                                           "Failed to get length of Snappy "
-                                           "compressed payload "
-                                           "for message at offset %"PRId64
-                                           " (%"PRIusz" bytes): "
-                                           "ignoring message",
-                                           Offset, inlen);
-                                err = RD_KAFKA_RESP_ERR__BAD_COMPRESSION;
-                                goto err;
-                        }
-
-                        /* Allocate output buffer for uncompressed data */
-                        iov.iov_base = rd_malloc(iov.iov_len);
-                        if (unlikely(!iov.iov_base)) {
-                                rd_rkb_dbg(msetr->msetr_rkb, MSG, "SNAPPY",
-                                           "Failed to allocate Snappy "
-                                           "decompress buffer of size %"PRIusz
-                                           "for message at offset %"PRId64
-                                           " (%"PRIusz" bytes): %s: "
-                                           "ignoring message",
-                                           iov.iov_len, Offset, inlen,
-                                           rd_strerror(errno));
-                                err = RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE;
-                                goto err;
-                        }
-
-                        /* Uncompress to outbuf */
-                        if (unlikely((r = rd_kafka_snappy_uncompress(
-                                              inbuf, inlen, iov.iov_base)))) {
-                                rd_rkb_dbg(msetr->msetr_rkb, MSG, "SNAPPY",
-                                           "Failed to decompress Snappy "
-                                           "payload for message at offset "
-                                           "%"PRId64" (%"PRIusz" bytes): %s: "
-                                           "ignoring message",
-                                           Offset, inlen,
-                                           rd_strerror(-r/*negative errno*/));
-                                rd_free(iov.iov_base);
-                                err = RD_KAFKA_RESP_ERR__BAD_COMPRESSION;
-                                goto err;
-                        }
-                }
-
-        }
-        break;
-#endif
-
-        case RD_KAFKA_COMPRESSION_LZ4:
-        {
-                err = rd_kafka_lz4_decompress(msetr->msetr_rkb,
-                                              /* Proper HC? */
-                                              MsgVersion >= 1 ? 1 : 0,
-                                              Offset,
-                                              /* @warning Will modify compressed
-                                               *          if no proper HC */
-                                              (char *)compressed,
-                                              compressed_size,
-                                              &iov.iov_base, &iov.iov_len);
-                if (err)
-                        goto err;
-        }
-        break;
-
-        default:
-                rd_rkb_dbg(msetr->msetr_rkb, MSG, "CODEC",
-                           "%s [%"PRId32"]: Message at offset %"PRId64
-                           " with unsupported "
-                           "compression codec 0x%x: message ignored",
-                           rktp->rktp_rkt->rkt_topic->str,
-                           rktp->rktp_partition,
-                           Offset, (int)codec);
-
-                err = RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED;
-                goto err;
-        }
-
-
-        rd_assert(iov.iov_base);
-
-        /*
-         * Decompression successful
-         */
-
-        /* Create a new buffer pointing to the uncompressed
-         * allocated buffer (outbuf) and let messages keep a reference to
-         * this new buffer. */
-        rkbufz = rd_kafka_buf_new_shadow(iov.iov_base, iov.iov_len, rd_free);
-        rkbufz->rkbuf_rkb = msetr->msetr_rkbuf->rkbuf_rkb;
-        rd_kafka_broker_keep(rkbufz->rkbuf_rkb);
-
-
-        /* In MsgVersion v0..1 the decompressed data contains
-         * an inner MessageSet, pass it to a new MessageSet reader.
-         *
-         * For MsgVersion v2 the decompressed data are the list of messages.
-         */
-
-        if (MsgVersion <= 1) {
-                /* Pass decompressed data (inner Messageset)
-                 * to new instance of the MessageSet parser. */
-                rd_kafka_msgset_reader_t inner_msetr;
-                rd_kafka_msgset_reader_init(&inner_msetr,
-                                            rkbufz,
-                                            msetr->msetr_rktp,
-                                            msetr->msetr_tver,
-                                            &msetr->msetr_rkq);
-
-                if (MsgVersion == 1) {
-                        /* postproc() will convert relative to
-                         * absolute offsets */
-                        inner_msetr.msetr_relative_offsets = 1;
-                        inner_msetr.msetr_outer.offset = Offset;
-
-                        /* Apply single LogAppendTime timestamp for
-                         * all messages. */
-                        if (Attributes & RD_KAFKA_MSG_ATTR_LOG_APPEND_TIME) {
-                                inner_msetr.msetr_outer.tstype =
-                                        RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME;
-                                inner_msetr.msetr_outer.timestamp = Timestamp;
-                        }
-                }
-
-                /* Parse the inner MessageSet */
-                err = rd_kafka_msgset_reader_run(&inner_msetr);
-
-
-        } else {
-                /* MsgVersion 2 */
-                rd_kafka_buf_t *orig_rkbuf = msetr->msetr_rkbuf;
-
-                /* Temporarily replace read buffer with uncompressed buffer */
-                msetr->msetr_rkbuf = rkbufz;
-
-                /* Read messages */
-                err = rd_kafka_msgset_reader_msgs_v2(msetr);
-
-                /* Restore original buffer */
-                msetr->msetr_rkbuf = orig_rkbuf;
-        }
-
-        /* Lose our refcnt of the uncompressed rkbuf.
-         * Individual messages/rko's will have their own reference. */
-        rd_kafka_buf_destroy(rkbufz);
-
-        return err;
-
- err:
-        /* Enqueue error message:
-         * Create op and push on temporary queue. */
-        rd_kafka_q_op_err(&msetr->msetr_rkq, RD_KAFKA_OP_CONSUMER_ERR,
-                          err, msetr->msetr_tver->version, rktp, Offset,
-                          "Decompression (codec 0x%x) of message at %"PRIu64
-                          " of %"PRIu64" bytes failed: %s",
-                          codec, Offset, compressed_size, rd_kafka_err2str(err));
-
-        return err;
-
-}
-
-
-
-/**
- * @brief Message parser for MsgVersion v0..1
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or on single-message errors,
- *          or any other error code when the MessageSet parser should stop
- *          parsing (such as for partial Messages).
- */
-static rd_kafka_resp_err_t
-rd_kafka_msgset_reader_msg_v0_1 (rd_kafka_msgset_reader_t *msetr) {
-        rd_kafka_buf_t *rkbuf = msetr->msetr_rkbuf;
-        rd_kafka_toppar_t *rktp = msetr->msetr_rktp;
-        rd_kafka_broker_t *rkb = msetr->msetr_rkb;
-        struct {
-                int64_t Offset;       /* MessageSet header */
-                int32_t MessageSize;  /* MessageSet header */
-                uint32_t Crc;
-                int8_t  MagicByte;  /* MsgVersion */
-                int8_t  Attributes;
-                int64_t Timestamp;  /* v1 */
-        } hdr; /* Message header */
-        rd_kafkap_bytes_t Key;
-        rd_kafkap_bytes_t Value;
-        int32_t Value_len;
-        rd_kafka_op_t *rko;
-        size_t hdrsize = 6; /* Header size following MessageSize */
-        rd_slice_t crc_slice;
-        rd_kafka_msg_t *rkm;
-        int relative_offsets = 0;
-        const char *reloff_str = "";
-        /* Only log decoding errors if protocol debugging enabled. */
-        int log_decode_errors = (rkbuf->rkbuf_rkb->rkb_rk->rk_conf.debug &
-                                 RD_KAFKA_DBG_PROTOCOL) ? LOG_DEBUG : 0;
-        size_t message_end;
-
-        rd_kafka_buf_read_i64(rkbuf, &hdr.Offset);
-        rd_kafka_buf_read_i32(rkbuf, &hdr.MessageSize);
-        message_end = rd_slice_offset(&rkbuf->rkbuf_reader) + hdr.MessageSize;
-
-        rd_kafka_buf_read_i32(rkbuf, &hdr.Crc);
-        if (!rd_slice_narrow_copy_relative(&rkbuf->rkbuf_reader, &crc_slice,
-                                           hdr.MessageSize - 4))
-                rd_kafka_buf_check_len(rkbuf, hdr.MessageSize - 4);
-
-        rd_kafka_buf_read_i8(rkbuf, &hdr.MagicByte);
-        rd_kafka_buf_read_i8(rkbuf, &hdr.Attributes);
-
-        if (hdr.MagicByte == 1) { /* MsgVersion */
-                rd_kafka_buf_read_i64(rkbuf, &hdr.Timestamp);
-                hdrsize += 8;
-                /* MsgVersion 1 has relative offsets for compressed MessageSets */
-                if (!(hdr.Attributes & RD_KAFKA_MSG_ATTR_COMPRESSION_MASK) &&
-                    msetr->msetr_relative_offsets) {
-                        relative_offsets = 1;
-                        reloff_str = "relative ";
-                }
-        } else
-                hdr.Timestamp = 0;
-
-        /* Verify MessageSize */
-        if (unlikely(hdr.MessageSize < (ssize_t)hdrsize))
-                rd_kafka_buf_parse_fail(rkbuf,
-                                        "Message at %soffset %"PRId64
-                                        " MessageSize %"PRId32
-                                        " < hdrsize %"PRIusz,
-                                        reloff_str,
-                                        hdr.Offset, hdr.MessageSize, hdrsize);
-
-        /* Early check for partial messages */
-        rd_kafka_buf_check_len(rkbuf, hdr.MessageSize - hdrsize);
-
-        if (rkb->rkb_rk->rk_conf.check_crcs) {
-                /* Verify CRC32 if desired. */
-                uint32_t calc_crc;
-
-                calc_crc = rd_slice_crc32(&crc_slice);
-                rd_dassert(rd_slice_remains(&crc_slice) == 0);
-
-                if (unlikely(hdr.Crc != calc_crc)) {
-                        /* Propagate CRC error to application and
-                         * continue with next message. */
-                        rd_kafka_q_op_err(&msetr->msetr_rkq,
-                                          RD_KAFKA_OP_CONSUMER_ERR,
-                                          RD_KAFKA_RESP_ERR__BAD_MSG,
-                                          msetr->msetr_tver->version,
-                                          rktp,
-                                          hdr.Offset,
-                                          "Message at %soffset %"PRId64
-                                          " (%"PRId32" bytes) "
-                                          "failed CRC32 check "
-                                          "(original 0x%"PRIx32" != "
-                                          "calculated 0x%"PRIx32")",
-                                          reloff_str, hdr.Offset,
-                                          hdr.MessageSize, hdr.Crc, calc_crc);
-                        rd_kafka_buf_skip_to(rkbuf, message_end);
-                        rd_atomic64_add(&rkb->rkb_c.rx_err, 1);
-                        /* Continue with next message */
-                        return RD_KAFKA_RESP_ERR_NO_ERROR;
-                }
-        }
-
-
-        /* Extract key */
-        rd_kafka_buf_read_bytes(rkbuf, &Key);
-
-        /* Extract Value */
-        rd_kafka_buf_read_bytes(rkbuf, &Value);
-        Value_len = RD_KAFKAP_BYTES_LEN(&Value);
-
-        /* MessageSets may contain offsets earlier than we
-         * requested (compressed MessageSets in particular),
-         * drop the earlier messages.
-         * Note: the inner offset may only be trusted for
-         *       absolute offsets. KIP-31 introduced
-         *       ApiVersion 2 that maintains relative offsets
-         *       of compressed messages and the base offset
-         *       in the outer message is the offset of
-         *       the *LAST* message in the MessageSet.
-         *       This requires us to assign offsets
-         *       after all messages have been read from
-         *       the messageset, and it also means
-         *       we can't perform this offset check here
-         *       in that case. */
-        if (!relative_offsets &&
-            hdr.Offset < rktp->rktp_offsets.fetch_offset)
-                return RD_KAFKA_RESP_ERR_NO_ERROR; /* Continue with next msg */
-
-        /* Handle compressed MessageSet */
-        if (unlikely(hdr.Attributes & RD_KAFKA_MSG_ATTR_COMPRESSION_MASK))
-                return rd_kafka_msgset_reader_decompress(
-                        msetr, hdr.MagicByte, hdr.Attributes, hdr.Timestamp,
-                        hdr.Offset, Value.data, Value_len);
-
-
-        /* Pure uncompressed message, this is the innermost
-         * handler after all compression and cascaded
-         * MessageSets have been peeled off. */
-
-        /* Create op/message container for message. */
-        rko = rd_kafka_op_new_fetch_msg(&rkm, rktp, msetr->msetr_tver->version,
-                                        rkbuf,
-                                        hdr.Offset,
-                                        (size_t)RD_KAFKAP_BYTES_LEN(&Key),
-                                        RD_KAFKAP_BYTES_IS_NULL(&Key) ?
-                                        NULL : Key.data,
-                                        (size_t)RD_KAFKAP_BYTES_LEN(&Value),
-                                        RD_KAFKAP_BYTES_IS_NULL(&Value) ?
-                                        NULL : Value.data);
-
-        /* Assign message timestamp.
-         * If message was in a compressed MessageSet and the outer/wrapper
-         * Message.Attribute had a LOG_APPEND_TIME set, use the
-         * outer timestamp */
-        if (msetr->msetr_outer.tstype == RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME) {
-                rkm->rkm_timestamp = msetr->msetr_outer.timestamp;
-                rkm->rkm_tstype    = msetr->msetr_outer.tstype;
-
-        } else if (hdr.MagicByte >= 1 && hdr.Timestamp) {
-                rkm->rkm_timestamp = hdr.Timestamp;
-                if (hdr.Attributes & RD_KAFKA_MSG_ATTR_LOG_APPEND_TIME)
-                        rkm->rkm_tstype = RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME;
-                else
-                        rkm->rkm_tstype = RD_KAFKA_TIMESTAMP_CREATE_TIME;
-        }
-
-        /* Enqueue message on temporary queue */
-        rd_kafka_q_enq(&msetr->msetr_rkq, rko);
-        msetr->msetr_msgcnt++;
-
-        return RD_KAFKA_RESP_ERR_NO_ERROR; /* Continue */
-
- err_parse:
-        /* Count all parse errors as partial message errors. */
-        rd_atomic64_add(&msetr->msetr_rkb->rkb_c.rx_partial, 1);
-        return rkbuf->rkbuf_err;
-}
-
-
-
-
-/**
- * @brief Message parser for MsgVersion v2
- */
-static rd_kafka_resp_err_t
-rd_kafka_msgset_reader_msg_v2 (rd_kafka_msgset_reader_t *msetr) {
-        rd_kafka_buf_t *rkbuf = msetr->msetr_rkbuf;
-        rd_kafka_toppar_t *rktp = msetr->msetr_rktp;
-        struct {
-                int64_t Length;
-                int64_t  MsgAttributes; /* int8_t, but int64 req. for varint */
-                int64_t TimestampDelta;
-                int64_t OffsetDelta;
-                int64_t Offset;  /* Absolute offset */
-                rd_kafkap_bytes_t Key;
-                rd_kafkap_bytes_t Value;
-                int64_t HeaderCnt;
-        } hdr;
-        rd_kafka_op_t *rko;
-        rd_kafka_msg_t *rkm;
-        /* Only log decoding errors if protocol debugging enabled. */
-        int log_decode_errors = (rkbuf->rkbuf_rkb->rkb_rk->rk_conf.debug &
-                                 RD_KAFKA_DBG_PROTOCOL) ? LOG_DEBUG : 0;
-        size_t message_end;
-
-        rd_kafka_buf_read_varint(rkbuf, &hdr.Length);
-        message_end = rd_slice_offset(&rkbuf->rkbuf_reader) + hdr.Length;
-        rd_kafka_buf_read_varint(rkbuf, &hdr.MsgAttributes);
-
-        rd_kafka_buf_read_varint(rkbuf, &hdr.TimestampDelta);
-        rd_kafka_buf_read_varint(rkbuf, &hdr.OffsetDelta);
-        hdr.Offset = msetr->msetr_v2_hdr->BaseOffset + hdr.OffsetDelta;
-
-        /* Skip message if outdated */
-        if (hdr.Offset < rktp->rktp_offsets.fetch_offset) {
-                rd_rkb_dbg(msetr->msetr_rkb, MSG, "MSG",
-                           "Skip offset %"PRId64" < fetch_offset %"PRId64,
-                           hdr.Offset, rktp->rktp_offsets.fetch_offset);
-                rd_kafka_buf_skip_to(rkbuf, message_end);
-                return RD_KAFKA_RESP_ERR_NO_ERROR; /* Continue with next msg */
-        }
-
-        rd_kafka_buf_read_bytes_varint(rkbuf, &hdr.Key);
-
-        rd_kafka_buf_read_bytes_varint(rkbuf, &hdr.Value);
-
-        /* Ignore headers for now */
-        rd_kafka_buf_skip_to(rkbuf, message_end);
-
-        /* Create op/message container for message. */
-        rko = rd_kafka_op_new_fetch_msg(&rkm,
-                                        rktp, msetr->msetr_tver->version, rkbuf,
-                                        hdr.Offset,
-                                        (size_t)RD_KAFKAP_BYTES_LEN(&hdr.Key),
-                                        RD_KAFKAP_BYTES_IS_NULL(&hdr.Key) ?
-                                        NULL : hdr.Key.data,
-                                        (size_t)RD_KAFKAP_BYTES_LEN(&hdr.Value),
-                                        RD_KAFKAP_BYTES_IS_NULL(&hdr.Value) ?
-                                        NULL : hdr.Value.data);
-
-        /* Set timestamp.
-         *
-         * When broker assigns the timestamps (LOG_APPEND_TIME) it will
-         * assign the same timestamp for all messages in a MessageSet
-         * using MaxTimestamp.
-         */
-        if ((msetr->msetr_v2_hdr->Attributes &
-             RD_KAFKA_MSG_ATTR_LOG_APPEND_TIME) ||
-            (hdr.MsgAttributes & RD_KAFKA_MSG_ATTR_LOG_APPEND_TIME)) {
-                rkm->rkm_tstype = RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME;
-                rkm->rkm_timestamp = msetr->msetr_v2_hdr->MaxTimestamp;
-        } else {
-                rkm->rkm_tstype = RD_KAFKA_TIMESTAMP_CREATE_TIME;
-                rkm->rkm_timestamp =
-                        msetr->msetr_v2_hdr->BaseTimestamp + hdr.TimestampDelta;
-        }
-
-
-        /* Enqueue message on temporary queue */
-        rd_kafka_q_enq(&msetr->msetr_rkq, rko);
-        msetr->msetr_msgcnt++;
-
-        return RD_KAFKA_RESP_ERR_NO_ERROR;
-
- err_parse:
-        /* Count all parse errors as partial message errors. */
-        rd_atomic64_add(&msetr->msetr_rkb->rkb_c.rx_partial, 1);
-        return rkbuf->rkbuf_err;
-}
-
-
-/**
- * @brief Read v2 messages from current buffer position.
- */
-static rd_kafka_resp_err_t
-rd_kafka_msgset_reader_msgs_v2 (rd_kafka_msgset_reader_t *msetr) {
-        while (rd_kafka_buf_read_remain(msetr->msetr_rkbuf)) {
-                rd_kafka_resp_err_t err;
-                err = rd_kafka_msgset_reader_msg_v2(msetr);
-                if (unlikely(err))
-                        return err;
-        }
-
-        return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-
-
-/**
- * @brief MessageSet reader for MsgVersion v2 (FetchRequest v4)
- */
-static rd_kafka_resp_err_t
-rd_kafka_msgset_reader_v2 (rd_kafka_msgset_reader_t *msetr) {
-        rd_kafka_buf_t *rkbuf = msetr->msetr_rkbuf;
-        rd_kafka_toppar_t *rktp = msetr->msetr_rktp;
-        struct msgset_v2_hdr hdr;
-        rd_slice_t save_slice;
-        rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;
-        size_t len_start;
-        size_t payload_size;
-        int64_t LastOffset; /* Last absolute Offset in MessageSet header */
-        /* Only log decoding errors if protocol debugging enabled. */
-        int log_decode_errors = (rkbuf->rkbuf_rkb->rkb_rk->rk_conf.debug &
-                                 RD_KAFKA_DBG_PROTOCOL) ? LOG_DEBUG : 0;
-
-        rd_kafka_buf_read_i64(rkbuf, &hdr.BaseOffset);
-        rd_kafka_buf_read_i32(rkbuf, &hdr.Length);
-        len_start  = rd_slice_offset(&rkbuf->rkbuf_reader);
-
-        if (unlikely(hdr.Length < RD_KAFKAP_MSGSET_V2_SIZE - 8 - 4))
-                rd_kafka_buf_parse_fail(rkbuf,
-                                        "%s [%"PRId32"] "
-                                        "MessageSet at offset %"PRId64
-                                        " length %"PRId32" < header size %d",
-                                        rktp->rktp_rkt->rkt_topic->str,
-                                        rktp->rktp_partition,
-                                        hdr.BaseOffset, hdr.Length,
-                                        RD_KAFKAP_MSGSET_V2_SIZE - 8 - 4);
-
-        rd_kafka_buf_read_i32(rkbuf, &hdr.PartitionLeaderEpoch);
-        rd_kafka_buf_read_i8(rkbuf,  &hdr.MagicByte);
-        rd_kafka_buf_read_i32(rkbuf, &hdr.Crc);
-
-        if (msetr->msetr_rkb->rkb_rk->rk_conf.check_crcs) {
-                /* Verify CRC32C if desired. */
-                uint32_t calc_crc;
-                rd_slice_t crc_slice;
-                size_t crc_len = hdr.Length-4-1-4;
-
-                if (!rd_slice_narrow_copy_relative(
-                            &rkbuf->rkbuf_reader,
-                            &crc_slice, crc_len))
-                        rd_kafka_buf_check_len(rkbuf, crc_len);
-
-                calc_crc = rd_slice_crc32c(&crc_slice);
-
-                if (unlikely((uint32_t)hdr.Crc != calc_crc)) {
-                        /* Propagate CRC error to application and
-                         * continue with next message. */
-                        rd_kafka_q_op_err(&msetr->msetr_rkq,
-                                          RD_KAFKA_OP_CONSUMER_ERR,
-                                          RD_KAFKA_RESP_ERR__BAD_MSG,
-                                          msetr->msetr_tver->version,
-                                          rktp,
-                                          hdr.BaseOffset,
-                                          "MessageSet at offset %"PRId64
-                                          " (%"PRId32" bytes) "
-                                          "failed CRC32C check "
-                                          "(original 0x%"PRIx32" != "
-                                          "calculated 0x%"PRIx32")",
-                                          hdr.BaseOffset,
-                                          hdr.Length, hdr.Crc, calc_crc);
-                        rd_kafka_buf_skip_to(rkbuf, crc_len);
-                        rd_atomic64_add(&msetr->msetr_rkb->rkb_c.rx_err, 1);
-                        return RD_KAFKA_RESP_ERR_NO_ERROR;
-                }
-        }
-
-        rd_kafka_buf_read_i16(rkbuf, &hdr.Attributes);
-        rd_kafka_buf_read_i32(rkbuf, &hdr.LastOffsetDelta);
-        LastOffset = hdr.BaseOffset + hdr.LastOffsetDelta;
-        rd_kafka_buf_read_i64(rkbuf, &hdr.BaseTimestamp);
-        rd_kafka_buf_read_i64(rkbuf, &hdr.MaxTimestamp);
-        rd_kafka_buf_read_i64(rkbuf, &hdr.PID);
-        rd_kafka_buf_read_i16(rkbuf, &hdr.ProducerEpoch);
-        rd_kafka_buf_read_i32(rkbuf, &hdr.BaseSequence);
-        rd_kafka_buf_read_i32(rkbuf, &hdr.RecordCount);
-
-        /* Payload size is hdr.Length - MessageSet headers */
-        payload_size = hdr.Length - (rd_slice_offset(&rkbuf->rkbuf_reader) -
-                                     len_start);
-
-        if (unlikely(payload_size > rd_kafka_buf_read_remain(rkbuf)))
-                rd_kafka_buf_parse_fail(rkbuf,
-                                        "%s [%"PRId32"] "
-                                        "MessageSet at offset %"PRId64
-                                        " payload size %"PRIusz
-                                        " > %"PRIusz" remaining bytes",
-                                        rktp->rktp_rkt->rkt_topic->str,
-                                        rktp->rktp_partition,
-                                        hdr.BaseOffset, payload_size,
-                                        rd_kafka_buf_read_remain(rkbuf));
-
-        /* If entire MessageSet contains old outdated offsets, skip it. */
-        if (LastOffset < rktp->rktp_offsets.fetch_offset) {
-                rd_kafka_buf_skip(rkbuf, payload_size);
-                goto done;
-        }
-
-        /* Ignore control messages */
-        if (unlikely((hdr.Attributes & RD_KAFKA_MSGSET_V2_ATTR_CONTROL))) {
-                rd_kafka_buf_skip(rkbuf, payload_size);
-                goto done;
-        }
-
-        msetr->msetr_v2_hdr = &hdr;
-
-        /* Handle compressed MessageSet */
-        if (hdr.Attributes & RD_KAFKA_MSG_ATTR_COMPRESSION_MASK) {
-                const void *compressed;
-
-                compressed = rd_slice_ensure_contig(&rkbuf->rkbuf_reader,
-                                                    payload_size);
-                rd_assert(compressed);
-
-                err = rd_kafka_msgset_reader_decompress(
-                        msetr, 2/*MsgVersion v2*/, hdr.Attributes,
-                        hdr.BaseTimestamp, hdr.BaseOffset,
-                        compressed, payload_size);
-                if (err)
-                        goto err;
-
-        } else {
-                /* Read uncompressed messages */
-
-                /* Save original slice, reduce size of the current one to
-                 * be limited by the MessageSet.Length, and then start reading
-                 * messages until the lesser slice is exhausted. */
-                if (!rd_slice_narrow_relative(&rkbuf->rkbuf_reader,
-                                              &save_slice, payload_size))
-                        rd_kafka_buf_check_len(rkbuf, payload_size);
-
-                /* Read messages */
-                err = rd_kafka_msgset_reader_msgs_v2(msetr);
-
-                /* Restore wider slice */
-                rd_slice_widen(&rkbuf->rkbuf_reader, &save_slice);
-
-                if (unlikely(err))
-                        goto err;
-        }
-
-
- done:
-        /* Set the next fetch offset to the MessageSet header's last offset + 1
-         * to avoid getting stuck on compacted MessageSets where the last
-         * Message in the MessageSet has an Offset < MessageSet header's
-         * last offset.  See KAFKA-5443 */
-        if (likely(LastOffset >= msetr->msetr_rktp->rktp_offsets.fetch_offset))
-                msetr->msetr_rktp->rktp_offsets.fetch_offset = LastOffset + 1;
-
-        msetr->msetr_v2_hdr = NULL;
-
-        return RD_KAFKA_RESP_ERR_NO_ERROR;
-
- err_parse:
-        /* Count all parse errors as partial message errors. */
-        rd_atomic64_add(&msetr->msetr_rkb->rkb_c.rx_partial, 1);
-        err = rkbuf->rkbuf_err;
-        /* FALLTHRU */
- err:
-        msetr->msetr_v2_hdr = NULL;
-        return err;
-}
-
-
-
-/**
- * @brief Parse and read messages from msgset reader buffer.
- */
-static rd_kafka_resp_err_t
-rd_kafka_msgset_reader (rd_kafka_msgset_reader_t *msetr) {
-        rd_kafka_buf_t *rkbuf = msetr->msetr_rkbuf;
-        rd_kafka_toppar_t *rktp = msetr->msetr_rktp;
-        rd_kafka_resp_err_t (*reader[]) (rd_kafka_msgset_reader_t *) = {
-                /* Indexed by MsgVersion/MagicByte, pointing to
-                 * a Msg(Set)Version reader */
-                [0] = rd_kafka_msgset_reader_msg_v0_1,
-                [1] = rd_kafka_msgset_reader_msg_v0_1,
-                [2] = rd_kafka_msgset_reader_v2
-        };
-        rd_kafka_resp_err_t err;
-        /* Only log decoding errors if protocol debugging enabled. */
-        int log_decode_errors = (rkbuf->rkbuf_rkb->rkb_rk->rk_conf.debug &
-                                 RD_KAFKA_DBG_PROTOCOL) ? LOG_DEBUG : 0;
-        int8_t MagicByte;
-        size_t read_offset = rd_slice_offset(&rkbuf->rkbuf_reader);
-
-        /* We don't know the MsgVersion at this point, peek where the
-         * MagicByte resides both in MsgVersion v0..1 and v2 to
-         * know which MessageSet reader to use. */
-        rd_kafka_buf_peek_i8(rkbuf, read_offset+8+4+4, &MagicByte);
-
-        if (unlikely(MagicByte < 0 || MagicByte > 2)) {
-                int64_t Offset; /* For error logging */
-                rd_kafka_buf_peek_i64(rkbuf, read_offset+0, &Offset);
-
-                rd_rkb_dbg(msetr->msetr_rkb,
-                           MSG | RD_KAFKA_DBG_PROTOCOL | RD_KAFKA_DBG_FETCH,
-                           "MAGICBYTE",
-                           "%s [%"PRId32"]: "
-                           "Unsupported Message(Set) MagicByte %d at "
-                           "offset %"PRId64": skipping",
-                           rktp->rktp_rkt->rkt_topic->str,
-                           rktp->rktp_partition,
-                           (int)MagicByte, Offset);
-                if (Offset >= msetr->msetr_rktp->rktp_offsets.fetch_offset) {
-                        rd_kafka_q_op_err(
-                                &msetr->msetr_rkq,
-                                RD_KAFKA_OP_CONSUMER_ERR,
-                                RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED,
-                                msetr->msetr_tver->version, rktp, Offset,
-                                "Unsupported Message(Set) MagicByte %d "
-                                "at offset %"PRId64,
-                                (int)MagicByte, Offset);
-                        /* Skip message(set) */
-                        msetr->msetr_rktp->rktp_offsets.fetch_offset = Offset+1;
-                }
-
-                return RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED;
-        }
-
-        /* Let version-specific reader parse MessageSets until the slice
-         * is exhausted or an error occurs (typically a partial message). */
-        do {
-                err = reader[(int)MagicByte](msetr);
-        } while (!err && rd_slice_remains(&rkbuf->rkbuf_reader) > 0);
-
-        return err;
-
- err_parse:
-        return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-
-
-/**
- * @brief MessageSet post-processing.
- *
- * @param last_offsetp will be set to the offset of the last message in the set,
- *                     or -1 if not applicable.
- */
-static void rd_kafka_msgset_reader_postproc (rd_kafka_msgset_reader_t *msetr,
-                                             int64_t *last_offsetp) {
-        rd_kafka_op_t *rko;
-
-        if (msetr->msetr_relative_offsets) {
-                /* Update messages to absolute offsets
-                 * and purge any messages older than the current
-                 * fetch offset. */
-                rd_kafka_q_fix_offsets(&msetr->msetr_rkq,
-                                       msetr->msetr_rktp->rktp_offsets.
-                                       fetch_offset,
-                                       msetr->msetr_outer.offset -
-                                       msetr->msetr_msgcnt + 1);
-        }
-
-        rko = rd_kafka_q_last(&msetr->msetr_rkq,
-                              RD_KAFKA_OP_FETCH,
-                              0 /* no error ops */);
-        if (rko)
-                *last_offsetp = rko->rko_u.fetch.rkm.rkm_offset;
-}
-
-
-
-
-
-/**
- * @brief Run the MessageSet reader, read messages until buffer is
- *        exhausted (or error encountered), enqueue parsed messages on
- *        partition queue.
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR if MessageSet was successfully
- *          or partially parsed. When other error codes are returned it
- *          indicates a semi-permanent error (such as unsupported MsgVersion)
- *          and the fetcher should back off this partition to avoid
- *          busy-looping.
- */
-static rd_kafka_resp_err_t
-rd_kafka_msgset_reader_run (rd_kafka_msgset_reader_t *msetr) {
-        rd_kafka_toppar_t *rktp = msetr->msetr_rktp;
-        rd_kafka_resp_err_t err;
-        int64_t last_offset = -1;
-
-        /* Parse MessageSets and messages */
-        err = rd_kafka_msgset_reader(msetr);
-
-        if (unlikely(rd_kafka_q_len(&msetr->msetr_rkq) == 0)) {
-                /* The message set didn't contain a single full message,
-                 * nor was any error posted on the response queue.
-                 * This means the size limit was perhaps too tight;
-                 * increase it automatically. */
-                if (rktp->rktp_fetch_msg_max_bytes < (1 << 30)) {
-                        rktp->rktp_fetch_msg_max_bytes *= 2;
-                        rd_rkb_dbg(msetr->msetr_rkb, FETCH, "CONSUME",
-                                   "Topic %s [%"PRId32"]: Increasing "
-                                   "max fetch bytes to %"PRId32,
-                                   rktp->rktp_rkt->rkt_topic->str,
-                                   rktp->rktp_partition,
-                                   rktp->rktp_fetch_msg_max_bytes);
-                } else if (!err) {
-                        rd_kafka_q_op_err(
-                                &msetr->msetr_rkq,
-                                RD_KAFKA_OP_CONSUMER_ERR,
-                                RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE,
-                                msetr->msetr_tver->version,
-                                rktp,
-                                rktp->rktp_offsets.fetch_offset,
-                                "Message at offset %"PRId64" "
-                                "might be too large to fetch, try increasing "
-                                "receive.message.max.bytes",
-                                rktp->rktp_offsets.fetch_offset);
-                }
-
-        } else {
-                /* MessageSet post-processing. */
-                rd_kafka_msgset_reader_postproc(msetr, &last_offset);
-
-                /* Ignore parse errors if there was at least one
-                 * good message since it probably indicates a
-                 * partial response rather than an erroneous one. */
-                if (err == RD_KAFKA_RESP_ERR__BAD_MSG &&
-                    msetr->msetr_msgcnt > 0)
-                        err = RD_KAFKA_RESP_ERR_NO_ERROR;
-        }
-
-        rd_rkb_dbg(msetr->msetr_rkb, MSG | RD_KAFKA_DBG_FETCH, "CONSUME",
-                   "Enqueue %i message(s) (%d ops) on %s [%"PRId32"] "
-                   "fetch queue (qlen %d, v%d, last_offset %"PRId64")",
-                   msetr->msetr_msgcnt, rd_kafka_q_len(&msetr->msetr_rkq),
-                   rktp->rktp_rkt->rkt_topic->str,
-                   rktp->rktp_partition, rd_kafka_q_len(&msetr->msetr_rkq),
-                   msetr->msetr_tver->version, last_offset);
-
-        /* Concat all messages & errors onto the parent's queue
-         * (the partition's fetch queue) */
-        if (rd_kafka_q_concat(msetr->msetr_par_rkq, &msetr->msetr_rkq) != -1) {
-                /* Update partition's fetch offset based on
-                 * last message's offset. */
-                if (likely(last_offset != -1)) {
-                        rktp->rktp_offsets.fetch_offset = last_offset + 1;
-                        rd_atomic64_add(&rktp->rktp_c.msgs,
-                                        msetr->msetr_msgcnt);
-                }
-        }
-
-        rd_kafka_q_destroy(&msetr->msetr_rkq);
-
-        /* Skip remaining part of slice so caller can continue
-         * with next partition. */
-        rd_slice_read(&msetr->msetr_rkbuf->rkbuf_reader, NULL,
-                      rd_slice_remains(&msetr->msetr_rkbuf->rkbuf_reader));
-        return err;
-}
-
-
-
-/**
- * @brief Parse one MessageSet at the current buffer read position,
- *        enqueueing messages, propagating errors, etc.
- * @remark The current rkbuf_reader slice must be limited to the MessageSet size
- *
- * @returns see rd_kafka_msgset_reader_run()
- */
-rd_kafka_resp_err_t
-rd_kafka_msgset_parse (rd_kafka_buf_t *rkbuf,
-                       rd_kafka_buf_t *request,
-                       rd_kafka_toppar_t *rktp,
-                       const struct rd_kafka_toppar_ver *tver) {
-        rd_kafka_msgset_reader_t msetr;
-
-        rd_kafka_msgset_reader_init(&msetr, rkbuf, rktp, tver,
-                                    rktp->rktp_fetchq);
-
-        /* Parse and handle the message set */
-        return rd_kafka_msgset_reader_run(&msetr);
-}
-
-
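For context, the fetch-size recovery above maps onto two public configuration
properties. The following standalone sketch is hypothetical and not part of
this commit; the numeric values are placeholders and only illustrate how an
application might raise them through librdkafka's public C API.

/* fetch_size_demo.c: hypothetical sketch, not part of this commit. */
#include <stdio.h>
#include <librdkafka/rdkafka.h>

int main (void) {
        char errstr[512];
        rd_kafka_conf_t *conf = rd_kafka_conf_new();

        /* Initial per-partition fetch size; the reader grows its own copy
         * (up to 1 GiB) whenever a fetched MessageSet holds no full message. */
        if (rd_kafka_conf_set(conf, "fetch.message.max.bytes", "1048576",
                              errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK ||
            /* Hard cap for a whole fetch response; the consumer error above
             * points the application at this property when a message cannot
             * fit at all. */
            rd_kafka_conf_set(conf, "receive.message.max.bytes", "100000000",
                              errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) {
                fprintf(stderr, "config error: %s\n", errstr);
                rd_kafka_conf_destroy(conf);
                return 1;
        }

        printf("fetch sizing configured\n");
        rd_kafka_conf_destroy(conf);
        return 0;
}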


[17/51] [partial] nifi-minifi-cpp git commit: MINIFICPP-512 - upgrade to librdkafka 0.11.4

Posted by ph...@apache.org.
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdkafka_sasl_win32.c
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdkafka_sasl_win32.c b/thirdparty/librdkafka-0.11.1/src/rdkafka_sasl_win32.c
deleted file mode 100644
index b81ff87..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdkafka_sasl_win32.c
+++ /dev/null
@@ -1,526 +0,0 @@
-/*
- * librdkafka - The Apache Kafka C/C++ library
- *
- * Copyright (c) 2016 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/**
- * Implements SASL Kerberos GSSAPI authentication client
- * using the native Win32 SSPI.
- */
-
-#include "rdkafka_int.h"
-#include "rdkafka_transport.h"
-#include "rdkafka_transport_int.h"
-#include "rdkafka_sasl.h"
-#include "rdkafka_sasl_int.h"
-
-
-#include <stdio.h>
-#include <windows.h>
-#include <ntsecapi.h>
-
-#define SECURITY_WIN32
-#pragma comment(lib, "Secur32.lib")
-#include <Sspi.h>
-
-
-#define RD_KAFKA_SASL_SSPI_CTX_ATTRS \
- (ISC_REQ_CONFIDENTIALITY | ISC_REQ_REPLAY_DETECT | \
-  ISC_REQ_SEQUENCE_DETECT | ISC_REQ_CONNECTION)
-
-
- /* Default maximum kerberos token size for newer versions of Windows */
-#define RD_KAFKA_SSPI_MAX_TOKEN_SIZE 48000
-
-
-/**
- * @brief Per-connection SASL state
- */
-typedef struct rd_kafka_sasl_win32_state_s {
-        CredHandle *cred;
-        CtxtHandle *ctx;
-        wchar_t principal[512];
-} rd_kafka_sasl_win32_state_t;
-
-
-/**
- * @returns the string representation of a SECURITY_STATUS error code
- */
-static const char *rd_kafka_sasl_sspi_err2str (SECURITY_STATUS sr) {
-        switch (sr)
-        {
-                case SEC_E_INSUFFICIENT_MEMORY:
-                        return "Insufficient memory";
-                case SEC_E_INTERNAL_ERROR:
-                        return "Internal error";
-                case SEC_E_INVALID_HANDLE:
-                        return "Invalid handle";
-                case SEC_E_INVALID_TOKEN:
-                        return "Invalid token";
-                case SEC_E_LOGON_DENIED:
-                        return "Logon denied";
-                case SEC_E_NO_AUTHENTICATING_AUTHORITY:
-                        return "No authority could be contacted for authentication.";
-                case SEC_E_NO_CREDENTIALS:
-                        return "No credentials";
-                case SEC_E_TARGET_UNKNOWN:
-                        return "Target unknown";
-                case SEC_E_UNSUPPORTED_FUNCTION:
-                        return "Unsupported functionality";
-                case SEC_E_WRONG_CREDENTIAL_HANDLE:
-                        return "The principal that received the authentication "
-                                "request is not the same as the one passed "
-                                "into  the pszTargetName parameter. "
-                                "This indicates a failure in mutual "
-                                "authentication.";
-                default:
-                        return "(no string representation)";
-        }
-}
-
-
-/**
- * @brief Create new CredHandle
- */
-static CredHandle *
-rd_kafka_sasl_sspi_cred_new (rd_kafka_transport_t *rktrans,
-                              char *errstr, size_t errstr_size) {
-        TimeStamp expiry = { 0, 0 };
-        SECURITY_STATUS sr;
-        CredHandle *cred = rd_calloc(1, sizeof(*cred));
-
-        sr = AcquireCredentialsHandle(
-                NULL, __TEXT("Kerberos"), SECPKG_CRED_OUTBOUND,
-                NULL, NULL, NULL, NULL, cred, &expiry);
-
-        if (sr != SEC_E_OK) {
-                rd_free(cred);
-                rd_snprintf(errstr, errstr_size,
-                            "Failed to acquire CredentialsHandle: "
-                            "error code %d", sr);
-                return NULL;
-        }
-
-        rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "SASL",
-                   "Acquired Kerberos credentials handle (expiry in %d.%ds)",
-                   expiry.u.HighPart, expiry.u.LowPart);
-
-        return cred;
-}
-
-
-/**
- * @brief Start or continue SSPI-based authentication processing.
- */
-static int rd_kafka_sasl_sspi_continue (rd_kafka_transport_t *rktrans,
-                                        const void *inbuf, size_t insize,
-                                        char *errstr, size_t errstr_size) {
-        rd_kafka_sasl_win32_state_t *state = rktrans->rktrans_sasl.state;
-        SecBufferDesc outbufdesc, inbufdesc;
-        SecBuffer outsecbuf, insecbuf;
-        BYTE outbuf[RD_KAFKA_SSPI_MAX_TOKEN_SIZE];
-        TimeStamp lifespan = { 0, 0 };
-        ULONG ret_ctxattrs;
-        CtxtHandle *ctx;
-        SECURITY_STATUS sr;
-
-        if (inbuf) {
-                if (insize > ULONG_MAX) {
-                        rd_snprintf(errstr, errstr_size,
-                                    "Input buffer length too large (%"PRIusz") "
-                                    "and would overflow", insize);
-                        return -1;
-                }
-
-                inbufdesc.ulVersion = SECBUFFER_VERSION;
-                inbufdesc.cBuffers = 1;
-                inbufdesc.pBuffers  = &insecbuf;
-
-                insecbuf.cbBuffer   = (unsigned long)insize;
-                insecbuf.BufferType = SECBUFFER_TOKEN;
-                insecbuf.pvBuffer   = (void *)inbuf;
-        }
-
-        outbufdesc.ulVersion = SECBUFFER_VERSION;
-        outbufdesc.cBuffers  = 1;
-        outbufdesc.pBuffers  = &outsecbuf;
-
-        outsecbuf.cbBuffer   = sizeof(outbuf);
-        outsecbuf.BufferType = SECBUFFER_TOKEN;
-        outsecbuf.pvBuffer   = outbuf;
-
-        if (!(ctx = state->ctx)) {
-                /* First time: allocate context handle
-                 * which will be filled in by Initialize..() */
-                ctx = rd_calloc(1, sizeof(*ctx));
-        }
-
-        sr = InitializeSecurityContext(
-                state->cred, state->ctx, state->principal,
-                RD_KAFKA_SASL_SSPI_CTX_ATTRS |
-                (state->ctx ? 0 : ISC_REQ_MUTUAL_AUTH | ISC_REQ_IDENTIFY),
-                0, SECURITY_NATIVE_DREP,
-                inbuf ? &inbufdesc : NULL,
-                0, ctx, &outbufdesc, &ret_ctxattrs, &lifespan);
-
-        if (!state->ctx)
-                state->ctx = ctx;
-
-        switch (sr)
-        {
-                case SEC_E_OK:
-                        rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "SASLAUTH",
-                                   "Initialized security context");
-
-                        rktrans->rktrans_sasl.complete = 1;
-                        break;
-                case SEC_I_CONTINUE_NEEDED:
-                        break;
-                case SEC_I_COMPLETE_NEEDED:
-                case SEC_I_COMPLETE_AND_CONTINUE:
-                        rd_snprintf(errstr, errstr_size,
-                                    "CompleteAuthToken (Digest auth, %d) "
-                                    "not implemented", sr);
-                        return -1;
-                case SEC_I_INCOMPLETE_CREDENTIALS:
-                        rd_snprintf(errstr, errstr_size,
-                                    "Incomplete credentials: "
-                                    "invalid or untrusted certificate");
-                        return -1;
-                default:
-                        rd_snprintf(errstr, errstr_size,
-                                    "InitializeSecurityContext "
-                                    "failed: %s (0x%x)",
-                                    rd_kafka_sasl_sspi_err2str(sr), sr);
-                        return -1;
-        }
-
-        if (rd_kafka_sasl_send(rktrans,
-                                outsecbuf.pvBuffer, outsecbuf.cbBuffer,
-                                errstr, errstr_size) == -1)
-                return -1;
-
-        return 0;
-}
-
-
-/**
- * @brief Sends the token response to the broker
- */
-static int rd_kafka_sasl_win32_send_response (rd_kafka_transport_t *rktrans,
-                                              char *errstr,
-                                              size_t errstr_size,
-                                              SecBuffer *server_token) {
-        rd_kafka_sasl_win32_state_t *state = rktrans->rktrans_sasl.state;
-        SECURITY_STATUS sr;
-        SecBuffer in_buffer;
-        SecBuffer out_buffer;
-        SecBuffer buffers[4];
-        SecBufferDesc buffer_desc;
-        SecPkgContext_Sizes sizes;
-        SecPkgCredentials_NamesA names;
-        int send_response;
-        size_t namelen;
-
-        sr = QueryContextAttributes(state->ctx, SECPKG_ATTR_SIZES, &sizes);
-        if (sr != SEC_E_OK) {
-                rd_snprintf(errstr, errstr_size,
-                            "Send response failed: %s (0x%x)",
-                            rd_kafka_sasl_sspi_err2str(sr), sr);
-                return -1;
-        }
-
-        RD_MEMZERO(names);
-        sr = QueryCredentialsAttributesA(state->cred, SECPKG_CRED_ATTR_NAMES,
-                                         &names);
-
-        if (sr != SEC_E_OK) {
-                rd_snprintf(errstr, errstr_size,
-                            "Query credentials failed: %s (0x%x)",
-                            rd_kafka_sasl_sspi_err2str(sr), sr);
-                return -1;
-        }
-
-        rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "SASLAUTH",
-                   "Sending response message for user: %s", names.sUserName);
-
-        namelen = strlen(names.sUserName) + 1;
-        if (namelen > ULONG_MAX) {
-                rd_snprintf(errstr, errstr_size,
-                            "User name length too large (%"PRIusz") "
-                            "and would overflow", namelen);
-                return -1;
-        }
-
-        in_buffer.pvBuffer = (char *)names.sUserName;
-        in_buffer.cbBuffer = (unsigned long)namelen;
-
-        buffer_desc.cBuffers = 4;
-        buffer_desc.pBuffers = buffers;
-        buffer_desc.ulVersion = SECBUFFER_VERSION;
-
-        /* security trailer */
-        buffers[0].cbBuffer = sizes.cbSecurityTrailer;
-        buffers[0].BufferType = SECBUFFER_TOKEN;
-        buffers[0].pvBuffer = rd_calloc(1, sizes.cbSecurityTrailer);
-
-        /* protection level and buffer size received from the server */
-        buffers[1].cbBuffer = server_token->cbBuffer;
-        buffers[1].BufferType = SECBUFFER_DATA;
-        buffers[1].pvBuffer = rd_calloc(1, server_token->cbBuffer);
-        memcpy(buffers[1].pvBuffer, server_token->pvBuffer, server_token->cbBuffer);
-
-        /* user principal */
-        buffers[2].cbBuffer = in_buffer.cbBuffer;
-        buffers[2].BufferType = SECBUFFER_DATA;
-        buffers[2].pvBuffer = rd_calloc(1, buffers[2].cbBuffer);
-        memcpy(buffers[2].pvBuffer, in_buffer.pvBuffer, in_buffer.cbBuffer);
-
-        /* padding */
-        buffers[3].cbBuffer = sizes.cbBlockSize;
-        buffers[3].BufferType = SECBUFFER_PADDING;
-        buffers[3].pvBuffer = rd_calloc(1, buffers[3].cbBuffer);
-
-        sr = EncryptMessage(state->ctx, KERB_WRAP_NO_ENCRYPT, &buffer_desc, 0);
-        if (sr != SEC_E_OK) {
-                rd_snprintf(errstr, errstr_size,
-                            "Encrypt message failed: %s (0x%x)",
-                            rd_kafka_sasl_sspi_err2str(sr), sr);
-
-                FreeContextBuffer(in_buffer.pvBuffer);
-                rd_free(buffers[0].pvBuffer);
-                rd_free(buffers[1].pvBuffer);
-                rd_free(buffers[2].pvBuffer);
-                rd_free(buffers[3].pvBuffer);
-                return -1;
-        }
-
-        out_buffer.cbBuffer = buffers[0].cbBuffer +
-                              buffers[1].cbBuffer +
-                              buffers[2].cbBuffer +
-                              buffers[3].cbBuffer;
-
-        out_buffer.pvBuffer = rd_calloc(1, buffers[0].cbBuffer +
-                                        buffers[1].cbBuffer +
-                                        buffers[2].cbBuffer +
-                                        buffers[3].cbBuffer);
-
-        memcpy(out_buffer.pvBuffer, buffers[0].pvBuffer, buffers[0].cbBuffer);
-
-        memcpy((unsigned char *)out_buffer.pvBuffer + (int)buffers[0].cbBuffer,
-               buffers[1].pvBuffer, buffers[1].cbBuffer);
-
-        memcpy((unsigned char *)out_buffer.pvBuffer +
-                buffers[0].cbBuffer + buffers[1].cbBuffer,
-                buffers[2].pvBuffer, buffers[2].cbBuffer);
-
-        memcpy((unsigned char *)out_buffer.pvBuffer +
-                buffers[0].cbBuffer + buffers[1].cbBuffer + buffers[2].cbBuffer,
-                buffers[3].pvBuffer, buffers[3].cbBuffer);
-
-        send_response = rd_kafka_sasl_send(rktrans,
-                                           out_buffer.pvBuffer,
-                                           out_buffer.cbBuffer,
-                                           errstr, errstr_size);
-
-        FreeContextBuffer(in_buffer.pvBuffer);
-        rd_free(out_buffer.pvBuffer);
-        rd_free(buffers[0].pvBuffer);
-        rd_free(buffers[1].pvBuffer);
-        rd_free(buffers[2].pvBuffer);
-        rd_free(buffers[3].pvBuffer);
-
-        return send_response;
-}
-
-
-/**
- * @brief Unwrap and validate token response from broker.
- */
-static int rd_kafka_sasl_win32_validate_token (rd_kafka_transport_t *rktrans,
-                                               const void *inbuf,
-                                               size_t insize,
-                                               char *errstr,
-                                               size_t errstr_size) {
-        rd_kafka_sasl_win32_state_t *state = rktrans->rktrans_sasl.state;
-        SecBuffer buffers[2];
-        SecBufferDesc buffer_desc;
-        SECURITY_STATUS sr;
-        char supported;
-
-        if (insize > ULONG_MAX) {
-                rd_snprintf(errstr, errstr_size,
-                            "Input buffer length too large (%"PRIusz") "
-                            "and would overflow", insize);
-                return -1;
-        }
-
-        buffer_desc.cBuffers = 2;
-        buffer_desc.pBuffers = buffers;
-        buffer_desc.ulVersion = SECBUFFER_VERSION;
-
-        buffers[0].cbBuffer = (unsigned long)insize;
-        buffers[0].BufferType = SECBUFFER_STREAM;
-        buffers[0].pvBuffer = (void *)inbuf;
-
-        buffers[1].cbBuffer = 0;
-        buffers[1].BufferType = SECBUFFER_DATA;
-        buffers[1].pvBuffer = NULL;
-
-        sr = DecryptMessage(state->ctx, &buffer_desc, 0, NULL);
-        if (sr != SEC_E_OK) {
-                rd_snprintf(errstr, errstr_size,
-                            "Decrypt message failed: %s (0x%x)",
-                            rd_kafka_sasl_sspi_err2str(sr), sr);
-                return -1;
-        }
-
-        if (buffers[1].cbBuffer < 4) {
-                rd_snprintf(errstr, errstr_size,
-                            "Validate token: "
-                            "invalid message");
-                return -1;
-        }
-
-        supported = ((char *)buffers[1].pvBuffer)[0];
-        if (!(supported & 1)) {
-                rd_snprintf(errstr, errstr_size,
-                            "Validate token: "
-                            "server does not support layer");
-                return -1;
-        }
-
-        rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "SASLAUTH",
-                   "Validated server token");
-
-        return rd_kafka_sasl_win32_send_response(rktrans, errstr,
-                                                 errstr_size, &buffers[1]);
-}
-
-
-/**
- * @brief Handle SASL frame received from broker.
- */
-static int rd_kafka_sasl_win32_recv (struct rd_kafka_transport_s *rktrans,
-                                     const void *buf, size_t size,
-                                     char *errstr, size_t errstr_size) {
-        rd_kafka_sasl_win32_state_t *state = rktrans->rktrans_sasl.state;
-
-        if (rktrans->rktrans_sasl.complete) {
-                if (rd_kafka_sasl_win32_validate_token(
-                        rktrans, buf, size, errstr, errstr_size) == -1) {
-                        rktrans->rktrans_sasl.complete = 0;
-                        return -1;
-                }
-
-                /* Final ack from broker. */
-                rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "SASLAUTH",
-                           "Authenticated");
-                rd_kafka_sasl_auth_done(rktrans);
-                return 0;
-        }
-
-        return rd_kafka_sasl_sspi_continue(rktrans, buf, size,
-                                           errstr, errstr_size);
-}
-
-
-/**
- * @brief Decommission SSPI state
- */
-static void rd_kafka_sasl_win32_close (rd_kafka_transport_t *rktrans) {
-        rd_kafka_sasl_win32_state_t *state = rktrans->rktrans_sasl.state;
-
-        if (!state)
-                return;
-
-        if (state->ctx) {
-                DeleteSecurityContext(state->ctx);
-                rd_free(state->ctx);
-        }
-        if (state->cred) {
-                FreeCredentialsHandle(state->cred);
-                rd_free(state->cred);
-        }
-        rd_free(state);
-}
-
-
-static int rd_kafka_sasl_win32_client_new (rd_kafka_transport_t *rktrans,
-                                           const char *hostname,
-                                           char *errstr, size_t errstr_size) {
-        rd_kafka_t *rk = rktrans->rktrans_rkb->rkb_rk;
-        rd_kafka_sasl_win32_state_t *state;
-
-        if (strcmp(rk->rk_conf.sasl.mechanisms, "GSSAPI")) {
-                rd_snprintf(errstr, errstr_size,
-                            "SASL mechanism \"%s\" not supported on platform",
-                            rk->rk_conf.sasl.mechanisms);
-                return -1;
-        }
-
-        state = rd_calloc(1, sizeof(*state));
-        rktrans->rktrans_sasl.state = state;
-
-        _snwprintf(state->principal, RD_ARRAYSIZE(state->principal),
-                   L"%hs/%hs",
-                   rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.principal,
-                   hostname);
-
-        state->cred = rd_kafka_sasl_sspi_cred_new(rktrans, errstr,
-                                                  errstr_size);
-        if (!state->cred)
-                return -1;
-
-        if (rd_kafka_sasl_sspi_continue(rktrans, NULL, 0,
-                                        errstr, errstr_size) == -1)
-                return -1;
-
-        return 0;
-}
-
-/**
- * @brief Validate config
- */
-static int rd_kafka_sasl_win32_conf_validate (rd_kafka_t *rk,
-                                              char *errstr,
-                                              size_t errstr_size) {
-        if (!rk->rk_conf.sasl.principal) {
-                rd_snprintf(errstr, errstr_size,
-                            "sasl.kerberos.principal must be set");
-                return -1;
-        }
-
-        return 0;
-}
-
-const struct rd_kafka_sasl_provider rd_kafka_sasl_win32_provider = {
-        .name          = "Win32 SSPI",
-        .client_new    = rd_kafka_sasl_win32_client_new,
-        .recv          = rd_kafka_sasl_win32_recv,
-        .close         = rd_kafka_sasl_win32_close,
-        .conf_validate = rd_kafka_sasl_win32_conf_validate
-};
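For context, this provider is only engaged when the client is configured for
the GSSAPI mechanism, and its conf_validate() requires sasl.kerberos.principal
to be set. Below is a minimal, hedged sketch of that configuration through the
public C API; the broker address, service name and principal are invented
placeholders, not values taken from this commit.

/* sasl_gssapi_conf_demo.c: hypothetical sketch, not part of this commit. */
#include <stdio.h>
#include <librdkafka/rdkafka.h>

int main (void) {
        char errstr[512];
        rd_kafka_conf_t *conf = rd_kafka_conf_new();
        rd_kafka_t *rk;
        const char *props[][2] = {
                { "bootstrap.servers",          "broker.example.com:9092" },
                { "security.protocol",          "sasl_plaintext" },
                { "sasl.mechanisms",            "GSSAPI" },
                { "sasl.kerberos.service.name", "kafka" },
                /* conf_validate() above rejects a missing principal: */
                { "sasl.kerberos.principal",    "minifi-client" },
        };
        size_t i;

        for (i = 0 ; i < sizeof(props)/sizeof(props[0]) ; i++) {
                if (rd_kafka_conf_set(conf, props[i][0], props[i][1],
                                      errstr, sizeof(errstr)) !=
                    RD_KAFKA_CONF_OK) {
                        fprintf(stderr, "%s: %s\n", props[i][0], errstr);
                        return 1;
                }
        }

        rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
        if (!rk) {
                fprintf(stderr, "client creation failed: %s\n", errstr);
                return 1;
        }
        rd_kafka_destroy(rk);
        return 0;
}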

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdkafka_subscription.c
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdkafka_subscription.c b/thirdparty/librdkafka-0.11.1/src/rdkafka_subscription.c
deleted file mode 100644
index 18a2458..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdkafka_subscription.c
+++ /dev/null
@@ -1,187 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2015, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-/**
- * This is the high level consumer API which is mutually exclusive
- * with the old legacy simple consumer.
- * Only one of these interfaces may be used on a given rd_kafka_t handle.
- */
-
-#include "rdkafka_int.h"
-#include "rdkafka_subscription.h"
-
-
-rd_kafka_resp_err_t rd_kafka_unsubscribe (rd_kafka_t *rk) {
-        rd_kafka_cgrp_t *rkcg;
-
-        if (!(rkcg = rd_kafka_cgrp_get(rk)))
-                return RD_KAFKA_RESP_ERR__UNKNOWN_GROUP;
-
-        return rd_kafka_op_err_destroy(rd_kafka_op_req2(rkcg->rkcg_ops,
-                                                        RD_KAFKA_OP_SUBSCRIBE));
-}
-
-
-/** @returns 1 if the topic is invalid (bad regex, empty), else 0 if valid. */
-static size_t _invalid_topic_cb (const rd_kafka_topic_partition_t *rktpar,
-                                 void *opaque) {
-        rd_regex_t *re;
-        char errstr[1];
-
-        if (!*rktpar->topic)
-                return 1;
-
-        if (*rktpar->topic != '^')
-                return 0;
-
-        if (!(re = rd_regex_comp(rktpar->topic, errstr, sizeof(errstr))))
-                return 1;
-
-        rd_regex_destroy(re);
-
-        return 0;
-}
-
-
-rd_kafka_resp_err_t
-rd_kafka_subscribe (rd_kafka_t *rk,
-                    const rd_kafka_topic_partition_list_t *topics) {
-
-        rd_kafka_op_t *rko;
-        rd_kafka_cgrp_t *rkcg;
-
-        if (!(rkcg = rd_kafka_cgrp_get(rk)))
-                return RD_KAFKA_RESP_ERR__UNKNOWN_GROUP;
-
-        /* Validate topics */
-        if (topics->cnt == 0 ||
-            rd_kafka_topic_partition_list_sum(topics,
-                                              _invalid_topic_cb, NULL) > 0)
-                return RD_KAFKA_RESP_ERR__INVALID_ARG;
-
-        rko = rd_kafka_op_new(RD_KAFKA_OP_SUBSCRIBE);
-	rko->rko_u.subscribe.topics = rd_kafka_topic_partition_list_copy(topics);
-
-        return rd_kafka_op_err_destroy(
-                rd_kafka_op_req(rkcg->rkcg_ops, rko, RD_POLL_INFINITE));
-}
-
-
-rd_kafka_resp_err_t
-rd_kafka_assign (rd_kafka_t *rk,
-                 const rd_kafka_topic_partition_list_t *partitions) {
-        rd_kafka_op_t *rko;
-        rd_kafka_cgrp_t *rkcg;
-
-        if (!(rkcg = rd_kafka_cgrp_get(rk)))
-                return RD_KAFKA_RESP_ERR__UNKNOWN_GROUP;
-
-        rko = rd_kafka_op_new(RD_KAFKA_OP_ASSIGN);
-	if (partitions)
-		rko->rko_u.assign.partitions =
-                        rd_kafka_topic_partition_list_copy(partitions);
-
-        return rd_kafka_op_err_destroy(
-                rd_kafka_op_req(rkcg->rkcg_ops, rko, RD_POLL_INFINITE));
-}
-
-
-
-rd_kafka_resp_err_t
-rd_kafka_assignment (rd_kafka_t *rk,
-                     rd_kafka_topic_partition_list_t **partitions) {
-        rd_kafka_op_t *rko;
-        rd_kafka_resp_err_t err;
-        rd_kafka_cgrp_t *rkcg;
-
-        if (!(rkcg = rd_kafka_cgrp_get(rk)))
-                return RD_KAFKA_RESP_ERR__UNKNOWN_GROUP;
-
-        rko = rd_kafka_op_req2(rkcg->rkcg_ops, RD_KAFKA_OP_GET_ASSIGNMENT);
-	if (!rko)
-		return RD_KAFKA_RESP_ERR__TIMED_OUT;
-
-        err = rko->rko_err;
-
-        *partitions = rko->rko_u.assign.partitions;
-	rko->rko_u.assign.partitions = NULL;
-        rd_kafka_op_destroy(rko);
-
-        if (!*partitions && !err) {
-                /* Create an empty list for convenience of the caller */
-                *partitions = rd_kafka_topic_partition_list_new(0);
-        }
-
-        return err;
-}
-
-rd_kafka_resp_err_t
-rd_kafka_subscription (rd_kafka_t *rk,
-                       rd_kafka_topic_partition_list_t **topics){
-	rd_kafka_op_t *rko;
-        rd_kafka_resp_err_t err;
-        rd_kafka_cgrp_t *rkcg;
-
-        if (!(rkcg = rd_kafka_cgrp_get(rk)))
-                return RD_KAFKA_RESP_ERR__UNKNOWN_GROUP;
-
-        rko = rd_kafka_op_req2(rkcg->rkcg_ops, RD_KAFKA_OP_GET_SUBSCRIPTION);
-	if (!rko)
-		return RD_KAFKA_RESP_ERR__TIMED_OUT;
-
-        err = rko->rko_err;
-
-        *topics = rko->rko_u.subscribe.topics;
-	rko->rko_u.subscribe.topics = NULL;
-        rd_kafka_op_destroy(rko);
-
-        if (!*topics && !err) {
-                /* Create an empty list for convenience of the caller */
-                *topics = rd_kafka_topic_partition_list_new(0);
-        }
-
-        return err;
-}
-
-
-rd_kafka_resp_err_t
-rd_kafka_pause_partitions (rd_kafka_t *rk,
-			   rd_kafka_topic_partition_list_t *partitions) {
-	return rd_kafka_toppars_pause_resume(rk, 1, RD_KAFKA_TOPPAR_F_APP_PAUSE,
-					     partitions);
-}
-
-
-rd_kafka_resp_err_t
-rd_kafka_resume_partitions (rd_kafka_t *rk,
-			   rd_kafka_topic_partition_list_t *partitions) {
-	return rd_kafka_toppars_pause_resume(rk, 0, RD_KAFKA_TOPPAR_F_APP_PAUSE,
-					     partitions);
-}
-
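For reference, the subscribe/assign calls above are the public high-level
consumer entry points, and rd_kafka_subscribe() rejects empty topic names and
malformed "^"-prefixed regexes via _invalid_topic_cb() before handing the op
to the group handler. A minimal usage sketch follows; the broker address,
group id and topic names are placeholders.

/* subscribe_demo.c: hypothetical sketch, not part of this commit. */
#include <stdio.h>
#include <librdkafka/rdkafka.h>

int main (void) {
        char errstr[512];
        rd_kafka_conf_t *conf = rd_kafka_conf_new();
        rd_kafka_t *rk;
        rd_kafka_topic_partition_list_t *topics;
        rd_kafka_resp_err_t err;

        rd_kafka_conf_set(conf, "bootstrap.servers", "localhost:9092",
                          errstr, sizeof(errstr));
        rd_kafka_conf_set(conf, "group.id", "example-group",
                          errstr, sizeof(errstr));

        rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, errstr, sizeof(errstr));
        if (!rk) {
                fprintf(stderr, "consumer creation failed: %s\n", errstr);
                return 1;
        }
        rd_kafka_poll_set_consumer(rk);

        /* Plain names and "^"-prefixed regexes may be mixed; both forms are
         * validated by rd_kafka_subscribe() before the SUBSCRIBE op is sent. */
        topics = rd_kafka_topic_partition_list_new(2);
        rd_kafka_topic_partition_list_add(topics, "plain-topic",
                                          RD_KAFKA_PARTITION_UA);
        rd_kafka_topic_partition_list_add(topics, "^matching-.*",
                                          RD_KAFKA_PARTITION_UA);

        err = rd_kafka_subscribe(rk, topics);
        if (err)
                fprintf(stderr, "subscribe failed: %s\n",
                        rd_kafka_err2str(err));

        rd_kafka_topic_partition_list_destroy(topics);
        rd_kafka_consumer_close(rk);
        rd_kafka_destroy(rk);
        return 0;
}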

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdkafka_subscription.h
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdkafka_subscription.h b/thirdparty/librdkafka-0.11.1/src/rdkafka_subscription.h
deleted file mode 100644
index 0c51712..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdkafka_subscription.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2015, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#pragma once
-
-

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdkafka_timer.c
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdkafka_timer.c b/thirdparty/librdkafka-0.11.1/src/rdkafka_timer.c
deleted file mode 100644
index 7947980..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdkafka_timer.c
+++ /dev/null
@@ -1,292 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2013, Magnus Edenhill
- * All rights reserved.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met: 
- * 
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer. 
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution. 
- * 
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "rdkafka_int.h"
-#include "rd.h"
-#include "rdtime.h"
-#include "rdsysqueue.h"
-
-
-static RD_INLINE void rd_kafka_timers_lock (rd_kafka_timers_t *rkts) {
-        mtx_lock(&rkts->rkts_lock);
-}
-
-static RD_INLINE void rd_kafka_timers_unlock (rd_kafka_timers_t *rkts) {
-        mtx_unlock(&rkts->rkts_lock);
-}
-
-
-static RD_INLINE int rd_kafka_timer_started (const rd_kafka_timer_t *rtmr) {
-	return rtmr->rtmr_interval ? 1 : 0;
-}
-
-
-static RD_INLINE int rd_kafka_timer_scheduled (const rd_kafka_timer_t *rtmr) {
-	return rtmr->rtmr_next ? 1 : 0;
-}
-
-
-static int rd_kafka_timer_cmp (const void *_a, const void *_b) {
-	const rd_kafka_timer_t *a = _a, *b = _b;
-	return (int)(a->rtmr_next - b->rtmr_next);
-}
-
-static void rd_kafka_timer_unschedule (rd_kafka_timers_t *rkts,
-                                       rd_kafka_timer_t *rtmr) {
-	TAILQ_REMOVE(&rkts->rkts_timers, rtmr, rtmr_link);
-	rtmr->rtmr_next = 0;
-}
-
-static void rd_kafka_timer_schedule (rd_kafka_timers_t *rkts,
-				     rd_kafka_timer_t *rtmr, int extra_us) {
-	rd_kafka_timer_t *first;
-
-	/* Timer has been stopped */
-	if (!rtmr->rtmr_interval)
-		return;
-
-        /* Timers framework is terminating */
-        if (unlikely(!rkts->rkts_enabled))
-                return;
-
-	rtmr->rtmr_next = rd_clock() + rtmr->rtmr_interval + extra_us;
-
-	if (!(first = TAILQ_FIRST(&rkts->rkts_timers)) ||
-	    first->rtmr_next > rtmr->rtmr_next) {
-		TAILQ_INSERT_HEAD(&rkts->rkts_timers, rtmr, rtmr_link);
-                cnd_signal(&rkts->rkts_cond);
-	} else
-		TAILQ_INSERT_SORTED(&rkts->rkts_timers, rtmr,
-                                    rd_kafka_timer_t *, rtmr_link,
-				    rd_kafka_timer_cmp);
-}
-
-/**
- * Stop a timer that may be started.
- * If called from inside a timer callback 'lock' must be 0, else 1.
- */
-void rd_kafka_timer_stop (rd_kafka_timers_t *rkts, rd_kafka_timer_t *rtmr,
-                          int lock) {
-	if (lock)
-		rd_kafka_timers_lock(rkts);
-
-	if (!rd_kafka_timer_started(rtmr)) {
-		if (lock)
-			rd_kafka_timers_unlock(rkts);
-		return;
-	}
-
-	if (rd_kafka_timer_scheduled(rtmr))
-		rd_kafka_timer_unschedule(rkts, rtmr);
-
-	rtmr->rtmr_interval = 0;
-
-	if (lock)
-		rd_kafka_timers_unlock(rkts);
-}
-
-
-/**
- * Start the provided timer with the given interval.
- * Upon expiration of the interval (us) the callback will be called in the
- * main rdkafka thread; after the callback returns, the timer will be restarted.
- *
- * Use rd_kafka_timer_stop() to stop a timer.
- */
-void rd_kafka_timer_start (rd_kafka_timers_t *rkts,
-			   rd_kafka_timer_t *rtmr, rd_ts_t interval,
-			   void (*callback) (rd_kafka_timers_t *rkts, void *arg),
-			   void *arg) {
-	rd_kafka_timers_lock(rkts);
-
-	rd_kafka_timer_stop(rkts, rtmr, 0/*!lock*/);
-
-	rtmr->rtmr_interval = interval;
-	rtmr->rtmr_callback = callback;
-	rtmr->rtmr_arg      = arg;
-
-	rd_kafka_timer_schedule(rkts, rtmr, 0);
-
-	rd_kafka_timers_unlock(rkts);
-}
-
-
-/**
- * Delay the next timer invocation by 'backoff_us'
- */
-void rd_kafka_timer_backoff (rd_kafka_timers_t *rkts,
-			     rd_kafka_timer_t *rtmr, int backoff_us) {
-	rd_kafka_timers_lock(rkts);
-	if (rd_kafka_timer_scheduled(rtmr))
-		rd_kafka_timer_unschedule(rkts, rtmr);
-	rd_kafka_timer_schedule(rkts, rtmr, backoff_us);
-	rd_kafka_timers_unlock(rkts);
-}
-
-
-/**
- * @returns the delta time to the next time (>=0) this timer fires, or -1
- *          if timer is stopped.
- */
-rd_ts_t rd_kafka_timer_next (rd_kafka_timers_t *rkts, rd_kafka_timer_t *rtmr,
-                             int do_lock) {
-        rd_ts_t now = rd_clock();
-        rd_ts_t delta = -1;
-
-        if (do_lock)
-                rd_kafka_timers_lock(rkts);
-
-        if (rd_kafka_timer_scheduled(rtmr)) {
-                delta = rtmr->rtmr_next - now;
-                if (delta < 0)
-                        delta = 0;
-        }
-
-        if (do_lock)
-                rd_kafka_timers_unlock(rkts);
-
-        return delta;
-}
-
-
-/**
- * Interrupt rd_kafka_timers_run().
- * Used for termination.
- */
-void rd_kafka_timers_interrupt (rd_kafka_timers_t *rkts) {
-	rd_kafka_timers_lock(rkts);
-	cnd_signal(&rkts->rkts_cond);
-	rd_kafka_timers_unlock(rkts);
-}
-
-
-/**
- * Returns the delta time to the next timer to fire, capped by 'timeout_us'.
- */
-rd_ts_t rd_kafka_timers_next (rd_kafka_timers_t *rkts, int timeout_us,
-			      int do_lock) {
-	rd_ts_t now = rd_clock();
-	rd_ts_t sleeptime = 0;
-	rd_kafka_timer_t *rtmr;
-
-	if (do_lock)
-		rd_kafka_timers_lock(rkts);
-
-	if (likely((rtmr = TAILQ_FIRST(&rkts->rkts_timers)) != NULL)) {
-		sleeptime = rtmr->rtmr_next - now;
-		if (sleeptime < 0)
-			sleeptime = 0;
-		else if (sleeptime > (rd_ts_t)timeout_us)
-			sleeptime = (rd_ts_t)timeout_us;
-	} else
-		sleeptime = (rd_ts_t)timeout_us;
-
-	if (do_lock)
-		rd_kafka_timers_unlock(rkts);
-
-	return sleeptime;
-}
-
-
-/**
- * Dispatch timers.
- * Will block up to 'timeout' microseconds before returning.
- */
-void rd_kafka_timers_run (rd_kafka_timers_t *rkts, int timeout_us) {
-	rd_ts_t now = rd_clock();
-	rd_ts_t end = now + timeout_us;
-
-        rd_kafka_timers_lock(rkts);
-
-	while (!rd_atomic32_get(&rkts->rkts_rk->rk_terminate) && now <= end) {
-		int64_t sleeptime;
-		rd_kafka_timer_t *rtmr;
-
-		if (timeout_us != RD_POLL_NOWAIT) {
-			sleeptime = rd_kafka_timers_next(rkts,
-							 timeout_us,
-							 0/*no-lock*/);
-
-			if (sleeptime > 0) {
-				cnd_timedwait_ms(&rkts->rkts_cond,
-						 &rkts->rkts_lock,
-						 (int)(sleeptime / 1000));
-
-			}
-		}
-
-		now = rd_clock();
-
-		while ((rtmr = TAILQ_FIRST(&rkts->rkts_timers)) &&
-		       rtmr->rtmr_next <= now) {
-
-			rd_kafka_timer_unschedule(rkts, rtmr);
-                        rd_kafka_timers_unlock(rkts);
-
-			rtmr->rtmr_callback(rkts, rtmr->rtmr_arg);
-
-                        rd_kafka_timers_lock(rkts);
-			/* Restart timer, unless it has been stopped, or
-			 * already rescheduled (start()ed) from the callback. */
-			if (rd_kafka_timer_started(rtmr) &&
-			    !rd_kafka_timer_scheduled(rtmr))
-				rd_kafka_timer_schedule(rkts, rtmr, 0);
-		}
-
-		if (timeout_us == RD_POLL_NOWAIT) {
-			/* Only iterate once, even if rd_clock doesn't change */
-			break;
-		}
-	}
-
-	rd_kafka_timers_unlock(rkts);
-}
-
-
-void rd_kafka_timers_destroy (rd_kafka_timers_t *rkts) {
-        rd_kafka_timer_t *rtmr;
-
-        rd_kafka_timers_lock(rkts);
-        rkts->rkts_enabled = 0;
-        while ((rtmr = TAILQ_FIRST(&rkts->rkts_timers)))
-                rd_kafka_timer_stop(rkts, rtmr, 0);
-        rd_kafka_assert(rkts->rkts_rk, TAILQ_EMPTY(&rkts->rkts_timers));
-        rd_kafka_timers_unlock(rkts);
-
-        cnd_destroy(&rkts->rkts_cond);
-        mtx_destroy(&rkts->rkts_lock);
-}
-
-void rd_kafka_timers_init (rd_kafka_timers_t *rkts, rd_kafka_t *rk) {
-        memset(rkts, 0, sizeof(*rkts));
-        rkts->rkts_rk = rk;
-        TAILQ_INIT(&rkts->rkts_timers);
-        mtx_init(&rkts->rkts_lock, mtx_plain);
-        cnd_init(&rkts->rkts_cond);
-        rkts->rkts_enabled = 1;
-}
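The scheduling idea above is: keep timers ordered by absolute expiry, sleep
until the earliest one fires, run its callback, then re-arm it. The sketch
below is a hypothetical single-threaded reduction of that pattern using plain
POSIX calls and invented demo_* names; the real code additionally holds
rkts_lock and waits on rkts_cond so other threads can insert timers or
interrupt the wait.

/* demo_timers.c: hypothetical sketch, not librdkafka code. */
#include <stdio.h>
#include <stdint.h>
#include <time.h>

typedef struct demo_timer_s {
        int64_t next_us;            /* absolute expiry in microseconds */
        int64_t interval_us;        /* re-arm interval */
        void  (*callback) (void *arg);
        void   *arg;
} demo_timer_t;

static int64_t demo_clock_us (void) {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (int64_t)ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
}

static void demo_tick (void *arg) {
        printf("tick: %s\n", (const char *)arg);
}

int main (void) {
        demo_timer_t timers[2] = {
                { 0, 300000, demo_tick, "every 300ms" },
                { 0, 500000, demo_tick, "every 500ms" },
        };
        int i, fired;

        for (i = 0 ; i < 2 ; i++)
                timers[i].next_us = demo_clock_us() + timers[i].interval_us;

        for (fired = 0 ; fired < 10 ; fired++) {
                /* The timer expiring first plays the role of the sorted
                 * list head in rd_kafka_timers_next(). */
                demo_timer_t *first = &timers[0];
                int64_t sleep_us;

                for (i = 1 ; i < 2 ; i++)
                        if (timers[i].next_us < first->next_us)
                                first = &timers[i];

                sleep_us = first->next_us - demo_clock_us();
                if (sleep_us > 0) {
                        struct timespec req;
                        req.tv_sec  = (time_t)(sleep_us / 1000000);
                        req.tv_nsec = (long)((sleep_us % 1000000) * 1000);
                        nanosleep(&req, NULL);
                }

                first->callback(first->arg);            /* dispatch */
                first->next_us += first->interval_us;   /* re-arm */
        }
        return 0;
}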

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdkafka_timer.h
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdkafka_timer.h b/thirdparty/librdkafka-0.11.1/src/rdkafka_timer.h
deleted file mode 100644
index de01795..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdkafka_timer.h
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2013, Magnus Edenhill
- * All rights reserved.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met: 
- * 
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer. 
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution. 
- * 
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#pragma once
-
-#include "rd.h"
-
-/* A timer engine. */
-typedef struct rd_kafka_timers_s {
-
-        TAILQ_HEAD(, rd_kafka_timer_s) rkts_timers;
-
-        struct rd_kafka_s *rkts_rk;
-
-	mtx_t       rkts_lock;
-	cnd_t       rkts_cond;
-
-        int         rkts_enabled;
-} rd_kafka_timers_t;
-
-
-typedef struct rd_kafka_timer_s {
-	TAILQ_ENTRY(rd_kafka_timer_s)  rtmr_link;
-
-	rd_ts_t rtmr_next;
-	rd_ts_t rtmr_interval;   /* interval in microseconds */
-
-	void  (*rtmr_callback) (rd_kafka_timers_t *rkts, void *arg);
-	void   *rtmr_arg;
-} rd_kafka_timer_t;
-
-
-
-void rd_kafka_timer_stop (rd_kafka_timers_t *rkts,
-                          rd_kafka_timer_t *rtmr, int lock);
-void rd_kafka_timer_start (rd_kafka_timers_t *rkts,
-			   rd_kafka_timer_t *rtmr, rd_ts_t interval,
-			   void (*callback) (rd_kafka_timers_t *rkts,
-                                             void *arg),
-			   void *arg);
-
-void rd_kafka_timer_backoff (rd_kafka_timers_t *rkts,
-			     rd_kafka_timer_t *rtmr, int backoff_us);
-rd_ts_t rd_kafka_timer_next (rd_kafka_timers_t *rkts, rd_kafka_timer_t *rtmr,
-                             int do_lock);
-
-void rd_kafka_timers_interrupt (rd_kafka_timers_t *rkts);
-rd_ts_t rd_kafka_timers_next (rd_kafka_timers_t *rkts, int timeout_us,
-			      int do_lock);
-void rd_kafka_timers_run (rd_kafka_timers_t *rkts, int timeout_us);
-void rd_kafka_timers_destroy (rd_kafka_timers_t *rkts);
-void rd_kafka_timers_init (rd_kafka_timers_t *rkts, rd_kafka_t *rk);

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdkafka_topic.c
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdkafka_topic.c b/thirdparty/librdkafka-0.11.1/src/rdkafka_topic.c
deleted file mode 100644
index 3975d80..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdkafka_topic.c
+++ /dev/null
@@ -1,1306 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012,2013 Magnus Edenhill
- * All rights reserved.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met: 
- * 
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer. 
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution. 
- * 
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "rd.h"
-#include "rdkafka_int.h"
-#include "rdkafka_msg.h"
-#include "rdkafka_topic.h"
-#include "rdkafka_partition.h"
-#include "rdkafka_broker.h"
-#include "rdkafka_cgrp.h"
-#include "rdkafka_metadata.h"
-#include "rdlog.h"
-#include "rdsysqueue.h"
-#include "rdtime.h"
-#include "rdregex.h"
-
-const char *rd_kafka_topic_state_names[] = {
-        "unknown",
-        "exists",
-        "notexists"
-};
-
-
-
-static int
-rd_kafka_topic_metadata_update (rd_kafka_itopic_t *rkt,
-                                const struct rd_kafka_metadata_topic *mdt,
-                                rd_ts_t ts_insert);
-
-
-/**
- * @brief Increases the app's topic reference count and returns the app pointer.
- *
- * The app refcounts are implemented separately from the librdkafka refcounts
- * and to play nicely with shptr we keep one single shptr for the application
- * and increase/decrease a separate rkt_app_refcnt to keep track of its use.
- *
- * This only covers topic_new() & topic_destroy().
- * The topic_t exposed in rd_kafka_message_t is NOT covered and is handled
- * like a standard shptr -> app pointer conversion (keep_a()).
- *
- * @returns a (new) rkt app reference.
- *
- * @remark \p rkt and \p s_rkt are mutually exclusive.
- */
-static rd_kafka_topic_t *rd_kafka_topic_keep_app (rd_kafka_itopic_t *rkt) {
-	rd_kafka_topic_t *app_rkt;
-
-        mtx_lock(&rkt->rkt_app_lock);
-	rkt->rkt_app_refcnt++;
-        if (!(app_rkt = rkt->rkt_app_rkt))
-                app_rkt = rkt->rkt_app_rkt = rd_kafka_topic_keep_a(rkt);
-        mtx_unlock(&rkt->rkt_app_lock);
-
-	return app_rkt;
-}
-
-/**
- * @brief drop rkt app reference
- */
-static void rd_kafka_topic_destroy_app (rd_kafka_topic_t *app_rkt) {
-	rd_kafka_itopic_t *rkt = rd_kafka_topic_a2i(app_rkt);
-        shptr_rd_kafka_itopic_t *s_rkt = NULL;
-
-        mtx_lock(&rkt->rkt_app_lock);
-	rd_kafka_assert(NULL, rkt->rkt_app_refcnt > 0);
-	rkt->rkt_app_refcnt--;
-        if (unlikely(rkt->rkt_app_refcnt == 0)) {
-		rd_kafka_assert(NULL, rkt->rkt_app_rkt);
-		s_rkt = rd_kafka_topic_a2s(app_rkt);
-                rkt->rkt_app_rkt = NULL;
-	}
-        mtx_unlock(&rkt->rkt_app_lock);
-
-	if (s_rkt) /* final app reference lost, destroy the shared ptr. */
-		rd_kafka_topic_destroy0(s_rkt);
-}
-
-
-/**
- * Final destructor for topic. Refcnt must be 0.
- */
-void rd_kafka_topic_destroy_final (rd_kafka_itopic_t *rkt) {
-
-	rd_kafka_assert(rkt->rkt_rk, rd_refcnt_get(&rkt->rkt_refcnt) == 0);
-
-        rd_kafka_wrlock(rkt->rkt_rk);
-        TAILQ_REMOVE(&rkt->rkt_rk->rk_topics, rkt, rkt_link);
-        rkt->rkt_rk->rk_topic_cnt--;
-        rd_kafka_wrunlock(rkt->rkt_rk);
-
-        rd_kafka_assert(rkt->rkt_rk, rd_list_empty(&rkt->rkt_desp));
-        rd_list_destroy(&rkt->rkt_desp);
-
-	if (rkt->rkt_topic)
-		rd_kafkap_str_destroy(rkt->rkt_topic);
-
-	rd_kafka_anyconf_destroy(_RK_TOPIC, &rkt->rkt_conf);
-
-        mtx_destroy(&rkt->rkt_app_lock);
-	rwlock_destroy(&rkt->rkt_lock);
-        rd_refcnt_destroy(&rkt->rkt_refcnt);
-
-	rd_free(rkt);
-}
-
-/**
- * Application destroy
- */
-void rd_kafka_topic_destroy (rd_kafka_topic_t *app_rkt) {
-	rd_kafka_topic_destroy_app(app_rkt);
-}
-
-
-/**
- * Finds and returns a topic based on its name, or NULL if not found.
- * The 'rkt' refcount is increased by one and the caller must call
- * rd_kafka_topic_destroy() when it is done with the topic to decrease
- * the refcount.
- *
- * Locality: any thread
- */
-shptr_rd_kafka_itopic_t *rd_kafka_topic_find_fl (const char *func, int line,
-                                                rd_kafka_t *rk,
-                                                const char *topic, int do_lock){
-	rd_kafka_itopic_t *rkt;
-        shptr_rd_kafka_itopic_t *s_rkt = NULL;
-
-        if (do_lock)
-                rd_kafka_rdlock(rk);
-	TAILQ_FOREACH(rkt, &rk->rk_topics, rkt_link) {
-		if (!rd_kafkap_str_cmp_str(rkt->rkt_topic, topic)) {
-                        s_rkt = rd_kafka_topic_keep(rkt);
-			break;
-		}
-	}
-        if (do_lock)
-                rd_kafka_rdunlock(rk);
-
-	return s_rkt;
-}
-
-/**
- * Same semantics as ..find() but takes a Kafka protocol string instead.
- */
-shptr_rd_kafka_itopic_t *rd_kafka_topic_find0_fl (const char *func, int line,
-                                                 rd_kafka_t *rk,
-                                                 const rd_kafkap_str_t *topic) {
-	rd_kafka_itopic_t *rkt;
-        shptr_rd_kafka_itopic_t *s_rkt = NULL;
-
-	rd_kafka_rdlock(rk);
-	TAILQ_FOREACH(rkt, &rk->rk_topics, rkt_link) {
-		if (!rd_kafkap_str_cmp(rkt->rkt_topic, topic)) {
-                        s_rkt = rd_kafka_topic_keep(rkt);
-			break;
-		}
-	}
-	rd_kafka_rdunlock(rk);
-
-	return s_rkt;
-}
-
-
-/**
- * Compare shptr_rd_kafka_itopic_t for underlying itopic_t
- */
-int rd_kafka_topic_cmp_s_rkt (const void *_a, const void *_b) {
-        shptr_rd_kafka_itopic_t *a = (void *)_a, *b = (void *)_b;
-        rd_kafka_itopic_t *rkt_a = rd_kafka_topic_s2i(a);
-        rd_kafka_itopic_t *rkt_b = rd_kafka_topic_s2i(b);
-
-        if (rkt_a == rkt_b)
-                return 0;
-
-        return rd_kafkap_str_cmp(rkt_a->rkt_topic, rkt_b->rkt_topic);
-}
-
-
-/**
- * Create new topic handle. 
- *
- * Locality: any
- */
-shptr_rd_kafka_itopic_t *rd_kafka_topic_new0 (rd_kafka_t *rk,
-                                              const char *topic,
-                                              rd_kafka_topic_conf_t *conf,
-                                              int *existing,
-                                              int do_lock) {
-	rd_kafka_itopic_t *rkt;
-        shptr_rd_kafka_itopic_t *s_rkt;
-        const struct rd_kafka_metadata_cache_entry *rkmce;
-
-	/* Verify configuration.
-	 * Maximum topic name size + headers must never exceed message.max.bytes
-	 * which is min-capped to 1000.
-	 * See rd_kafka_broker_produce_toppar() and rdkafka_conf.c */
-	if (!topic || strlen(topic) > 512) {
-		if (conf)
-			rd_kafka_topic_conf_destroy(conf);
-		rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__INVALID_ARG,
-					EINVAL);
-		return NULL;
-	}
-
-	if (do_lock)
-                rd_kafka_wrlock(rk);
-	if ((s_rkt = rd_kafka_topic_find(rk, topic, 0/*no lock*/))) {
-                if (do_lock)
-                        rd_kafka_wrunlock(rk);
-		if (conf)
-			rd_kafka_topic_conf_destroy(conf);
-                if (existing)
-                        *existing = 1;
-		return s_rkt;
-        }
-
-        if (existing)
-                *existing = 0;
-
-	rkt = rd_calloc(1, sizeof(*rkt));
-
-	rkt->rkt_topic     = rd_kafkap_str_new(topic, -1);
-	rkt->rkt_rk        = rk;
-
-	if (!conf) {
-                if (rk->rk_conf.topic_conf)
-                        conf = rd_kafka_topic_conf_dup(rk->rk_conf.topic_conf);
-                else
-                        conf = rd_kafka_topic_conf_new();
-        }
-	rkt->rkt_conf = *conf;
-	rd_free(conf); /* explicitly not rd_kafka_topic_destroy()
-                        * since we don't want to rd_free internal members,
-                        * just the placeholder. The internal members
-                        * were copied on the line above. */
-
-	/* Default partitioner: consistent_random */
-	if (!rkt->rkt_conf.partitioner)
-		rkt->rkt_conf.partitioner = rd_kafka_msg_partitioner_consistent_random;
-
-	if (rkt->rkt_conf.compression_codec == RD_KAFKA_COMPRESSION_INHERIT)
-		rkt->rkt_conf.compression_codec = rk->rk_conf.compression_codec;
-
-	rd_kafka_dbg(rk, TOPIC, "TOPIC", "New local topic: %.*s",
-		     RD_KAFKAP_STR_PR(rkt->rkt_topic));
-
-        rd_list_init(&rkt->rkt_desp, 16, NULL);
-        rd_refcnt_init(&rkt->rkt_refcnt, 0);
-
-        s_rkt = rd_kafka_topic_keep(rkt);
-
-	rwlock_init(&rkt->rkt_lock);
-        mtx_init(&rkt->rkt_app_lock, mtx_plain);
-
-	/* Create unassigned partition */
-	rkt->rkt_ua = rd_kafka_toppar_new(rkt, RD_KAFKA_PARTITION_UA);
-
-	TAILQ_INSERT_TAIL(&rk->rk_topics, rkt, rkt_link);
-	rk->rk_topic_cnt++;
-
-        /* Populate from metadata cache. */
-        if ((rkmce = rd_kafka_metadata_cache_find(rk, topic, 1/*valid*/))) {
-                if (existing)
-                        *existing = 1;
-
-                rd_kafka_topic_metadata_update(rkt, &rkmce->rkmce_mtopic,
-                                               rkmce->rkmce_ts_insert);
-        }
-
-        if (do_lock)
-                rd_kafka_wrunlock(rk);
-
-	return s_rkt;
-}
-
-
-
-/**
- * Create new app topic handle.
- *
- * Locality: application thread
- */
-rd_kafka_topic_t *rd_kafka_topic_new (rd_kafka_t *rk, const char *topic,
-                                      rd_kafka_topic_conf_t *conf) {
-        shptr_rd_kafka_itopic_t *s_rkt;
-        rd_kafka_itopic_t *rkt;
-        rd_kafka_topic_t *app_rkt;
-        int existing;
-
-        s_rkt = rd_kafka_topic_new0(rk, topic, conf, &existing, 1/*lock*/);
-        if (!s_rkt)
-                return NULL;
-
-        rkt = rd_kafka_topic_s2i(s_rkt);
-
-        /* Save a shared pointer to be used in callbacks. */
-	app_rkt = rd_kafka_topic_keep_app(rkt);
-
-        /* Query for the topic leader (async) */
-        if (!existing)
-                rd_kafka_topic_leader_query(rk, rkt);
-
-        /* Drop our reference since there is already/now a rkt_app_rkt */
-        rd_kafka_topic_destroy0(s_rkt);
-
-        return app_rkt;
-}
-
-
-
-/**
- * Sets the state for topic.
- * NOTE: rd_kafka_topic_wrlock(rkt) MUST be held
- */
-static void rd_kafka_topic_set_state (rd_kafka_itopic_t *rkt, int state) {
-
-        if ((int)rkt->rkt_state == state)
-                return;
-
-        rd_kafka_dbg(rkt->rkt_rk, TOPIC, "STATE",
-                     "Topic %s changed state %s -> %s",
-                     rkt->rkt_topic->str,
-                     rd_kafka_topic_state_names[rkt->rkt_state],
-                     rd_kafka_topic_state_names[state]);
-        rkt->rkt_state = state;
-}
-
-/**
- * Returns the name of a topic.
- * NOTE:
- *   The topic's Kafka String representation is crafted with an extra byte
- *   at the end for the NUL terminator that is not included in the length;
- *   this way we can use the topic's String directly.
- *   This is not true for Kafka Strings read from the network.
- */
-const char *rd_kafka_topic_name (const rd_kafka_topic_t *app_rkt) {
-        const rd_kafka_itopic_t *rkt = rd_kafka_topic_a2i(app_rkt);
-	return rkt->rkt_topic->str;
-}
-
-
-
-
-
-/**
- * @brief Update the leader for a topic+partition.
- * @returns 1 if the leader was changed, else 0, or -1 if leader is unknown.
- *
- * @locks rd_kafka_topic_wrlock(rkt) and rd_kafka_toppar_lock(rktp)
- * @locality any
- */
-int rd_kafka_toppar_leader_update (rd_kafka_toppar_t *rktp,
-                                   int32_t leader_id, rd_kafka_broker_t *rkb) {
-
-        if (rktp->rktp_leader_id != leader_id) {
-                rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "TOPICUPD",
-                             "Topic %s [%"PRId32"] migrated from "
-                             "leader %"PRId32" to %"PRId32,
-                             rktp->rktp_rkt->rkt_topic->str,
-                             rktp->rktp_partition,
-                             rktp->rktp_leader_id, leader_id);
-                rktp->rktp_leader_id = leader_id;
-        }
-
-	if (!rkb) {
-		int had_leader = rktp->rktp_leader ? 1 : 0;
-
-		rd_kafka_toppar_broker_delegate(rktp, NULL, 0);
-
-		return had_leader ? -1 : 0;
-	}
-
-
-	if (rktp->rktp_leader) {
-		if (rktp->rktp_leader == rkb) {
-			/* No change in broker */
-			return 0;
-		}
-
-		rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "TOPICUPD",
-			     "Topic %s [%"PRId32"] migrated from "
-			     "broker %"PRId32" to %"PRId32,
-			     rktp->rktp_rkt->rkt_topic->str,
-			     rktp->rktp_partition,
-			     rktp->rktp_leader->rkb_nodeid, rkb->rkb_nodeid);
-	}
-
-	rd_kafka_toppar_broker_delegate(rktp, rkb, 0);
-
-	return 1;
-}
-
-
-static int rd_kafka_toppar_leader_update2 (rd_kafka_itopic_t *rkt,
-					   int32_t partition,
-                                           int32_t leader_id,
-					   rd_kafka_broker_t *rkb) {
-	rd_kafka_toppar_t *rktp;
-        shptr_rd_kafka_toppar_t *s_rktp;
-	int r;
-
-	s_rktp = rd_kafka_toppar_get(rkt, partition, 0);
-        if (unlikely(!s_rktp)) {
-                /* Have only seen this in issue #132.
-                 * Probably caused by corrupt broker state. */
-                rd_kafka_log(rkt->rkt_rk, LOG_WARNING, "LEADER",
-                             "%s [%"PRId32"] is unknown "
-                             "(partition_cnt %i)",
-                             rkt->rkt_topic->str, partition,
-                             rkt->rkt_partition_cnt);
-                return -1;
-        }
-
-        rktp = rd_kafka_toppar_s2i(s_rktp);
-
-        rd_kafka_toppar_lock(rktp);
-        r = rd_kafka_toppar_leader_update(rktp, leader_id, rkb);
-        rd_kafka_toppar_unlock(rktp);
-
-	rd_kafka_toppar_destroy(s_rktp); /* from get() */
-
-	return r;
-}
-
-
-/**
- * Update the number of partitions for a topic and take the appropriate actions.
- * Returns 1 if the partition count changed, else 0.
- * NOTE: rd_kafka_topic_wrlock(rkt) MUST be held.
- */
-static int rd_kafka_topic_partition_cnt_update (rd_kafka_itopic_t *rkt,
-						int32_t partition_cnt) {
-	rd_kafka_t *rk = rkt->rkt_rk;
-	shptr_rd_kafka_toppar_t **rktps;
-	shptr_rd_kafka_toppar_t *rktp_ua;
-        shptr_rd_kafka_toppar_t *s_rktp;
-	rd_kafka_toppar_t *rktp;
-	rd_kafka_msgq_t tmpq = RD_KAFKA_MSGQ_INITIALIZER(tmpq);
-	int32_t i;
-
-	if (likely(rkt->rkt_partition_cnt == partition_cnt))
-		return 0; /* No change in partition count */
-
-        if (unlikely(rkt->rkt_partition_cnt != 0 &&
-                     !rd_kafka_terminating(rkt->rkt_rk)))
-                rd_kafka_log(rk, LOG_NOTICE, "PARTCNT",
-                             "Topic %s partition count changed "
-                             "from %"PRId32" to %"PRId32,
-                             rkt->rkt_topic->str,
-                             rkt->rkt_partition_cnt, partition_cnt);
-        else
-                rd_kafka_dbg(rk, TOPIC, "PARTCNT",
-                             "Topic %s partition count changed "
-                             "from %"PRId32" to %"PRId32,
-                             rkt->rkt_topic->str,
-                             rkt->rkt_partition_cnt, partition_cnt);
-
-
-	/* Create and assign new partition list */
-	if (partition_cnt > 0)
-		rktps = rd_calloc(partition_cnt, sizeof(*rktps));
-	else
-		rktps = NULL;
-
-	for (i = 0 ; i < partition_cnt ; i++) {
-		if (i >= rkt->rkt_partition_cnt) {
-			/* New partition. Check if it's in the list of
-			 * desired partitions first. */
-
-                        s_rktp = rd_kafka_toppar_desired_get(rkt, i);
-
-                        rktp = s_rktp ? rd_kafka_toppar_s2i(s_rktp) : NULL;
-                        if (rktp) {
-				rd_kafka_toppar_lock(rktp);
-                                rktp->rktp_flags &= ~RD_KAFKA_TOPPAR_F_UNKNOWN;
-
-                                /* Remove from desp list since the
-                                 * partition is now known. */
-                                rd_kafka_toppar_desired_unlink(rktp);
-                                rd_kafka_toppar_unlock(rktp);
-			} else
-				s_rktp = rd_kafka_toppar_new(rkt, i);
-			rktps[i] = s_rktp;
-		} else {
-			/* Existing partition, grab our own reference. */
-			rktps[i] = rd_kafka_toppar_keep(
-				rd_kafka_toppar_s2i(rkt->rkt_p[i]));
-			/* Lose previous ref */
-			rd_kafka_toppar_destroy(rkt->rkt_p[i]);
-		}
-	}
-
-	rktp_ua = rd_kafka_toppar_get(rkt, RD_KAFKA_PARTITION_UA, 0);
-
-        /* Propagate notexist errors for desired partitions */
-        RD_LIST_FOREACH(s_rktp, &rkt->rkt_desp, i) {
-                rd_kafka_dbg(rkt->rkt_rk, TOPIC, "DESIRED",
-                             "%s [%"PRId32"]: "
-                             "desired partition does not exist in cluster",
-                             rkt->rkt_topic->str,
-                             rd_kafka_toppar_s2i(s_rktp)->rktp_partition);
-                rd_kafka_toppar_enq_error(rd_kafka_toppar_s2i(s_rktp),
-                                          RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION);
-        }
-
-	/* Remove excessive partitions */
-	for (i = partition_cnt ; i < rkt->rkt_partition_cnt ; i++) {
-		s_rktp = rkt->rkt_p[i];
-                rktp = rd_kafka_toppar_s2i(s_rktp);
-
-		rd_kafka_dbg(rkt->rkt_rk, TOPIC, "REMOVE",
-			     "%s [%"PRId32"] no longer reported in metadata",
-			     rkt->rkt_topic->str, rktp->rktp_partition);
-
-		rd_kafka_toppar_lock(rktp);
-
-		if (rktp->rktp_flags & RD_KAFKA_TOPPAR_F_DESIRED) {
-                        rd_kafka_dbg(rkt->rkt_rk, TOPIC, "DESIRED",
-                                     "Topic %s [%"PRId32"] is desired "
-                                     "but no longer known: "
-                                     "moving back on desired list",
-                                     rkt->rkt_topic->str, rktp->rktp_partition);
-
-                        /* If this is a desired partition move it back on to
-                         * the desired list since partition is no longer known*/
-			rd_kafka_assert(rkt->rkt_rk,
-                                        !(rktp->rktp_flags &
-                                          RD_KAFKA_TOPPAR_F_UNKNOWN));
-			rktp->rktp_flags |= RD_KAFKA_TOPPAR_F_UNKNOWN;
-                        rd_kafka_toppar_desired_link(rktp);
-
-                        if (!rd_kafka_terminating(rkt->rkt_rk))
-                                rd_kafka_toppar_enq_error(
-                                        rktp,
-                                        RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION);
-
-			rd_kafka_toppar_broker_delegate(rktp, NULL, 0);
-
-		} else {
-			/* Tell handling broker to let go of the toppar */
-			rktp->rktp_flags |= RD_KAFKA_TOPPAR_F_REMOVE;
-			rd_kafka_toppar_broker_leave_for_remove(rktp);
-		}
-
-		rd_kafka_toppar_unlock(rktp);
-
-		rd_kafka_toppar_destroy(s_rktp);
-	}
-
-	if (likely(rktp_ua != NULL)) {
-		/* Move messages from removed partitions to UA for
-		 * further processing. */
-		rktp = rd_kafka_toppar_s2i(rktp_ua);
-
-		// FIXME: tmpq not used
-		if (rd_kafka_msgq_len(&tmpq) > 0) {
-			rd_kafka_dbg(rkt->rkt_rk, TOPIC, "TOPPARMOVE",
-				     "Moving %d messages (%zd bytes) from "
-				     "%d removed partitions to UA partition",
-				     rd_kafka_msgq_len(&tmpq),
-				     rd_kafka_msgq_size(&tmpq),
-				     i - partition_cnt);
-
-
-			rd_kafka_toppar_lock(rktp);
-			rd_kafka_msgq_concat(&rktp->rktp_msgq, &tmpq);
-			rd_kafka_toppar_unlock(rktp);
-		}
-
-		rd_kafka_toppar_destroy(rktp_ua); /* .._get() above */
-	} else {
-		/* No UA, fail messages from removed partitions. */
-		if (rd_kafka_msgq_len(&tmpq) > 0) {
-			rd_kafka_dbg(rkt->rkt_rk, TOPIC, "TOPPARMOVE",
-				     "Failing %d messages (%zd bytes) from "
-				     "%d removed partitions",
-				     rd_kafka_msgq_len(&tmpq),
-				     rd_kafka_msgq_size(&tmpq),
-				     i - partition_cnt);
-
-			rd_kafka_dr_msgq(rkt, &tmpq,
-					 RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION);
-		}
-	}
-
-	if (rkt->rkt_p)
-		rd_free(rkt->rkt_p);
-
-	rkt->rkt_p = rktps;
-
-	rkt->rkt_partition_cnt = partition_cnt;
-
-	return 1;
-}
-
-
-
-/**
- * Topic 'rkt' does not exist: propagate to interested parties.
- * The topic's state must have been set to NOTEXISTS and
- * rd_kafka_topic_partition_cnt_update() must have been called prior to
- * calling this function.
- *
- * Locks: rd_kafka_topic_*lock() must be held.
- */
-static void rd_kafka_topic_propagate_notexists (rd_kafka_itopic_t *rkt,
-                                                rd_kafka_resp_err_t err) {
-        shptr_rd_kafka_toppar_t *s_rktp;
-        int i;
-
-        if (rkt->rkt_rk->rk_type != RD_KAFKA_CONSUMER)
-                return;
-
-
-        /* Notify consumers that the topic doesn't exist. */
-        RD_LIST_FOREACH(s_rktp, &rkt->rkt_desp, i)
-                rd_kafka_toppar_enq_error(rd_kafka_toppar_s2i(s_rktp), err);
-}
-
-
-/**
- * Assign messages on the UA partition to available partitions.
- * Locks: rd_kafka_topic_*lock() must be held.
- */
-static void rd_kafka_topic_assign_uas (rd_kafka_itopic_t *rkt,
-                                       rd_kafka_resp_err_t err) {
-	rd_kafka_t *rk = rkt->rkt_rk;
-	shptr_rd_kafka_toppar_t *s_rktp_ua;
-        rd_kafka_toppar_t *rktp_ua;
-	rd_kafka_msg_t *rkm, *tmp;
-	rd_kafka_msgq_t uas = RD_KAFKA_MSGQ_INITIALIZER(uas);
-	rd_kafka_msgq_t failed = RD_KAFKA_MSGQ_INITIALIZER(failed);
-	int cnt;
-
-	if (rkt->rkt_rk->rk_type != RD_KAFKA_PRODUCER)
-		return;
-
-	s_rktp_ua = rd_kafka_toppar_get(rkt, RD_KAFKA_PARTITION_UA, 0);
-	if (unlikely(!s_rktp_ua)) {
-		rd_kafka_dbg(rk, TOPIC, "ASSIGNUA",
-			     "No UnAssigned partition available for %s",
-			     rkt->rkt_topic->str);
-		return;
-	}
-
-        rktp_ua = rd_kafka_toppar_s2i(s_rktp_ua);
-
-	/* Assign all unassigned messages to the available partitions. */
-	rd_kafka_dbg(rk, TOPIC, "PARTCNT",
-		     "Partitioning %i unassigned messages in topic %.*s to "
-		     "%"PRId32" partitions",
-		     rd_atomic32_get(&rktp_ua->rktp_msgq.rkmq_msg_cnt),
-		     RD_KAFKAP_STR_PR(rkt->rkt_topic),
-		     rkt->rkt_partition_cnt);
-
-	rd_kafka_toppar_lock(rktp_ua);
-	rd_kafka_msgq_move(&uas, &rktp_ua->rktp_msgq);
-	cnt = rd_atomic32_get(&uas.rkmq_msg_cnt);
-	rd_kafka_toppar_unlock(rktp_ua);
-
-	TAILQ_FOREACH_SAFE(rkm, &uas.rkmq_msgs, rkm_link, tmp) {
-		/* Fast-path for failing messages with forced partition */
-		if (rkm->rkm_partition != RD_KAFKA_PARTITION_UA &&
-		    rkm->rkm_partition >= rkt->rkt_partition_cnt &&
-		    rkt->rkt_state != RD_KAFKA_TOPIC_S_UNKNOWN) {
-			rd_kafka_msgq_enq(&failed, rkm);
-			continue;
-		}
-
-		if (unlikely(rd_kafka_msg_partitioner(rkt, rkm, 0) != 0)) {
-			/* Desired partition not available */
-			rd_kafka_msgq_enq(&failed, rkm);
-		}
-	}
-
-	rd_kafka_dbg(rk, TOPIC, "UAS",
-		     "%i/%i messages were partitioned in topic %s",
-		     cnt - rd_atomic32_get(&failed.rkmq_msg_cnt),
-		     cnt, rkt->rkt_topic->str);
-
-	if (rd_atomic32_get(&failed.rkmq_msg_cnt) > 0) {
-		/* Fail the messages */
-		rd_kafka_dbg(rk, TOPIC, "UAS",
-			     "%"PRId32"/%i messages failed partitioning "
-			     "in topic %s",
-			     rd_atomic32_get(&uas.rkmq_msg_cnt), cnt,
-			     rkt->rkt_topic->str);
-		rd_kafka_dr_msgq(rkt, &failed,
-				 rkt->rkt_state == RD_KAFKA_TOPIC_S_NOTEXISTS ?
-				 err :
-				 RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION);
-	}
-
-	rd_kafka_toppar_destroy(s_rktp_ua); /* from get() */
-}
-
-
-/**
- * Received metadata request contained no information about topic 'rkt'
- * and thus indicates the topic is not available in the cluster.
- */
-void rd_kafka_topic_metadata_none (rd_kafka_itopic_t *rkt) {
-	rd_kafka_topic_wrlock(rkt);
-
-	if (unlikely(rd_atomic32_get(&rkt->rkt_rk->rk_terminate))) {
-		/* Don't update metadata while terminating; this check is done
-		 * after acquiring the lock for proper synchronisation. */
-		rd_kafka_topic_wrunlock(rkt);
-		return;
-	}
-
-	rkt->rkt_ts_metadata = rd_clock();
-
-        rd_kafka_topic_set_state(rkt, RD_KAFKA_TOPIC_S_NOTEXISTS);
-
-        rkt->rkt_flags &= ~RD_KAFKA_TOPIC_F_LEADER_UNAVAIL;
-
-	/* Update number of partitions */
-	rd_kafka_topic_partition_cnt_update(rkt, 0);
-
-        /* Purge messages with forced partition */
-        rd_kafka_topic_assign_uas(rkt, RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC);
-
-        /* Propagate nonexistent topic info */
-        rd_kafka_topic_propagate_notexists(rkt,
-                                           RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC);
-
-	rd_kafka_topic_wrunlock(rkt);
-}
-
-
-/**
- * @brief Update a topic from metadata.
- *
- * @param ts_age absolute age (timestamp) of metadata.
- * @returns 1 if the number of partitions changed, 0 if not, and -1 if the
- *          topic is unknown.
- *
- * @locks rd_kafka*lock()
- */
-static int
-rd_kafka_topic_metadata_update (rd_kafka_itopic_t *rkt,
-                                const struct rd_kafka_metadata_topic *mdt,
-                                rd_ts_t ts_age) {
-        rd_kafka_t *rk = rkt->rkt_rk;
-	int upd = 0;
-	int j;
-        rd_kafka_broker_t **partbrokers;
-        int leader_cnt = 0;
-        int old_state;
-
-	if (mdt->err != RD_KAFKA_RESP_ERR_NO_ERROR)
-		rd_kafka_dbg(rk, TOPIC|RD_KAFKA_DBG_METADATA, "METADATA",
-			   "Error in metadata reply for "
-			   "topic %s (PartCnt %i): %s",
-			   rkt->rkt_topic->str, mdt->partition_cnt,
-			   rd_kafka_err2str(mdt->err));
-
-        if (unlikely(rd_kafka_terminating(rk))) {
-                /* Don't update metadata while terminating, do this
-                 * after acquiring lock for proper synchronisation */
-                return -1;
-        }
-
-        /* Look up brokers before acquiring rkt lock to preserve lock order */
-        partbrokers = rd_alloca(mdt->partition_cnt * sizeof(*partbrokers));
-
-	for (j = 0 ; j < mdt->partition_cnt ; j++) {
-		if (mdt->partitions[j].leader == -1) {
-                        partbrokers[j] = NULL;
-			continue;
-		}
-
-                partbrokers[j] =
-                        rd_kafka_broker_find_by_nodeid(rk,
-                                                       mdt->partitions[j].
-                                                       leader);
-	}
-
-
-	rd_kafka_topic_wrlock(rkt);
-
-        old_state = rkt->rkt_state;
-	rkt->rkt_ts_metadata = ts_age;
-
-	/* Set topic state */
-	if (mdt->err == RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART ||
-	    mdt->err == RD_KAFKA_RESP_ERR_UNKNOWN/*auto.create.topics fails*/||
-            mdt->err == RD_KAFKA_RESP_ERR_TOPIC_EXCEPTION/*invalid topic*/)
-                rd_kafka_topic_set_state(rkt, RD_KAFKA_TOPIC_S_NOTEXISTS);
-        else if (mdt->partition_cnt > 0)
-                rd_kafka_topic_set_state(rkt, RD_KAFKA_TOPIC_S_EXISTS);
-
-	/* Update number of partitions, but not if there are
-	 * (possibly intermittent) errors (e.g., "Leader not available"). */
-	if (mdt->err == RD_KAFKA_RESP_ERR_NO_ERROR)
-		upd += rd_kafka_topic_partition_cnt_update(rkt,
-							   mdt->partition_cnt);
-
-	/* Update leader for each partition */
-	for (j = 0 ; j < mdt->partition_cnt ; j++) {
-                int r;
-		rd_kafka_broker_t *leader;
-
-		rd_kafka_dbg(rk, TOPIC|RD_KAFKA_DBG_METADATA, "METADATA",
-			   "  Topic %s partition %i Leader %"PRId32,
-			   rkt->rkt_topic->str,
-			   mdt->partitions[j].id,
-			   mdt->partitions[j].leader);
-
-		leader = partbrokers[j];
-		partbrokers[j] = NULL;
-
-		/* Update leader for partition */
-		r = rd_kafka_toppar_leader_update2(rkt,
-						   mdt->partitions[j].id,
-                                                   mdt->partitions[j].leader,
-						   leader);
-
-                upd += (r != 0 ? 1 : 0);
-
-                if (leader) {
-                        if (r != -1)
-                                leader_cnt++;
-                        /* Drop reference to broker (from find()) */
-                        rd_kafka_broker_destroy(leader);
-                }
-        }
-
-        /* If all partitions have leaders we can turn off fast leader query. */
-        if (mdt->partition_cnt > 0 && leader_cnt == mdt->partition_cnt)
-                rkt->rkt_flags &= ~RD_KAFKA_TOPIC_F_LEADER_UNAVAIL;
-
-	if (mdt->err != RD_KAFKA_RESP_ERR_NO_ERROR && rkt->rkt_partition_cnt) {
-                /* (Possibly intermittent) topic-wide error:
-                 * remove leaders for partitions */
-
-		for (j = 0 ; j < rkt->rkt_partition_cnt ; j++) {
-                        rd_kafka_toppar_t *rktp;
-			if (!rkt->rkt_p[j])
-                                continue;
-
-                        rktp = rd_kafka_toppar_s2i(rkt->rkt_p[j]);
-                        rd_kafka_toppar_lock(rktp);
-                        rd_kafka_toppar_broker_delegate(rktp, NULL, 0);
-                        rd_kafka_toppar_unlock(rktp);
-                }
-        }
-
-	/* Try to assign unassigned messages to new partitions, or fail them */
-	if (upd > 0 || rkt->rkt_state == RD_KAFKA_TOPIC_S_NOTEXISTS)
-		rd_kafka_topic_assign_uas(rkt, mdt->err ?
-                                          mdt->err :
-                                          RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC);
-
-        /* Trigger notexists propagation */
-        if (old_state != (int)rkt->rkt_state &&
-            rkt->rkt_state == RD_KAFKA_TOPIC_S_NOTEXISTS)
-                rd_kafka_topic_propagate_notexists(
-                        rkt,
-                        mdt->err ? mdt->err : RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC);
-
-	rd_kafka_topic_wrunlock(rkt);
-
-	/* Lose broker references */
-	for (j = 0 ; j < mdt->partition_cnt ; j++)
-		if (partbrokers[j])
-			rd_kafka_broker_destroy(partbrokers[j]);
-
-
-	return upd;
-}
-
-/**
- * @brief Update topic by metadata, if topic is locally known.
- * @sa rd_kafka_topic_metadata_update()
- * @locks none
- */
-int
-rd_kafka_topic_metadata_update2 (rd_kafka_broker_t *rkb,
-                                 const struct rd_kafka_metadata_topic *mdt) {
-        rd_kafka_itopic_t *rkt;
-        shptr_rd_kafka_itopic_t *s_rkt;
-        int r;
-
-        rd_kafka_wrlock(rkb->rkb_rk);
-        if (!(s_rkt = rd_kafka_topic_find(rkb->rkb_rk,
-                                          mdt->topic, 0/*!lock*/))) {
-                rd_kafka_wrunlock(rkb->rkb_rk);
-                return -1; /* Ignore topics that we dont have locally. */
-        }
-
-        rkt = rd_kafka_topic_s2i(s_rkt);
-
-        r = rd_kafka_topic_metadata_update(rkt, mdt, rd_clock());
-
-        rd_kafka_wrunlock(rkb->rkb_rk);
-
-        rd_kafka_topic_destroy0(s_rkt); /* from find() */
-
-        return r;
-}
-
-
-
-/**
- * @returns a list of all partitions (s_rktp's) for a topic.
- * @remark rd_kafka_topic_*lock() MUST be held.
- */
-static rd_list_t *rd_kafka_topic_get_all_partitions (rd_kafka_itopic_t *rkt) {
-	rd_list_t *list;
-	shptr_rd_kafka_toppar_t *s_rktp;
-	int i;
-
-        list = rd_list_new(rkt->rkt_partition_cnt +
-                           rd_list_cnt(&rkt->rkt_desp) + 1/*ua*/, NULL);
-
-	for (i = 0 ; i < rkt->rkt_partition_cnt ; i++)
-		rd_list_add(list, rd_kafka_toppar_keep(
-				    rd_kafka_toppar_s2i(rkt->rkt_p[i])));
-
-	RD_LIST_FOREACH(s_rktp, &rkt->rkt_desp, i)
-		rd_list_add(list, rd_kafka_toppar_keep(
-				    rd_kafka_toppar_s2i(s_rktp)));
-
-	if (rkt->rkt_ua)
-		rd_list_add(list, rd_kafka_toppar_keep(
-				    rd_kafka_toppar_s2i(rkt->rkt_ua)));
-
-	return list;
-}
-
-
-
-
-/**
- * Remove all partitions from a topic, including the ua.
- * Must only be called during rd_kafka_t termination.
- *
- * Locality: main thread
- */
-void rd_kafka_topic_partitions_remove (rd_kafka_itopic_t *rkt) {
-        shptr_rd_kafka_toppar_t *s_rktp;
-        shptr_rd_kafka_itopic_t *s_rkt;
-	rd_list_t *partitions;
-	int i;
-
-	/* Purge messages for all partitions outside the topic_wrlock since
-	 * a message can hold a reference to the topic_t and purging it under
-	 * the write lock would trigger a recursive lock deadlock. */
-	rd_kafka_topic_rdlock(rkt);
-	partitions = rd_kafka_topic_get_all_partitions(rkt);
-	rd_kafka_topic_rdunlock(rkt);
-
-	RD_LIST_FOREACH(s_rktp, partitions, i) {
-		rd_kafka_toppar_t *rktp = rd_kafka_toppar_s2i(s_rktp);
-
-		rd_kafka_toppar_lock(rktp);
-		rd_kafka_msgq_purge(rkt->rkt_rk, &rktp->rktp_msgq);
-		rd_kafka_toppar_purge_queues(rktp);
-		rd_kafka_toppar_unlock(rktp);
-
-		rd_kafka_toppar_destroy(s_rktp);
-	}
-	rd_list_destroy(partitions);
-
-	s_rkt = rd_kafka_topic_keep(rkt);
-	rd_kafka_topic_wrlock(rkt);
-
-	/* Setting the partition count to 0 moves all partitions to
-	 * the desired list (rkt_desp). */
-        rd_kafka_topic_partition_cnt_update(rkt, 0);
-
-        /* Now clean out the desired partitions list.
-         * Use reverse traversal to avoid excessive memory shuffling
-         * in rd_list_remove() */
-        RD_LIST_FOREACH_REVERSE(s_rktp, &rkt->rkt_desp, i) {
-		rd_kafka_toppar_t *rktp = rd_kafka_toppar_s2i(s_rktp);
-		/* Our reference */
-		shptr_rd_kafka_toppar_t *s_rktp2 = rd_kafka_toppar_keep(rktp);
-                rd_kafka_toppar_lock(rktp);
-                rd_kafka_toppar_desired_del(rktp);
-                rd_kafka_toppar_unlock(rktp);
-                rd_kafka_toppar_destroy(s_rktp2);
-        }
-
-        rd_kafka_assert(rkt->rkt_rk, rkt->rkt_partition_cnt == 0);
-
-	if (rkt->rkt_p)
-		rd_free(rkt->rkt_p);
-
-	rkt->rkt_p = NULL;
-	rkt->rkt_partition_cnt = 0;
-
-        if ((s_rktp = rkt->rkt_ua)) {
-                rkt->rkt_ua = NULL;
-                rd_kafka_toppar_destroy(s_rktp);
-	}
-
-	rd_kafka_topic_wrunlock(rkt);
-
-	rd_kafka_topic_destroy0(s_rkt);
-}
-
-
-
-/**
- * Scan all topics and partitions for:
- *  - timed out messages.
- *  - topics that need to be created on the broker.
- *  - topics whose metadata is too old.
- */
-int rd_kafka_topic_scan_all (rd_kafka_t *rk, rd_ts_t now) {
-	rd_kafka_itopic_t *rkt;
-	rd_kafka_toppar_t *rktp;
-        shptr_rd_kafka_toppar_t *s_rktp;
-	int totcnt = 0;
-        rd_list_t query_topics;
-
-        rd_list_init(&query_topics, 0, rd_free);
-
-	rd_kafka_rdlock(rk);
-	TAILQ_FOREACH(rkt, &rk->rk_topics, rkt_link) {
-		int p;
-                int cnt = 0, tpcnt = 0;
-                rd_kafka_msgq_t timedout;
-                int query_this = 0;
-
-                rd_kafka_msgq_init(&timedout);
-
-		rd_kafka_topic_wrlock(rkt);
-
-                /* Check if metadata information has timed out. */
-                if (rkt->rkt_state != RD_KAFKA_TOPIC_S_UNKNOWN &&
-                    !rd_kafka_metadata_cache_topic_get(
-                            rk, rkt->rkt_topic->str, 1/*only valid*/)) {
-                        rd_kafka_dbg(rk, TOPIC, "NOINFO",
-                                     "Topic %s metadata information timed out "
-                                     "(%"PRId64"ms old)",
-                                     rkt->rkt_topic->str,
-                                     (rd_clock() - rkt->rkt_ts_metadata)/1000);
-                        rd_kafka_topic_set_state(rkt, RD_KAFKA_TOPIC_S_UNKNOWN);
-
-                        query_this = 1;
-                }
-
-                /* Just need a read-lock from here on. */
-                rd_kafka_topic_wrunlock(rkt);
-                rd_kafka_topic_rdlock(rkt);
-
-                if (rkt->rkt_partition_cnt == 0) {
-                        /* If this topic is unknown to the brokers, try
-                         * to create it by sending a topic-specific
-                         * metadata request.
-                         * This requires "auto.create.topics.enable=true"
-                         * on the brokers. */
-                        rd_kafka_dbg(rk, TOPIC, "NOINFO",
-                                     "Topic %s partition count is zero: "
-                                     "should refresh metadata",
-                                     rkt->rkt_topic->str);
-
-                        query_this = 1;
-                }
-
-		for (p = RD_KAFKA_PARTITION_UA ;
-		     p < rkt->rkt_partition_cnt ; p++) {
-			int did_tmout = 0;
-
-			if (!(s_rktp = rd_kafka_toppar_get(rkt, p, 0)))
-				continue;
-
-                        rktp = rd_kafka_toppar_s2i(s_rktp);
-			rd_kafka_toppar_lock(rktp);
-
-                        /* Check that partition has a leader that is up,
-                         * else add topic to query list. */
-                        if (p != RD_KAFKA_PARTITION_UA &&
-                            (!rktp->rktp_leader ||
-                             rktp->rktp_leader->rkb_source ==
-                             RD_KAFKA_INTERNAL ||
-                             rd_kafka_broker_get_state(rktp->rktp_leader) <
-                             RD_KAFKA_BROKER_STATE_UP)) {
-                                rd_kafka_dbg(rk, TOPIC, "QRYLEADER",
-                                             "Topic %s [%"PRId32"]: "
-                                             "leader is %s: re-query",
-                                             rkt->rkt_topic->str,
-                                             rktp->rktp_partition,
-                                             !rktp->rktp_leader ?
-                                             "unavailable" :
-                                             (rktp->rktp_leader->rkb_source ==
-                                              RD_KAFKA_INTERNAL ? "internal":
-                                              "down"));
-                                query_this = 1;
-                        }
-
-			/* Scan toppar's message queues for timeouts */
-			if (rd_kafka_msgq_age_scan(&rktp->rktp_xmit_msgq,
-						   &timedout, now) > 0)
-				did_tmout = 1;
-
-			if (rd_kafka_msgq_age_scan(&rktp->rktp_msgq,
-						   &timedout, now) > 0)
-				did_tmout = 1;
-
-			tpcnt += did_tmout;
-
-			rd_kafka_toppar_unlock(rktp);
-			rd_kafka_toppar_destroy(s_rktp);
-		}
-
-                rd_kafka_topic_rdunlock(rkt);
-
-                if ((cnt = rd_atomic32_get(&timedout.rkmq_msg_cnt)) > 0) {
-                        totcnt += cnt;
-                        rd_kafka_dbg(rk, MSG, "TIMEOUT",
-                                     "%s: %"PRId32" message(s) "
-                                     "from %i toppar(s) timed out",
-                                     rkt->rkt_topic->str, cnt, tpcnt);
-                        rd_kafka_dr_msgq(rkt, &timedout,
-                                         RD_KAFKA_RESP_ERR__MSG_TIMED_OUT);
-                }
-
-                /* Need to re-query this topic's leader. */
-                if (query_this &&
-                    !rd_list_find(&query_topics, rkt->rkt_topic->str,
-                                  (void *)strcmp))
-                        rd_list_add(&query_topics,
-                                    rd_strdup(rkt->rkt_topic->str));
-
-        }
-        rd_kafka_rdunlock(rk);
-
-        if (!rd_list_empty(&query_topics))
-                rd_kafka_metadata_refresh_topics(rk, NULL, &query_topics,
-                                                 1/*force even if cached
-                                                    * info exists*/,
-                                                 "refresh unavailable topics");
-        rd_list_destroy(&query_topics);
-
-        return totcnt;
-}
-
-
-/**
- * Locks: rd_kafka_topic_*lock() must be held.
- */
-int rd_kafka_topic_partition_available (const rd_kafka_topic_t *app_rkt,
-					int32_t partition) {
-	int avail;
-	shptr_rd_kafka_toppar_t *s_rktp;
-        rd_kafka_toppar_t *rktp;
-        rd_kafka_broker_t *rkb;
-
-	s_rktp = rd_kafka_toppar_get(rd_kafka_topic_a2i(app_rkt),
-                                     partition, 0/*no ua-on-miss*/);
-	if (unlikely(!s_rktp))
-		return 0;
-
-        rktp = rd_kafka_toppar_s2i(s_rktp);
-        rkb = rd_kafka_toppar_leader(rktp, 1/*proper broker*/);
-        avail = rkb ? 1 : 0;
-        if (rkb)
-                rd_kafka_broker_destroy(rkb);
-	rd_kafka_toppar_destroy(s_rktp);
-	return avail;
-}
-
-
-void *rd_kafka_topic_opaque (const rd_kafka_topic_t *app_rkt) {
-        return rd_kafka_topic_a2i(app_rkt)->rkt_conf.opaque;
-}
-
-int rd_kafka_topic_info_cmp (const void *_a, const void *_b) {
-	const rd_kafka_topic_info_t *a = _a, *b = _b;
-	int r;
-
-	if ((r = strcmp(a->topic, b->topic)))
-		return r;
-
-	return a->partition_cnt - b->partition_cnt;
-}
-
-
-/**
- * Allocate new topic_info.
- * \p topic is copied.
- */
-rd_kafka_topic_info_t *rd_kafka_topic_info_new (const char *topic,
-						int partition_cnt) {
-	rd_kafka_topic_info_t *ti;
-	size_t tlen = strlen(topic) + 1;
-
-	/* Allocate space for the topic along with the struct */
-	ti = rd_malloc(sizeof(*ti) + tlen);
-	ti->topic = (char *)(ti+1);
-	memcpy((char *)ti->topic, topic, tlen);
-	ti->partition_cnt = partition_cnt;
-
-	return ti;
-}
-
-/**
- * Destroy/free topic_info
- */
-void rd_kafka_topic_info_destroy (rd_kafka_topic_info_t *ti) {
-	rd_free(ti);
-}
-
-
-/**
- * @brief Match \p topic to \p pattern.
- *
- * If pattern begins with "^" it is considered a regexp,
- * otherwise a simple string comparison is performed.
- *
- * @returns 1 on match, else 0.
- */
-int rd_kafka_topic_match (rd_kafka_t *rk, const char *pattern,
-			  const char *topic) {
-	char errstr[128];
-
-	if (*pattern == '^') {
-		int r = rd_regex_match(pattern, topic, errstr, sizeof(errstr));
-		if (unlikely(r == -1))
-			rd_kafka_dbg(rk, TOPIC, "TOPICREGEX",
-				     "Topic \"%s\" regex \"%s\" "
-				     "matching failed: %s",
-				     topic, pattern, errstr);
-		return r == 1;
-	} else
-		return !strcmp(pattern, topic);
-}
-
-
-
-
-
-
-
-
-
-/**
- * Trigger broker metadata query for topic leader.
- * 'rkt' must not be NULL: its topic name is always added to the query list.
- *
- * @locks none
- */
-void rd_kafka_topic_leader_query0 (rd_kafka_t *rk, rd_kafka_itopic_t *rkt,
-                                   int do_rk_lock) {
-        rd_list_t topics;
-
-        rd_list_init(&topics, 1, rd_free);
-        rd_list_add(&topics, rd_strdup(rkt->rkt_topic->str));
-
-        rd_kafka_metadata_refresh_topics(rk, NULL, &topics,
-                                         0/*dont force*/, "leader query");
-
-        rd_list_destroy(&topics);
-}
-
-
-
-/**
- * @brief Populate list \p topics with the topic names (strdupped char *) of
- *        all locally known topics.
- *
- * @remark \p rk lock MUST NOT be held
- */
-void rd_kafka_local_topics_to_list (rd_kafka_t *rk, rd_list_t *topics) {
-        rd_kafka_itopic_t *rkt;
-
-        rd_kafka_rdlock(rk);
-        rd_list_grow(topics, rk->rk_topic_cnt);
-        TAILQ_FOREACH(rkt, &rk->rk_topics, rkt_link)
-                rd_list_add(topics, rd_strdup(rkt->rkt_topic->str));
-        rd_kafka_rdunlock(rk);
-}
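
As a rough usage sketch (not part of this commit), the app-refcounted handles
managed above by rd_kafka_topic_keep_app()/rd_kafka_topic_destroy_app() back
the public topic API roughly as follows; error handling is abbreviated, the
topic name is arbitrary, and rk is assumed to be an existing producer handle:

    #include <librdkafka/rdkafka.h>

    static void produce_one(rd_kafka_t *rk) {
            /* Creates (or finds) the internal topic object and returns an
             * app handle tracked via rkt_app_refcnt. NULL conf means the
             * default topic configuration is used. */
            rd_kafka_topic_t *rkt = rd_kafka_topic_new(rk, "mytopic", NULL);
            if (!rkt)
                    return;

            rd_kafka_produce(rkt, RD_KAFKA_PARTITION_UA, RD_KAFKA_MSG_F_COPY,
                             "hello", 5, NULL, 0, NULL);

            /* Drops the app reference; the internal rd_kafka_itopic_t stays
             * alive for as long as librdkafka itself still needs it. */
            rd_kafka_topic_destroy(rkt);
    }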


[15/51] [partial] nifi-minifi-cpp git commit: MINIFICPP-512 - upgrade to librdkafka 0.11.4

Posted by ph...@apache.org.
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdposix.h
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdposix.h b/thirdparty/librdkafka-0.11.1/src/rdposix.h
deleted file mode 100644
index 72f9814..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdposix.h
+++ /dev/null
@@ -1,182 +0,0 @@
-#pragma once
-/*
-* librdkafka - Apache Kafka C library
-*
-* Copyright (c) 2012-2015 Magnus Edenhill
-* All rights reserved.
-*
-* Redistribution and use in source and binary forms, with or without
-* modification, are permitted provided that the following conditions are met:
-*
-* 1. Redistributions of source code must retain the above copyright notice,
-*    this list of conditions and the following disclaimer.
-* 2. Redistributions in binary form must reproduce the above copyright notice,
-*    this list of conditions and the following disclaimer in the documentation
-*    and/or other materials provided with the distribution.
-*
-* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-* POSSIBILITY OF SUCH DAMAGE.
-*/
-
-/**
- * POSIX system support
- */
-#pragma once
-
-#include <unistd.h>
-#include <stdio.h>
-#include <sys/time.h>
-#include <inttypes.h>
-#include <fcntl.h>
-#include <errno.h>
-
-/**
-* Types
-*/
-
-
-/**
- * Annotations, attributes, optimizers
- */
-#ifndef likely
-#define likely(x)   __builtin_expect((x),1)
-#endif
-#ifndef unlikely
-#define unlikely(x) __builtin_expect((x),0)
-#endif
-
-#define RD_UNUSED   __attribute__((unused))
-#define RD_INLINE   inline
-#define RD_WARN_UNUSED_RESULT __attribute__((warn_unused_result))
-#define RD_NORETURN __attribute__((noreturn))
-#define RD_IS_CONSTANT(p)  __builtin_constant_p((p))
-#define RD_TLS      __thread
-
-/**
-* Allocation
-*/
-#if !defined(__FreeBSD__)
-/* alloca(3) is in stdlib on FreeBSD */
-#include <alloca.h>
-#endif
-
-#define rd_alloca(N)  alloca(N)
-
-
-/**
-* Strings, formatting, printf, ..
-*/
-
-/* size_t and ssize_t format strings */
-#define PRIusz  "zu"
-#define PRIdsz  "zd"
-
-#define RD_FORMAT(...) __attribute__((format (__VA_ARGS__)))
-#define rd_snprintf(...)  snprintf(__VA_ARGS__)
-#define rd_vsnprintf(...) vsnprintf(__VA_ARGS__)
-
-#define rd_strcasecmp(A,B) strcasecmp(A,B)
-#define rd_strncasecmp(A,B,N) strncasecmp(A,B,N)
-
-/**
- * Errors
- */
-#if HAVE_STRERROR_R
-static RD_INLINE RD_UNUSED const char *rd_strerror(int err) {
-        static RD_TLS char ret[128];
-
-#if defined(__linux__) && defined(_GNU_SOURCE)
-        return strerror_r(err, ret, sizeof(ret));
-#else /* XSI version */
-        int r;
-        /* The r assignment is to catch the case where
-         * _GNU_SOURCE is not defined but the GNU version is
-         * picked up anyway. */
-        r = strerror_r(err, ret, sizeof(ret));
-        if (unlikely(r))
-                rd_snprintf(ret, sizeof(ret),
-                            "strerror_r(%d) failed (ret %d)", err, r);
-        return ret;
-#endif
-}
-#else
-#define rd_strerror(err) strerror(err)
-#endif
-
-
-/**
- * Atomics
- */
-#include "rdatomic.h"
-
-/**
-* Misc
-*/
-
-/**
- * Microsecond sleep.
- * Will retry on signal interrupt unless *terminate is true.
- */
-static RD_INLINE RD_UNUSED
-void rd_usleep (int usec, rd_atomic32_t *terminate) {
-        struct timespec req = {usec / 1000000, (long)(usec % 1000000) * 1000};
-
-        /* Retry until complete (issue #272), unless terminating. */
-        while (nanosleep(&req, &req) == -1 &&
-               (errno == EINTR && (!terminate || !rd_atomic32_get(terminate))))
-                ;
-}
-
-
-
-
-#define rd_gettimeofday(tv,tz)  gettimeofday(tv,tz)
-
-
-#define rd_assert(EXPR)  assert(EXPR)
-
-/**
- * Empty struct initializer
- */
-#define RD_ZERO_INIT  {}
-
-/**
- * Sockets, IO
- */
-
-/**
- * @brief Set socket to non-blocking
- * @returns 0 on success or errno on failure.
- */
-static RD_UNUSED int rd_fd_set_nonblocking (int fd) {
-        int fl = fcntl(fd, F_GETFL, 0);
-        if (fl == -1 ||
-            fcntl(fd, F_SETFL, fl | O_NONBLOCK) == -1)
-                return errno;
-        return 0;
-}
-
-/**
- * @brief Create non-blocking pipe
- * @returns 0 on success or errno on failure
- */
-static RD_UNUSED int rd_pipe_nonblocking (int *fds) {
-        if (pipe(fds) == -1 ||
-            rd_fd_set_nonblocking(fds[0]) == -1 ||
-            rd_fd_set_nonblocking(fds[1]))
-                return errno;
-        return 0;
-}
-#define rd_pipe(fds) pipe(fds)
-#define rd_read(fd,buf,sz) read(fd,buf,sz)
-#define rd_write(fd,buf,sz) write(fd,buf,sz)
-#define rd_close(fd) close(fd)
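
For reference, the rd_pipe_nonblocking()/rd_fd_set_nonblocking() pattern above
maps to plain POSIX as in this small standalone sketch (no librdkafka headers
assumed):

    #include <fcntl.h>
    #include <unistd.h>
    #include <errno.h>

    /* Create a pipe and mark both ends O_NONBLOCK; returns 0 on success
     * or errno on failure, mirroring rd_pipe_nonblocking() above. */
    static int make_nonblocking_pipe(int fds[2]) {
            if (pipe(fds) == -1)
                    return errno;
            for (int i = 0; i < 2; i++) {
                    int fl = fcntl(fds[i], F_GETFL, 0);
                    if (fl == -1 ||
                        fcntl(fds[i], F_SETFL, fl | O_NONBLOCK) == -1)
                            return errno;
            }
            return 0;
    }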

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdrand.c
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdrand.c b/thirdparty/librdkafka-0.11.1/src/rdrand.c
deleted file mode 100644
index 31c087d..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdrand.c
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * librd - Rapid Development C library
- *
- * Copyright (c) 2012, Magnus Edenhill
- * All rights reserved.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met: 
- * 
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer. 
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution. 
- * 
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "rd.h"
-#include "rdrand.h"
-
-
-
-void rd_array_shuffle (void *base, size_t nmemb, size_t entry_size) {
-	int i;
-	void *tmp = rd_alloca(entry_size);
-
-	/* FIXME: Optimized version for word-sized entries. */
-
-	for (i = (int) nmemb - 1 ; i > 0 ; i--) {
-		int j = rd_jitter(0, i);
-		if (unlikely(i == j))
-			continue;
-
-		memcpy(tmp, (char *)base + (i*entry_size), entry_size);
-		memcpy((char *)base+(i*entry_size),
-		       (char *)base+(j*entry_size), entry_size);
-		memcpy((char *)base+(j*entry_size), tmp, entry_size);
-	}
-}
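
A small sketch of how rd_array_shuffle() is meant to be called; it assumes the
internal rd.h/rdrand.h headers are on the include path (i.e. built inside the
librdkafka source tree), and the entry size is passed explicitly because the
function is type-agnostic:

    #include <stdio.h>
    #include <stdlib.h>
    #include "rd.h"       /* internal librdkafka header */
    #include "rdrand.h"   /* declares rd_array_shuffle(), shown above */

    int main(void) {
            int ids[] = {1, 2, 3, 4, 5};

            srand(42);    /* rd_jitter() draws from rand(3) */
            rd_array_shuffle(ids, 5, sizeof(*ids));

            for (int i = 0; i < 5; i++)
                    printf("%d ", ids[i]);
            printf("\n");
            return 0;
    }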

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdrand.h
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdrand.h b/thirdparty/librdkafka-0.11.1/src/rdrand.h
deleted file mode 100644
index 21b1e21..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdrand.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * librd - Rapid Development C library
- *
- * Copyright (c) 2012, Magnus Edenhill
- * All rights reserved.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met: 
- * 
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer. 
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution. 
- * 
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#pragma once
-
-
-/**
- * Returns a random (using rand(3)) number between 'low'..'high' (inclusive).
- */
-static RD_INLINE int rd_jitter (int low, int high) RD_UNUSED;
-static RD_INLINE int rd_jitter (int low, int high) {
-	return (low + (rand() % ((high-low)+1)));
-	
-}
-
-
-/**
- * Shuffles (randomizes) an array using the modern Fisher-Yates algorithm.
- */
-void rd_array_shuffle (void *base, size_t nmemb, size_t entry_size);
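
rd_jitter() above is typically used to randomize retry intervals. A
hypothetical sketch follows: do_request() and the interval constants are made
up for illustration, only rd_jitter() and rd_usleep() come from the internal
headers shown in this diff.

    #include "rd.h"       /* pulls in rdposix.h (rd_usleep) on POSIX */
    #include "rdrand.h"   /* rd_jitter() */

    extern int do_request(void);   /* hypothetical request function */

    /* Back off exponentially, adding up to 50% random jitter on top of
     * the base interval before each retry. */
    static void retry_with_jitter(void) {
            int backoff_ms = 100;
            int attempt;

            for (attempt = 0; attempt < 5; attempt++) {
                    if (do_request() == 0)
                            return;
                    rd_usleep((backoff_ms +
                               rd_jitter(0, backoff_ms / 2)) * 1000, NULL);
                    backoff_ms *= 2;
            }
    }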

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdregex.c
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdregex.c b/thirdparty/librdkafka-0.11.1/src/rdregex.c
deleted file mode 100644
index f9b2bac..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdregex.c
+++ /dev/null
@@ -1,157 +0,0 @@
-/*
- * librdkafka - The Apache Kafka C/C++ library
- *
- * Copyright (c) 2016 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-#include "rd.h"
-#include "rdregex.h"
-
-#if HAVE_REGEX
-#include <regex.h>
-struct rd_regex_s {
-	regex_t re;
-};
-
-#else
-
-#include "regexp.h"
-struct rd_regex_s {
-	Reprog *re;
-};
-#endif
-
-
-/**
- * @brief Destroy compiled regex
- */
-void rd_regex_destroy (rd_regex_t *re) {
-#if HAVE_REGEX
-	regfree(&re->re);
-#else
-	re_regfree(re->re);
-#endif
-	rd_free(re);
-}
-
-
-/**
- * @brief Compile regex \p pattern
- * @returns Compiled regex object on success, or NULL on error.
- */
-rd_regex_t *
-rd_regex_comp (const char *pattern, char *errstr, size_t errstr_size) {
-	rd_regex_t *re = rd_calloc(1, sizeof(*re));
-#if HAVE_REGEX
-	int r;
-
-	r = regcomp(&re->re, pattern, REG_EXTENDED|REG_NOSUB);
-	if (r) {
-		if (errstr)
-			regerror(r, &re->re, errstr, errstr_size);
-		rd_free(re);
-		return NULL;
-	}
-#else
-	const char *errstr2;
-
-	re->re = re_regcomp(pattern, 0, &errstr2);
-	if (!re->re) {
-		if (errstr) {
-			strncpy(errstr, errstr2, errstr_size-1);
-			errstr[errstr_size-1] = '\0';
-		}
-		rd_free(re);
-		return NULL;
-	}
-#endif
-
-	return re;
-}
-
-
-/**
- * @brief Match \p str to pre-compiled regex \p re
- * @returns 1 on match, else 0
- */
-int rd_regex_exec (rd_regex_t *re, const char *str) {
-#if HAVE_REGEX
-	return regexec(&re->re, str, 0, NULL, 0) != REG_NOMATCH;
-#else
-	return !re_regexec(re->re, str, NULL, 0);	
-#endif
-}
-
-
-/**
- * @brief Perform regex match of \p str using regex \p pattern.
- *
- * @returns 1 on match, 0 on non-match or -1 on regex compilation error
- *          in which case a human readable error string is written to
- *          \p errstr (if not NULL).
- */
-int rd_regex_match (const char *pattern, const char *str,
-		    char *errstr, size_t errstr_size) {
-#if HAVE_REGEX  /* use libc regex */
-	regex_t re;
-	int r;
-
-	/* FIXME: cache compiled regex */
-	r = regcomp(&re, pattern, REG_EXTENDED|REG_NOSUB);
-	if (r) {
-		if (errstr)
-			regerror(r, &re, errstr, errstr_size);
-		return -1;
-	}
-
-	r = regexec(&re, str, 0, NULL, 0) != REG_NOMATCH;
-
-	regfree(&re);
-
-	return r;
-
-#else /* Using regexp.h from minilibs (included) */
-	Reprog *re;
-	int r;
-	const char *errstr2;
-
-	/* FIXME: cache compiled regex */
-	re = re_regcomp(pattern, 0, &errstr2);
-	if (!re) {
-		if (errstr) {
-			strncpy(errstr, errstr2, errstr_size-1);
-			errstr[errstr_size-1] = '\0';
-		}
-		return -1;
-	}
-
-	r = !re_regexec(re, str, NULL, 0);
-
-	re_regfree(re);
-
-	return r;
-#endif
-}
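
A short sketch of the one-shot rd_regex_match() entry point above, the same
helper rd_kafka_topic_match() uses for "^"-prefixed subscription patterns;
assumed to be built inside the librdkafka source tree:

    #include <stdio.h>
    #include "rd.h"
    #include "rdregex.h"

    /* Returns 1 if the topic name matches the pattern, 0 otherwise;
     * compilation errors are reported via errstr and treated as no match. */
    static int topic_matches(const char *pattern, const char *topic) {
            char errstr[128];
            int r = rd_regex_match(pattern, topic, errstr, sizeof(errstr));

            if (r == -1)
                    fprintf(stderr, "regex \"%s\": %s\n", pattern, errstr);
            return r == 1;
    }

    /* topic_matches("^mytopic\\..*", "mytopic.metrics") -> 1 */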

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdregex.h
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdregex.h b/thirdparty/librdkafka-0.11.1/src/rdregex.h
deleted file mode 100644
index 9569af3..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdregex.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * librdkafka - The Apache Kafka C/C++ library
- *
- * Copyright (c) 2016 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-#pragma once
-
-typedef struct rd_regex_s rd_regex_t;
-
-void rd_regex_destroy (rd_regex_t *re);
-rd_regex_t *rd_regex_comp (const char *pattern, char *errstr, size_t errstr_size);
-int rd_regex_exec (rd_regex_t *re, const char *str);
-
-int rd_regex_match (const char *pattern, const char *str,
-		    char *errstr, size_t errstr_size);

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdsignal.h
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdsignal.h b/thirdparty/librdkafka-0.11.1/src/rdsignal.h
deleted file mode 100644
index f816855..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdsignal.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * librd - Rapid Development C library
- *
- * Copyright (c) 2012-2013, Magnus Edenhill
- * All rights reserved.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met: 
- * 
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer. 
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution. 
- * 
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#pragma once
-
-#include <signal.h>
-
-#define RD_SIG_ALL  -1
-#define RD_SIG_END  -2
-
-extern sigset_t rd_intr_sigset;
-extern int      rd_intr_blocked;
-
-static __inline void rd_intr_block (void) RD_UNUSED;
-static __inline void rd_intr_block (void) {
-	if (rd_intr_blocked++)
-		return;
-
-	sigprocmask(SIG_BLOCK, &rd_intr_sigset, NULL);
-}
-
-static __inline void rd_intr_unblock (void) RD_UNUSED;
-static __inline void rd_intr_unblock (void) {
-	assert(rd_intr_blocked > 0);
-	if (--rd_intr_blocked)
-		return;
-
-	sigprocmask(SIG_UNBLOCK, &rd_intr_sigset, NULL);
-}
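
The block/unblock pair above is reference counted, so only the outermost call
actually touches the signal mask. A sketch of the intended nesting (illustrative
only, not part of this patch):

static void do_uninterruptible_io (void) {
        rd_intr_block();     /* outermost call: sigprocmask(SIG_BLOCK, ...)   */

        rd_intr_block();     /* nested call: only increments rd_intr_blocked  */
        /* ... I/O that must not be interrupted by rd_intr_sigset ...         */
        rd_intr_unblock();   /* nested call: only decrements rd_intr_blocked  */

        rd_intr_unblock();   /* outermost call: sigprocmask(SIG_UNBLOCK, ...) */
}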

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdstring.c
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdstring.c b/thirdparty/librdkafka-0.11.1/src/rdstring.c
deleted file mode 100644
index 89e9b3c..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdstring.c
+++ /dev/null
@@ -1,162 +0,0 @@
-/*
- * librdkafka - The Apache Kafka C/C++ library
- *
- * Copyright (c) 2016 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-#include "rd.h"
-#include "rdstring.h"
-
-/**
- * @brief Render string \p template using \p callback for key lookups.
- *
- * Keys in template follow the %{keyname} syntax.
- *
- * The \p callback must not write more than \p size bytes to \p buf, but
- * should return the number of bytes it wanted to write (which will indicate
- * a truncated write).
- * If the key is not found -1 should be returned (which fails the rendering).
- *
- * @returns the rendered string (to be freed with rd_free()),
- *          or NULL on failure (errstr is written)
- */
-char *rd_string_render (const char *template,
-			char *errstr, size_t errstr_size,
-			ssize_t (*callback) (const char *key,
-					     char *buf, size_t size,
-					     void *opaque),
-			 void *opaque) {
-	const char *s = template;
-	const char *tend = template + strlen(template);
-	size_t size = 256;
-	char *buf;
-	size_t of = 0;
-
-	buf = rd_malloc(size);
-
-#define _remain() (size - of - 1)
-#define _assure_space(SZ) do {				\
-		if (of + (SZ) + 1 >= size) {		\
-			size = (size + (SZ) + 1) * 2;	\
-			buf = realloc(buf, size);	\
-		}					\
-	} while (0)
-	
-#define _do_write(PTR,SZ) do {				\
-		_assure_space(SZ);			\
-		memcpy(buf+of, (PTR), (SZ));		\
-		of += (SZ);				\
-	} while (0)
-
-
-
-	while (*s) {
-		const char *t;
-		size_t tof = (size_t)(s-template);
-
-		t = strstr(s, "%{");
-		if (t != s) {
-			/* Write "abc%{" 
-			 *        ^^^ */
-			size_t len = (size_t)((t ? t : tend)-s);
-			if (len)
-				_do_write(s, len);
-		}
-
-		if (t) {
-			const char *te;
-			ssize_t r;
-			char *tmpkey;
-
-			/* Find "abc%{key}"
-			 *               ^ */
-			te = strchr(t+2, '}');
-			if (!te) {
-				rd_snprintf(errstr, errstr_size,
-					    "Missing close-brace } for "
-					    "%.*s at %"PRIusz,
-					    15, t, tof);
-				rd_free(buf);
-				return NULL;
-			}
-
-			rd_strndupa(&tmpkey, t+2, (int)(te-t-2));
-
-			/* Query callback for length of key's value. */
-			r = callback(tmpkey, NULL, 0, opaque);
-			if (r == -1) {
-				rd_snprintf(errstr, errstr_size,
-					    "Property not available: \"%s\"",
-					    tmpkey);
-				rd_free(buf);
-				return NULL;
-			}
-
-			_assure_space(r);
-
-			/* Call again now providing a large enough buffer. */
-			r = callback(tmpkey, buf+of, _remain(), opaque);
-			if (r == -1) {
-				rd_snprintf(errstr, errstr_size,
-					    "Property not available: "
-					    "\"%s\"", tmpkey);
-				rd_free(buf);
-				return NULL;
-			}
-
-			assert(r < (ssize_t)_remain());
-			of += r;
-			s = te+1;
-
-		} else {
-			s = tend;
-		}
-	}
-
-	buf[of] = '\0';
-	return buf;
-}
-
-
-
-
-void rd_strtup_destroy (rd_strtup_t *strtup) {
-        rd_free(strtup);
-}
-
-rd_strtup_t *rd_strtup_new (const char *name, const char *value) {
-        size_t name_sz = strlen(name) + 1;
-        size_t value_sz = strlen(value) + 1;
-        rd_strtup_t *strtup;
-
-        strtup = rd_malloc(sizeof(*strtup) +
-                           name_sz + value_sz - 1/*name[1]*/);
-        memcpy(strtup->name, name, name_sz);
-        strtup->value = &strtup->name[name_sz];
-        memcpy(strtup->value, value, value_sz);
-
-        return strtup;
-}
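
A sketch of a lookup callback for rd_string_render() (hypothetical keys, values
and opaque argument; not part of the patch). The callback is first invoked with
a NULL buffer to size the value, then again with a buffer large enough to hold it:

#include <string.h>

static ssize_t my_lookup (const char *key, char *buf, size_t size,
                          void *opaque) {
        const char *val = NULL;

        if (!strcmp(key, "hostname"))
                val = "broker-0";
        else if (!strcmp(key, "group.id"))
                val = (const char *)opaque;   /* caller-supplied group id */

        if (!val)
                return -1;                    /* unknown key: fails the render */

        if (buf)
                strncpy(buf, val, size);      /* never writes more than size bytes */

        return (ssize_t)strlen(val);          /* bytes it wanted to write */
}

/* Usage:
 *   char errstr[256];
 *   char *s = rd_string_render("%{hostname}-%{group.id}", errstr,
 *                              sizeof(errstr), my_lookup, "mygroup");
 *   if (s) { ...; rd_free(s); }
 */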

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdstring.h
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdstring.h b/thirdparty/librdkafka-0.11.1/src/rdstring.h
deleted file mode 100644
index 154bc3d..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdstring.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * librdkafka - The Apache Kafka C/C++ library
- *
- * Copyright (c) 2017 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-#pragma once
-
-
-
-char *rd_string_render (const char *templ,
-                        char *errstr, size_t errstr_size,
-                        ssize_t (*callback) (const char *key,
-                                             char *buf, size_t size,
-                                             void *opaque),
-                        void *opaque);
-
-
-
-/**
- * @brief An immutable string tuple (name, value) in a single allocation.
- */
-typedef struct rd_strtup_s {
-        char *value;
-        char  name[1];  /* Actual allocation of name + val here */
-} rd_strtup_t;
-
-void rd_strtup_destroy (rd_strtup_t *strtup);
-rd_strtup_t *rd_strtup_new (const char *name, const char *value);
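
rd_strtup_t packs the name and value strings into the single allocation behind
the struct; a minimal usage sketch (illustrative only):

#include <stdio.h>

static void strtup_demo (void) {
        rd_strtup_t *tup = rd_strtup_new("content-type", "application/json");

        printf("%s: %s\n", tup->name, tup->value);  /* one allocation for both */

        rd_strtup_destroy(tup);
}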

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdsysqueue.h
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdsysqueue.h b/thirdparty/librdkafka-0.11.1/src/rdsysqueue.h
deleted file mode 100644
index 9acfdfd..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdsysqueue.h
+++ /dev/null
@@ -1,330 +0,0 @@
-/*
- * librd - Rapid Development C library
- *
- * Copyright (c) 2012-2013, Magnus Edenhill
- * Copyright (c) 2012-2013, Andreas Öman
- * All rights reserved.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met: 
- * 
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer. 
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution. 
- * 
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-/*
-
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
- * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#pragma once
-
-#include "queue.h"
-
-/*
- * Complete missing LIST-ops
- */
-
-#ifndef LIST_FOREACH
-#define	LIST_FOREACH(var, head, field)					\
-	for ((var) = ((head)->lh_first);				\
-		(var);							\
-		(var) = ((var)->field.le_next))
-#endif
-
-#ifndef LIST_EMPTY
-#define	LIST_EMPTY(head)		((head)->lh_first == NULL)
-#endif
-
-#ifndef LIST_FIRST
-#define	LIST_FIRST(head)		((head)->lh_first)
-#endif
-
-#ifndef LIST_NEXT
-#define	LIST_NEXT(elm, field)		((elm)->field.le_next)
-#endif
-
-#ifndef LIST_INSERT_BEFORE
-#define	LIST_INSERT_BEFORE(listelm, elm, field) do {			\
-	(elm)->field.le_prev = (listelm)->field.le_prev;		\
-	(elm)->field.le_next = (listelm);				\
-	*(listelm)->field.le_prev = (elm);				\
-	(listelm)->field.le_prev = &(elm)->field.le_next;		\
-} while (/*CONSTCOND*/0)
-#endif
-
-/*
- * Complete missing TAILQ-ops
- */
-
-#ifndef	TAILQ_HEAD_INITIALIZER
-#define	TAILQ_HEAD_INITIALIZER(head)					\
-	{ NULL, &(head).tqh_first }
-#endif
-
-#ifndef TAILQ_INSERT_BEFORE
-#define	TAILQ_INSERT_BEFORE(listelm, elm, field) do {			\
-	(elm)->field.tqe_prev = (listelm)->field.tqe_prev;		\
-	(elm)->field.tqe_next = (listelm);				\
-	*(listelm)->field.tqe_prev = (elm);				\
-	(listelm)->field.tqe_prev = &(elm)->field.tqe_next;		\
-} while (0)
-#endif
-
-#ifndef TAILQ_FOREACH
-#define TAILQ_FOREACH(var, head, field)                                     \
- for ((var) = ((head)->tqh_first); (var); (var) = ((var)->field.tqe_next))
-#endif
-
-#ifndef TAILQ_EMPTY
-#define	TAILQ_EMPTY(head)		((head)->tqh_first == NULL)
-#endif
-
-#ifndef TAILQ_FIRST
-#define TAILQ_FIRST(head)               ((head)->tqh_first)
-#endif
-
-#ifndef TAILQ_NEXT
-#define TAILQ_NEXT(elm, field)          ((elm)->field.tqe_next)
-#endif
-
-#ifndef TAILQ_LAST
-#define TAILQ_LAST(head, headname) \
-        (*(((struct headname *)((head)->tqh_last))->tqh_last))
-#endif
-
-#ifndef TAILQ_PREV
-#define TAILQ_PREV(elm, headname, field) \
-        (*(((struct headname *)((elm)->field.tqe_prev))->tqh_last))
-#endif
-
-#ifndef TAILQ_FOREACH_SAFE
-/*
- * TAILQ_FOREACH_SAFE() provides a traversal where the current iterated element
- * may be freed or unlinked.
- * It does not allow freeing or modifying any other element in the list,
- * in particular not the next element.
- */
-#define TAILQ_FOREACH_SAFE(elm,head,field,tmpelm)			\
-	for ((elm) = TAILQ_FIRST(head) ;				\
-	     (elm) && ((tmpelm) = TAILQ_NEXT((elm), field), 1) ;	\
-	     (elm) = (tmpelm))
-#endif
-
-/* 
- * In Mac OS 10.4 and earlier TAILQ_FOREACH_REVERSE was defined
- * differently, so it is redefined here.
- */
-#ifdef __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__
-#if __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 1050
-#undef TAILQ_FOREACH_REVERSE
-#endif
-#endif
-
-#ifndef TAILQ_FOREACH_REVERSE
-#define	TAILQ_FOREACH_REVERSE(var, head, headname, field)		\
-	for ((var) = (*(((struct headname *)((head)->tqh_last))->tqh_last));	\
-		(var);							\
-		(var) = (*(((struct headname *)((var)->field.tqe_prev))->tqh_last)))
-#endif
-
-
-/**
- * Treat the TAILQ as a circular list and return the previous/next entry,
- * possibly wrapping to the end/beginning.
- */
-#define TAILQ_CIRC_PREV(var, head, headname, field)	\
-	((var) != TAILQ_FIRST(head) ?			\
-	 TAILQ_PREV(var, headname, field) :		\
-	 TAILQ_LAST(head, headname))
-
-#define TAILQ_CIRC_NEXT(var, head, headname, field)	\
-	((var) != TAILQ_LAST(head, headname) ?		\
-	 TAILQ_NEXT(var, field) :			\
-	 TAILQ_FIRST(head))
-
-/*
- * Some extra functions for LIST manipulation
- */
-
-#define LIST_INSERT_SORTED(head, elm, elmtype, field, cmpfunc) do {	\
-        if(LIST_EMPTY(head)) {					\
-           LIST_INSERT_HEAD(head, elm, field);			\
-        } else {						\
-           elmtype _tmp;					\
-           LIST_FOREACH(_tmp,head,field) {			\
-              if(cmpfunc(elm,_tmp) < 0) {			\
-                LIST_INSERT_BEFORE(_tmp,elm,field);		\
-                break;						\
-              }							\
-              if(!LIST_NEXT(_tmp,field)) {			\
-                 LIST_INSERT_AFTER(_tmp,elm,field);		\
-                 break;						\
-              }							\
-           }							\
-        }							\
-} while(0)
-
-#ifndef TAILQ_INSERT_SORTED
-#define TAILQ_INSERT_SORTED(head, elm, elmtype, field, cmpfunc) do {	\
-        if(TAILQ_FIRST(head) == NULL) {				\
-           TAILQ_INSERT_HEAD(head, elm, field);			\
-        } else {						\
-           elmtype _tmp;					\
-           TAILQ_FOREACH(_tmp,head,field) {			\
-              if(cmpfunc(elm,_tmp) < 0) {			\
-                TAILQ_INSERT_BEFORE(_tmp,elm,field);		\
-                break;						\
-              }							\
-              if(!TAILQ_NEXT(_tmp,field)) {			\
-                 TAILQ_INSERT_AFTER(head,_tmp,elm,field);	\
-                 break;						\
-              }							\
-           }							\
-        }							\
-} while(0)
-#endif
-
-#define TAILQ_MOVE(newhead, oldhead, field) do { \
-        if(TAILQ_FIRST(oldhead)) { \
-           TAILQ_FIRST(oldhead)->field.tqe_prev = &(newhead)->tqh_first;  \
-	   (newhead)->tqh_first = (oldhead)->tqh_first;			\
-	   (newhead)->tqh_last = (oldhead)->tqh_last;			\
-	   TAILQ_INIT(oldhead);						\
-	} else								\
-		TAILQ_INIT(newhead);					\
-	} while (/*CONSTCOND*/0) 
-
-#ifndef TAILQ_CONCAT
-#define TAILQ_CONCAT(dhead, shead, field) do {                          \
-		if (!TAILQ_EMPTY(shead)) {				\
-			*(dhead)->tqh_last = (shead)->tqh_first;	\
-			(shead)->tqh_first->field.tqe_prev =		\
-				(dhead)->tqh_last;			\
-			(dhead)->tqh_last = (shead)->tqh_last;		\
-			TAILQ_INIT((shead));				\
-		}							\
-	} while (0)
-#endif
-
-#ifndef SIMPLEQ_HEAD
-#define SIMPLEQ_HEAD(name, type)					\
-struct name {								\
-struct type *sqh_first;                                                 \
-struct type **sqh_last;                                                 \
-}
-#endif
-
-#ifndef SIMPLEQ_ENTRY
-#define SIMPLEQ_ENTRY(type)						\
-struct {								\
-struct type *sqe_next;                                                  \
-}
-#endif
-
-#ifndef SIMPLEQ_FIRST
-#define	SIMPLEQ_FIRST(head)	    ((head)->sqh_first)
-#endif
-
-#ifndef SIMPLEQ_REMOVE_HEAD
-#define SIMPLEQ_REMOVE_HEAD(head, field) do {			        \
-if (((head)->sqh_first = (head)->sqh_first->field.sqe_next) == NULL)    \
-(head)->sqh_last = &(head)->sqh_first;			                \
-} while (0)
-#endif
-
-#ifndef SIMPLEQ_INSERT_TAIL
-#define SIMPLEQ_INSERT_TAIL(head, elm, field) do {			\
-(elm)->field.sqe_next = NULL;					        \
-*(head)->sqh_last = (elm);					        \
-(head)->sqh_last = &(elm)->field.sqe_next;			        \
-} while (0)
-#endif
-
-#ifndef SIMPLEQ_INIT
-#define	SIMPLEQ_INIT(head) do {						\
-(head)->sqh_first = NULL;					        \
-(head)->sqh_last = &(head)->sqh_first;				        \
-} while (0)
-#endif
-
-#ifndef SIMPLEQ_INSERT_HEAD
-#define SIMPLEQ_INSERT_HEAD(head, elm, field) do {			\
-if (((elm)->field.sqe_next = (head)->sqh_first) == NULL)	        \
-(head)->sqh_last = &(elm)->field.sqe_next;		                \
-(head)->sqh_first = (elm);					        \
-} while (0)
-#endif
-
-#ifndef SIMPLEQ_FOREACH
-#define SIMPLEQ_FOREACH(var, head, field)				\
-for((var) = SIMPLEQ_FIRST(head);				        \
-(var) != SIMPLEQ_END(head);					        \
-(var) = SIMPLEQ_NEXT(var, field))
-#endif
-
-#ifndef SIMPLEQ_INSERT_AFTER
-#define SIMPLEQ_INSERT_AFTER(head, listelm, elm, field) do {		\
-if (((elm)->field.sqe_next = (listelm)->field.sqe_next) == NULL)        \
-(head)->sqh_last = &(elm)->field.sqe_next;		                \
-(listelm)->field.sqe_next = (elm);				        \
-} while (0)
-#endif
-
-#ifndef SIMPLEQ_END
-#define	SIMPLEQ_END(head)	    NULL
-#endif
-
-#ifndef SIMPLEQ_NEXT
-#define	SIMPLEQ_NEXT(elm, field)    ((elm)->field.sqe_next)
-#endif
-
-#ifndef SIMPLEQ_HEAD_INITIALIZER
-#define SIMPLEQ_HEAD_INITIALIZER(head)					\
-{ NULL, &(head).sqh_first }
-#endif
-
-#ifndef SIMPLEQ_EMPTY
-#define	SIMPLEQ_EMPTY(head)	    (SIMPLEQ_FIRST(head) == SIMPLEQ_END(head))
-#endif
-
-
-
-
-
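
A sketch of the sorted-insert helper on a hypothetical element type (the
elmtype argument is the element pointer type and the comparator follows the
qsort convention); it relies on the TAILQ extensions defined above:

struct item {
        int prio;
        TAILQ_ENTRY(item) link;
};

TAILQ_HEAD(item_head, item);

static int item_cmp (const void *_a, const void *_b) {
        const struct item *a = _a, *b = _b;
        return a->prio - b->prio;
}

static void item_add_sorted (struct item_head *head, struct item *it) {
        /* Keeps the list ordered by ascending prio. */
        TAILQ_INSERT_SORTED(head, it, struct item *, link, item_cmp);
}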

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdtime.h
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdtime.h b/thirdparty/librdkafka-0.11.1/src/rdtime.h
deleted file mode 100644
index c770b04..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdtime.h
+++ /dev/null
@@ -1,175 +0,0 @@
-/*
- * librd - Rapid Development C library
- *
- * Copyright (c) 2012, Magnus Edenhill
- * All rights reserved.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met: 
- * 
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer. 
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution. 
- * 
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#pragma once
-
-
-#ifndef TIMEVAL_TO_TIMESPEC
-#define TIMEVAL_TO_TIMESPEC(tv,ts) do {		\
-    (ts)->tv_sec = (tv)->tv_sec;		\
-    (ts)->tv_nsec = (tv)->tv_usec * 1000;	\
-  } while (0)
-
-#define TIMESPEC_TO_TIMEVAL(tv, ts) do {  \
-    (tv)->tv_sec = (ts)->tv_sec;	  \
-    (tv)->tv_usec = (ts)->tv_nsec / 1000; \
-  } while (0)
-#endif
-
-#define TIMESPEC_TO_TS(ts) \
-	(((rd_ts_t)(ts)->tv_sec * 1000000LLU) + ((ts)->tv_nsec / 1000))
-
-#define TS_TO_TIMESPEC(ts,tsx) do {			\
-	(ts)->tv_sec  = (tsx) / 1000000;		\
-        (ts)->tv_nsec = ((tsx) % 1000000) * 1000;	\
-	if ((ts)->tv_nsec >= 1000000000LLU) {		\
-	   (ts)->tv_sec++;				\
-	   (ts)->tv_nsec -= 1000000000LLU;		\
-	}						\
-       } while (0)
-
-#define TIMESPEC_CLEAR(ts) ((ts)->tv_sec = (ts)->tv_nsec = 0LLU)
-
-
-#define RD_POLL_INFINITE  -1
-#define RD_POLL_NOWAIT     0
-
-
-/**
- * @returns a monotonically increasing clock in microseconds.
- * @remark There is no monotonic clock on OSX, so the system time
- *         is returned instead.
- */
-static RD_INLINE rd_ts_t rd_clock (void) RD_UNUSED;
-static RD_INLINE rd_ts_t rd_clock (void) {
-#ifdef __APPLE__
-	/* No monotonic clock on Darwin */
-	struct timeval tv;
-	gettimeofday(&tv, NULL);
-	return ((rd_ts_t)tv.tv_sec * 1000000LLU) + (rd_ts_t)tv.tv_usec;
-#elif defined(_MSC_VER)
-	return (rd_ts_t)GetTickCount64() * 1000LLU;
-#else
-	struct timespec ts;
-	clock_gettime(CLOCK_MONOTONIC, &ts);
-	return ((rd_ts_t)ts.tv_sec * 1000000LLU) + 
-		((rd_ts_t)ts.tv_nsec / 1000LLU);
-#endif
-}
-
-
-/**
- * @returns UTC wallclock time as number of microseconds since
- *          beginning of the epoch.
- */
-static RD_INLINE RD_UNUSED rd_ts_t rd_uclock (void) {
-	struct timeval tv;
-	rd_gettimeofday(&tv, NULL);
-	return ((rd_ts_t)tv.tv_sec * 1000000LLU) + (rd_ts_t)tv.tv_usec;
-}
-
-
-
-/**
- * Thread-safe version of ctime() that strips the trailing newline.
- */
-static RD_INLINE const char *rd_ctime (const time_t *t) RD_UNUSED;
-static RD_INLINE const char *rd_ctime (const time_t *t) {
-	static RD_TLS char ret[27];
-
-#ifndef _MSC_VER
-	ctime_r(t, ret);
-#else
-	ctime_s(ret, sizeof(ret), t);
-#endif
-	ret[25] = '\0';
-
-	return ret;
-}
-
-
-/**
- * @brief Initialize an absolute timeout based on the provided \p timeout_ms
- *
- * To be used with rd_timeout_adjust().
- *
- * Honours RD_POLL_INFINITE, RD_POLL_NOWAIT.
- *
- * @returns the absolute timeout which should later be passed
- *          to rd_timeout_adjust().
- */
-static RD_INLINE rd_ts_t rd_timeout_init (int timeout_ms) {
-	if (timeout_ms == RD_POLL_INFINITE ||
-	    timeout_ms == RD_POLL_NOWAIT)
-		return timeout_ms;
-
-	return rd_clock() + (timeout_ms * 1000);
-}
-
-
-/**
- * @returns the remaining timeout for timeout \p abs_timeout previously set
- *          up by rd_timeout_init()
- *
- * Honours RD_POLL_INFINITE, RD_POLL_NOWAIT.
- */
-static RD_INLINE int rd_timeout_remains (rd_ts_t abs_timeout) {
-	int timeout_ms;
-
-	if (abs_timeout == RD_POLL_INFINITE ||
-	    abs_timeout == RD_POLL_NOWAIT)
-		return (int)abs_timeout;
-
-	timeout_ms = (int)((abs_timeout - rd_clock()) / 1000);
-	if (timeout_ms <= 0)
-		return RD_POLL_NOWAIT;
-	else
-		return timeout_ms;
-}
-
-/**
- * @brief Like rd_timeout_remains() but limits the maximum time to \p limit_ms
- */
-static RD_INLINE int
-rd_timeout_remains_limit (rd_ts_t abs_timeout, int limit_ms) {
-	int timeout_ms = rd_timeout_remains(abs_timeout);
-
-	if (timeout_ms == RD_POLL_INFINITE || timeout_ms > limit_ms)
-		return limit_ms;
-	else
-		return timeout_ms;
-}
-
-
-/**
- * @returns 1 if the **relative** timeout as returned by rd_timeout_remains()
- *          has timed out / expired, else 0.
- */
-static RD_INLINE int rd_timeout_expired (int timeout_ms) {
-	return timeout_ms == RD_POLL_NOWAIT;
-}
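
The absolute-timeout helpers above compose as follows (sketch): initialise the
absolute deadline once, then re-derive the remaining relative timeout on every
retry; RD_POLL_INFINITE and RD_POLL_NOWAIT are passed through by
rd_timeout_init() and rd_timeout_remains() unchanged.

static void wait_for_something (int timeout_ms) {
        rd_ts_t abs_timeout = rd_timeout_init(timeout_ms);
        int remains_ms;

        do {
                /* ... attempt the operation, e.g. poll an op queue ... */
                remains_ms = rd_timeout_remains(abs_timeout);
        } while (!rd_timeout_expired(remains_ms));
}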

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdtypes.h
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdtypes.h b/thirdparty/librdkafka-0.11.1/src/rdtypes.h
deleted file mode 100644
index 0206079..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdtypes.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * librd - Rapid Development C library
- *
- * Copyright (c) 2012, Magnus Edenhill
- * All rights reserved.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met: 
- * 
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer. 
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution. 
- * 
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#pragma once
-
-#include <inttypes.h>
-
-
-/*
- * Fundamental types
- */
-
-
-/* Timestamp (microseconds) */
-typedef int64_t rd_ts_t;
-
-#define RD_TS_MAX  INT64_MAX

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdunittest.c
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdunittest.c b/thirdparty/librdkafka-0.11.1/src/rdunittest.c
deleted file mode 100644
index b1c802e..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdunittest.c
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2017 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "rd.h"
-#include "rdunittest.h"
-
-#include "rdvarint.h"
-#include "rdbuf.h"
-#include "crc32c.h"
-
-
-int rd_unittest (void) {
-        int fails = 0;
-        fails += unittest_rdbuf();
-        fails += unittest_rdvarint();
-        fails += unittest_crc32c();
-        return fails;
-}

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdunittest.h
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdunittest.h b/thirdparty/librdkafka-0.11.1/src/rdunittest.h
deleted file mode 100644
index a8d29da..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdunittest.h
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2017 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RD_UNITTEST_H
-#define _RD_UNITTEST_H
-
-#include <stdio.h>
-
-
-/**
- * @brief Fail the current unit-test function.
- */
-#define RD_UT_FAIL(...) do {                                            \
-                fprintf(stderr, "\033[31mRDUT: FAIL: %s:%d: %s: ",      \
-                        __FILE__, __LINE__, __FUNCTION__);              \
-                fprintf(stderr, __VA_ARGS__);                           \
-                fprintf(stderr, "\033[0m\n");                           \
-                return 1;                                               \
-        } while (0)
-
-/**
- * @brief Pass the current unit-test function
- */
-#define RD_UT_PASS() do {                                               \
-                fprintf(stderr, "\033[32mRDUT: PASS: %s:%d: %s\033[0m\n", \
-                        __FILE__, __LINE__, __FUNCTION__);              \
-                return 0;                                               \
-        } while (0)
-
-/**
- * @brief Fail unit-test if \p expr is false
- */
-#define RD_UT_ASSERT(expr,...) do {                                     \
-        if (!(expr)) {                                                  \
-        fprintf(stderr,                                                 \
-                "\033[31mRDUT: FAIL: %s:%d: %s: assert failed: " # expr ": ", \
-                __FILE__, __LINE__, __FUNCTION__);                      \
-        fprintf(stderr, __VA_ARGS__);                                   \
-        fprintf(stderr, "\033[0m\n");                                   \
-        return 1;                                                       \
-        }                                                               \
-         } while (0)
-
-
-/**
- * @brief Log something from a unit-test
- */
-#define RD_UT_SAY(...) do {                                             \
-                fprintf(stderr, "RDUT: INFO: %s:%d: %s: ",              \
-                        __FILE__, __LINE__, __FUNCTION__);              \
-                fprintf(stderr, __VA_ARGS__);                           \
-                fprintf(stderr, "\n");                                  \
-        } while (0)
-
-
-int rd_unittest (void);
-
-#endif /* _RD_UNITTEST_H */
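
The shape of a unit-test function written against these macros (sketch): it
returns 0 on pass and non-zero on failure, so the results can be summed by
rd_unittest():

static int unittest_example (void) {
        int sum = 2 + 2;

        RD_UT_ASSERT(sum == 4, "expected 4, got %d", sum);
        RD_UT_SAY("arithmetic still works (%d)", sum);
        RD_UT_PASS();
}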

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdvarint.c
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdvarint.c b/thirdparty/librdkafka-0.11.1/src/rdvarint.c
deleted file mode 100644
index cd7699b..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdvarint.c
+++ /dev/null
@@ -1,126 +0,0 @@
-/*
- * librdkafka - The Apache Kafka C/C++ library
- *
- * Copyright (c) 2016 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-#include "rdvarint.h"
-#include "rdunittest.h"
-
-
-/**
- * @brief Read a varint-encoded signed integer from \p slice.
- */
-size_t rd_varint_dec_slice (rd_slice_t *slice, int64_t *nump) {
-        size_t num = 0;
-        int shift = 0;
-        unsigned char oct;
-
-        /* FIXME: Optimize to use something better than read() */
-        do {
-                size_t r = rd_slice_read(slice, &oct, sizeof(oct));
-                if (unlikely(r == 0))
-                        return 0; /* Underflow */
-                num |= (uint64_t)(oct & 0x7f) << shift;
-                shift += 7;
-        } while (oct & 0x80);
-
-        *nump = (int64_t)((num >> 1) ^ -(int64_t)(num & 1));
-
-        return shift / 7;
-}
-
-
-
-
-
-static int do_test_rd_uvarint_enc_i64 (const char *file, int line,
-                                       int64_t num, const char *exp,
-                                       size_t exp_size) {
-        char buf[16] = { 0xff, 0xff, 0xff, 0xff,
-                         0xff, 0xff, 0xff, 0xff,
-                         0xff, 0xff, 0xff, 0xff,
-                         0xff, 0xff, 0xff, 0xff };
-        size_t sz = rd_uvarint_enc_i64(buf, sizeof(buf), num);
-        size_t r;
-        int ir;
-        rd_buf_t b;
-        rd_slice_t slice, bad_slice;
-        int64_t ret_num;
-
-        if (sz != exp_size || memcmp(buf, exp, exp_size))
-                RD_UT_FAIL("i64 encode of %"PRId64": "
-                           "expected size %"PRIusz" (got %"PRIusz")\n",
-                           num, exp_size, sz);
-
-        /* Verify with standard decoder */
-        r = rd_varint_dec_i64(buf, sz, &ret_num);
-        RD_UT_ASSERT(!RD_UVARINT_DEC_FAILED(r),
-                     "varint decode failed: %"PRIusz, r);
-        RD_UT_ASSERT(ret_num == num,
-                     "varint decode returned wrong number: "
-                     "%"PRId64" != %"PRId64, ret_num, num);
-
-        /* Verify with slice decoder */
-        rd_buf_init(&b, 1, 0);
-        rd_buf_push(&b, buf, sz, NULL);
-        rd_slice_init_full(&slice, &b);
-
-        /* Should fail for incomplete reads */
-        ir = rd_slice_narrow_copy(&slice, &bad_slice,
-                                  rd_slice_remains(&slice)-1);
-        RD_UT_ASSERT(ir, "narrow_copy failed");
-        ret_num = -1;
-        r = rd_varint_dec_slice(&bad_slice, &ret_num);
-        RD_UT_ASSERT(RD_UVARINT_DEC_FAILED(r),
-                     "varint decode should have failed, returned %"PRIusz,
-                     r);
-
-        /* Verify proper slice */
-        ret_num = -1;
-        r = rd_varint_dec_slice(&slice, &ret_num);
-        RD_UT_ASSERT(!RD_UVARINT_DEC_FAILED(r),
-                     "varint decode failed: %"PRIusz, r);
-        RD_UT_ASSERT(ret_num == num,
-                     "varint decode returned wrong number: "
-                     "%"PRId64" != %"PRId64, ret_num, num);
-
-        rd_buf_destroy(&b);
-
-        RD_UT_PASS();
-}
-
-
-int unittest_rdvarint (void) {
-        int fails = 0;
-
-        fails += do_test_rd_uvarint_enc_i64(__FILE__, __LINE__, 23,
-                                            (const char[]){ 23<<1 }, 1);
-        fails += do_test_rd_uvarint_enc_i64(__FILE__, __LINE__, 253,
-                                            (const char[]){ 0xfa,  3 }, 2);
-
-        return fails;
-}

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdvarint.h
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdvarint.h b/thirdparty/librdkafka-0.11.1/src/rdvarint.h
deleted file mode 100644
index 407bfb0..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdvarint.h
+++ /dev/null
@@ -1,169 +0,0 @@
-/*
- * librdkafka - The Apache Kafka C/C++ library
- *
- * Copyright (c) 2016 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-#ifndef _RDVARINT_H
-#define _RDVARINT_H
-
-#include "rd.h"
-#include "rdbuf.h"
-
-/**
- * @name signed varint zig-zag encoder/decoder
- * @{
- *
- */
-
-/**
- * @brief unsigned-varint encodes unsigned integer \p num into buffer
- *        at \p dst of size \p dstsize.
- * @returns the number of bytes written to \p dst, or 0 if not enough space.
- */
-
-static RD_INLINE RD_UNUSED
-size_t rd_uvarint_enc_u64 (char *dst, size_t dstsize, uint64_t num) {
-        size_t of = 0;
-
-        do {
-                if (unlikely(of >= dstsize))
-                        return 0; /* Not enough space */
-
-                dst[of++] = (num & 0x7f) | (num > 0x7f ? 0x80 : 0);
-                num >>= 7;
-        } while (num);
-
-        return of;
-}
-
-/**
- * @brief encodes a signed integer using zig-zag encoding.
- * @sa rd_uvarint_enc_u64
- */
-static RD_INLINE RD_UNUSED
-size_t rd_uvarint_enc_i64 (char *dst, size_t dstsize, int64_t num) {
-        return rd_uvarint_enc_u64(dst, dstsize, (num << 1) ^ (num >> 63));
-}
-
-
-static RD_INLINE RD_UNUSED
-size_t rd_uvarint_enc_i32 (char *dst, size_t dstsize, int32_t num) {
-        return rd_uvarint_enc_i64(dst, dstsize, num);
-}
-
-
-
-/**
- * @brief Use on return value from rd_uvarint_dec() to check if
- *        decoded varint fit the size_t.
- *
- * @returns 1 on overflow, else 0.
- */
-#define RD_UVARINT_OVERFLOW(DEC_RETVAL) (DEC_RETVAL > SIZE_MAX)
-
-/**
- * @returns 1 if there were not enough bytes to decode the varint, else 0.
- */
-#define RD_UVARINT_UNDERFLOW(DEC_RETVAL) (DEC_RETVAL == 0)
-
-
-/**
- * @param DEC_RETVAL the return value from \c rd_uvarint_dec()
- * @returns 1 if varint decoding failed, else 0.
- * @warning \p DEC_RETVAL will be evaluated twice.
- */
-#define RD_UVARINT_DEC_FAILED(DEC_RETVAL) \
-        (RD_UVARINT_UNDERFLOW(DEC_RETVAL) || RD_UVARINT_OVERFLOW(DEC_RETVAL))
-
-
-/**
- * @brief Decodes the unsigned-varint in buffer \p src of size \p srcsize
- *        and stores the decoded unsigned integer in \p nump.
- *
- * @remark Use RD_UVARINT_OVERFLOW(returnvalue) to check if the varint
- *         could not fit \p nump, and RD_UVARINT_UNDERFLOW(returnvalue) to
- *         check if there were not enough bytes available in \p src to
- *         decode the full varint.
- *
- * @returns the number of bytes read from \p src.
- */
-static RD_INLINE RD_UNUSED
-size_t rd_uvarint_dec (const char *src, size_t srcsize, size_t *nump) {
-        size_t of = 0;
-        size_t num = 0;
-        int shift = 0;
-
-        do {
-                if (unlikely(srcsize-- == 0))
-                        return 0; /* Underflow */
-                num |= (uint64_t)(src[(int)of] & 0x7f) << shift;
-                shift += 7;
-        } while (src[(int)of++] & 0x80);
-
-        *nump = num;
-        return of;
-}
-
-static RD_INLINE RD_UNUSED
-size_t rd_varint_dec_i64 (const char *src, size_t srcsize, int64_t *nump) {
-        size_t n;
-        size_t r;
-
-        r = rd_uvarint_dec(src, srcsize, &n);
-        if (likely(!RD_UVARINT_DEC_FAILED(r)))
-                *nump = (int64_t)(n >> 1) ^ -(int64_t)(n & 1);
-
-        return r;
-}
-
-
-/**
- * @brief Read a varint-encoded signed integer from \p slice.
- *
- * @sa rd_uvarint_dec()
- */
-size_t rd_varint_dec_slice (rd_slice_t *slice, int64_t *nump);
-
-
-/**
- * @returns the maximum encoded size for a type
- */
-#define RD_UVARINT_ENC_SIZEOF(TYPE) \
-        (sizeof(TYPE) + 1 + (sizeof(TYPE)/7))
-
-/**
- * @returns the encoding size of the value 0
- */
-#define RD_UVARINT_ENC_SIZE_0() 1
-
-
-int unittest_rdvarint (void);
-
-/**@}*/
-
-
-#endif /* _RDVARINT_H */
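
A worked round-trip through the zig-zag codec above (sketch): -1 zig-zags to 1,
which encodes as the single byte 0x01 and decodes back to -1.

#include <assert.h>

static void varint_demo (void) {
        char buf[RD_UVARINT_ENC_SIZEOF(int64_t)];
        int64_t out;
        size_t enc_sz, dec_sz;

        enc_sz = rd_uvarint_enc_i64(buf, sizeof(buf), -1);
        assert(enc_sz == 1 && (unsigned char)buf[0] == 0x01);

        dec_sz = rd_varint_dec_i64(buf, enc_sz, &out);
        assert(!RD_UVARINT_DEC_FAILED(dec_sz) && out == -1);
}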

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdwin32.h
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdwin32.h b/thirdparty/librdkafka-0.11.1/src/rdwin32.h
deleted file mode 100644
index dfd16d1..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdwin32.h
+++ /dev/null
@@ -1,262 +0,0 @@
-/*
-* librdkafka - Apache Kafka C library
-*
-* Copyright (c) 2012-2015 Magnus Edenhill
-* All rights reserved.
-*
-* Redistribution and use in source and binary forms, with or without
-* modification, are permitted provided that the following conditions are met:
-*
-* 1. Redistributions of source code must retain the above copyright notice,
-*    this list of conditions and the following disclaimer.
-* 2. Redistributions in binary form must reproduce the above copyright notice,
-*    this list of conditions and the following disclaimer in the documentation
-*    and/or other materials provided with the distribution.
-*
-* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-* POSSIBILITY OF SUCH DAMAGE.
-*/
-
-/**
- * Win32 (Visual Studio) support
- */
-#pragma once
-
-
-#include <stdlib.h>
-#include <inttypes.h>
-#include <sys/types.h>
-#include <time.h>
-#include <assert.h>
-#define WIN32_MEAN_AND_LEAN
-#include <Winsock2.h>  /* for struct timeval */
-#include <io.h>
-#include <fcntl.h>
-
-
-/**
- * Types
- */
-typedef SSIZE_T ssize_t;
-typedef int socklen_t;
-
-struct iovec {
-	void *iov_base;
-	size_t iov_len;
-};
-
-struct msghdr {
-	struct iovec  *msg_iov;
-	int            msg_iovlen;
-};
-
-#define LOG_EMERG   0
-#define LOG_ALERT   1
-#define LOG_CRIT    2
-#define LOG_ERR     3
-#define LOG_WARNING 4
-#define LOG_NOTICE  5
-#define LOG_INFO    6
-#define LOG_DEBUG   7
-
-
-
-/**
-* Annotations, attributes, optimizers
-*/
-#ifndef likely
-#define likely(x)   x
-#endif
-#ifndef unlikely
-#define unlikely(x) x
-#endif
-
-#define RD_UNUSED
-#define RD_INLINE  __inline
-#define RD_WARN_UNUSED_RESULT
-#define RD_NORETURN __declspec(noreturn)
-#define RD_IS_CONSTANT(p)  (0)
-#define RD_TLS __declspec(thread)
-
-
-/**
- * Allocation
- */
-#define rd_alloca(N) _alloca(N)
-
-
-/**
- * Strings, formatting, printf, ..
- */
-
-/* size_t and ssize_t format strings */
-#define PRIusz  "Iu"
-#define PRIdsz  "Id"
-
-#define RD_FORMAT(...)
-
-static RD_UNUSED RD_INLINE
-int rd_vsnprintf (char *str, size_t size, const char *format, va_list ap) {
-        int cnt = -1;
-
-        if (size != 0)
-                cnt = _vsnprintf_s(str, size, _TRUNCATE, format, ap);
-        if (cnt == -1)
-                cnt = _vscprintf(format, ap);
-
-        return cnt;
-}
-
-static RD_UNUSED RD_INLINE
-int rd_snprintf (char *str, size_t size, const char *format, ...) {
-        int cnt;
-        va_list ap;
-
-        va_start(ap, format);
-        cnt = rd_vsnprintf(str, size, format, ap);
-        va_end(ap);
-
-        return cnt;
-}
-
-
-#define rd_strcasecmp(A,B) _stricmp(A,B)
-#define rd_strncasecmp(A,B,N) _strnicmp(A,B,N)
-
-
-/**
- * Errors
- */
-static RD_INLINE RD_UNUSED const char *rd_strerror(int err) {
-	static RD_TLS char ret[128];
-
-	strerror_s(ret, sizeof(ret) - 1, err);
-	return ret;
-}
-
-
-/**
- * Atomics
- */
-#ifndef __cplusplus
-#include "rdatomic.h"
-#endif
-
-
-/**
- * Misc
- */
-
-/**
- * Microsecond sleep.
- * The second argument is accepted for API compatibility and ignored here.
- */
-#define rd_usleep(usec,terminate)  Sleep((usec) / 1000)
-
-
-/**
- * @brief gettimeofday() for win32
- */
-static RD_UNUSED
-int rd_gettimeofday (struct timeval *tv, struct timezone *tz) {
-	SYSTEMTIME st;
-	FILETIME   ft;
-	ULARGE_INTEGER d;
-
-	GetSystemTime(&st);
-	SystemTimeToFileTime(&st, &ft);
-	d.HighPart = ft.dwHighDateTime;
-	d.LowPart  = ft.dwLowDateTime;
-	tv->tv_sec  = (long)((d.QuadPart - 116444736000000000llu) / 10000000L);
-	tv->tv_usec = (long)(st.wMilliseconds * 1000);
-
-	return 0;
-}
-
-
-#define rd_assert(EXPR)  assert(EXPR)
-
-
-/**
- * Empty struct initializer
- */
-#define RD_ZERO_INIT  {0}
-
-#ifndef __cplusplus
-/**
- * Sockets, IO
- */
-
-/**
- * @brief Set socket to non-blocking
- * @returns 0 on success or a Winsock error code on failure (see rd_kafka_socket_errno)
- */
-static RD_UNUSED int rd_fd_set_nonblocking (int fd) {
-        int on = 1;
-        if (ioctlsocket(fd, FIONBIO, &on) == SOCKET_ERROR)
-                return (int)WSAGetLastError();
-        return 0;
-}
-
-/**
- * @brief Create non-blocking pipe
- * @returns 0 on success or a Win32 error code on failure
- */
-static RD_UNUSED int rd_pipe_nonblocking (int *fds) {
-        HANDLE h[2];
-        int i;
-
-        if (!CreatePipe(&h[0], &h[1], NULL, 0))
-                return (int)GetLastError();
-        for (i = 0 ; i < 2 ; i++) {
-                DWORD mode = PIPE_NOWAIT;
-                /* Set non-blocking */
-                if (!SetNamedPipeHandleState(h[i], &mode, NULL, NULL)) {
-                        CloseHandle(h[0]);
-                        CloseHandle(h[1]);
-                        return (int)GetLastError();
-                }
-
-                /* Open file descriptor for handle */
-                fds[i] = _open_osfhandle((intptr_t)h[i],
-                                         i == 0 ?
-                                         O_RDONLY | O_BINARY :
-                                         O_WRONLY | O_BINARY);
-
-                if (fds[i] == -1) {
-                        CloseHandle(h[0]);
-                        CloseHandle(h[1]);
-                        return (int)GetLastError();
-                }
-        }
-        return 0;
-}
-
-#define rd_read(fd,buf,sz) _read(fd,buf,sz)
-#define rd_write(fd,buf,sz) _write(fd,buf,sz)
-#define rd_close(fd) closesocket(fd)
-
-static RD_UNUSED char *
-rd_strerror_w32 (DWORD errcode, char *dst, size_t dstsize) {
-        char *t;
-        FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM |
-                       FORMAT_MESSAGE_IGNORE_INSERTS,
-                       NULL, errcode,
-                       MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
-                       (LPSTR)dst, (DWORD)dstsize - 1, NULL);
-        /* Remove newlines */
-        while ((t = strchr(dst, (int)'\r')) || (t = strchr(dst, (int)'\n')))
-                *t = (char)'.';
-        return dst;
-}
-
-#endif /* !__cplusplus*/
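
A sketch of the POSIX-style shims above in use (illustrative only): the same
call sites compile on Win32 and on Unix builds.

static void win32_shim_demo (void) {
        struct timeval tv;
        char buf[64];

        rd_gettimeofday(&tv, NULL);
        rd_snprintf(buf, sizeof(buf), "now=%ld.%06ld",
                    (long)tv.tv_sec, (long)tv.tv_usec);
        rd_usleep(1000/*1ms*/, NULL);  /* second argument is ignored on Win32 */
}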


http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdkafka_metadata_cache.c
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdkafka_metadata_cache.c b/thirdparty/librdkafka-0.11.1/src/rdkafka_metadata_cache.c
deleted file mode 100644
index 5781d0f..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdkafka_metadata_cache.c
+++ /dev/null
@@ -1,732 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2013, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-#include "rd.h"
-#include "rdkafka_int.h"
-#include "rdkafka_topic.h"
-#include "rdkafka_broker.h"
-#include "rdkafka_request.h"
-#include "rdkafka_metadata.h"
-
-#include <string.h>
-/**
- * @{
- *
- * @brief Metadata cache
- *
- * The metadata cache consists of cached topic metadata as
- * retrieved from the cluster using MetadataRequest.
- *
- * The topic cache entries are made up of \c struct rd_kafka_metadata_cache_entry,
- * each containing the topic name, a copy of the topic's metadata
- * and a cache expiry time.
- *
- * On update any previous entry for the topic is removed and replaced
- * with a new entry.
- *
- * The cache is also populated when the topic metadata is being requested
- * for specific topics; this will not interfere with existing cache entries
- * for topics, but for any topics not currently in the cache a new
- * entry will be added with a flag (RD_KAFKA_METADATA_CACHE_VALID(rkmce))
- * indicating that the entry is waiting to be populated by the MetadataResponse.
- *
- * The cache is locked in its entirety with rd_kafka_wr/rdlock() by the caller
- * and the returned cache entry must only be accessed for the duration
- * of the lock.
- *
- */
-
-static void rd_kafka_metadata_cache_propagate_changes (rd_kafka_t *rk);
-
-
-/**
- * @brief Remove and free cache entry.
- *
- * @remark The expiry timer is not updated, for simplicity.
- * @locks rd_kafka_wrlock()
- */
-static RD_INLINE void
-rd_kafka_metadata_cache_delete (rd_kafka_t *rk,
-                                struct rd_kafka_metadata_cache_entry *rkmce,
-                                int unlink_avl) {
-        if (unlink_avl)
-                RD_AVL_REMOVE_ELM(&rk->rk_metadata_cache.rkmc_avl, rkmce);
-        TAILQ_REMOVE(&rk->rk_metadata_cache.rkmc_expiry, rkmce, rkmce_link);
-        rd_kafka_assert(NULL, rk->rk_metadata_cache.rkmc_cnt > 0);
-        rk->rk_metadata_cache.rkmc_cnt--;
-
-        rd_free(rkmce);
-}
-
-/**
- * @brief Delete cache entry by topic name
- * @locks rd_kafka_wrlock()
- * @returns 1 if entry was found and removed, else 0.
- */
-static int rd_kafka_metadata_cache_delete_by_name (rd_kafka_t *rk,
-                                                    const char *topic) {
-        struct rd_kafka_metadata_cache_entry *rkmce;
-
-        rkmce = rd_kafka_metadata_cache_find(rk, topic, 1);
-        if (rkmce)
-                rd_kafka_metadata_cache_delete(rk, rkmce, 1);
-        return rkmce ? 1 : 0;
-}
-
-static int rd_kafka_metadata_cache_evict (rd_kafka_t *rk);
-
-/**
- * @brief Cache eviction timer callback.
- * @locality rdkafka main thread
- * @locks NOT rd_kafka_*lock()
- */
-static void rd_kafka_metadata_cache_evict_tmr_cb (rd_kafka_timers_t *rkts,
-                                                  void *arg) {
-        rd_kafka_t *rk = arg;
-
-        rd_kafka_wrlock(rk);
-        rd_kafka_metadata_cache_evict(rk);
-        rd_kafka_wrunlock(rk);
-}
-
-
-/**
- * @brief Evict timed out entries from cache and rearm timer for
- *        next expiry.
- *
- * @returns the number of entries evicted.
- *
- * @locks rd_kafka_wrlock()
- */
-static int rd_kafka_metadata_cache_evict (rd_kafka_t *rk) {
-        int cnt = 0;
-        rd_ts_t now = rd_clock();
-        struct rd_kafka_metadata_cache_entry *rkmce;
-
-        while ((rkmce = TAILQ_FIRST(&rk->rk_metadata_cache.rkmc_expiry)) &&
-               rkmce->rkmce_ts_expires <= now) {
-                rd_kafka_metadata_cache_delete(rk, rkmce, 1);
-                cnt++;
-        }
-
-        if (rkmce)
-                rd_kafka_timer_start(&rk->rk_timers,
-                                     &rk->rk_metadata_cache.rkmc_expiry_tmr,
-                                     rkmce->rkmce_ts_expires - now,
-                                     rd_kafka_metadata_cache_evict_tmr_cb,
-                                     rk);
-        else
-                rd_kafka_timer_stop(&rk->rk_timers,
-                                    &rk->rk_metadata_cache.rkmc_expiry_tmr, 1);
-
-        rd_kafka_dbg(rk, METADATA, "METADATA",
-                     "Expired %d entries from metadata cache "
-                     "(%d entries remain)",
-                     cnt, rk->rk_metadata_cache.rkmc_cnt);
-
-        if (cnt)
-                rd_kafka_metadata_cache_propagate_changes(rk);
-
-        return cnt;
-}
-
-
-/**
- * @brief Find cache entry by topic name
- *
- * @param valid: entry must be valid (not hint)
- *
- * @locks rd_kafka_*lock()
- */
-struct rd_kafka_metadata_cache_entry *
-rd_kafka_metadata_cache_find (rd_kafka_t *rk, const char *topic, int valid) {
-        struct rd_kafka_metadata_cache_entry skel, *rkmce;
-        skel.rkmce_mtopic.topic = (char *)topic;
-        rkmce = RD_AVL_FIND(&rk->rk_metadata_cache.rkmc_avl, &skel);
-        if (rkmce && (!valid || RD_KAFKA_METADATA_CACHE_VALID(rkmce)))
-                return rkmce;
-        return NULL;
-}
-
-
-/**
- * @brief Partition (id) comparator
- */
-static int rd_kafka_metadata_partition_id_cmp (const void *_a,
-                                               const void *_b) {
-        const rd_kafka_metadata_partition_t *a = _a, *b = _b;
-        return a->id - b->id;
-}
-
-
-/**
- * @brief Add (and replace) cache entry for topic.
- *
- * This makes a copy of \p topic
- *
- * @locks rd_kafka_wrlock()
- */
-static struct rd_kafka_metadata_cache_entry *
-rd_kafka_metadata_cache_insert (rd_kafka_t *rk,
-                                const rd_kafka_metadata_topic_t *mtopic,
-                                rd_ts_t now, rd_ts_t ts_expires) {
-        struct rd_kafka_metadata_cache_entry *rkmce, *old;
-        size_t topic_len;
-        rd_tmpabuf_t tbuf;
-        int i;
-
-        /* Metadata is stored in one contiguous buffer where structs and
-         * pointed-to fields are laid out in a memory-aligned fashion.
-         * rd_tmpabuf_t provides the infrastructure to do this.
-         * Because of this we copy all the structs verbatim, but
-         * any pointer fields need to be copied explicitly to update
-         * the pointer address. */
-        topic_len = strlen(mtopic->topic) + 1;
-        rd_tmpabuf_new(&tbuf,
-                       RD_ROUNDUP(sizeof(*rkmce), 8) +
-                       RD_ROUNDUP(topic_len, 8) +
-                       (mtopic->partition_cnt *
-                        RD_ROUNDUP(sizeof(*mtopic->partitions), 8)),
-                       1/*assert on fail*/);
-
-        rkmce = rd_tmpabuf_alloc(&tbuf, sizeof(*rkmce));
-
-        rkmce->rkmce_mtopic = *mtopic;
-
-        /* Copy topic name and update pointer */
-        rkmce->rkmce_mtopic.topic = rd_tmpabuf_write_str(&tbuf, mtopic->topic);
-
-        /* Copy partition array and update pointer */
-        rkmce->rkmce_mtopic.partitions =
-                rd_tmpabuf_write(&tbuf, mtopic->partitions,
-                                 mtopic->partition_cnt *
-                                 sizeof(*mtopic->partitions));
-
-        /* Clear uncached fields. */
-        for (i = 0 ; i < mtopic->partition_cnt ; i++) {
-                rkmce->rkmce_mtopic.partitions[i].replicas = NULL;
-                rkmce->rkmce_mtopic.partitions[i].replica_cnt = 0;
-                rkmce->rkmce_mtopic.partitions[i].isrs = NULL;
-                rkmce->rkmce_mtopic.partitions[i].isr_cnt = 0;
-        }
-
-        /* Sort partitions for future bsearch() lookups. */
-        qsort(rkmce->rkmce_mtopic.partitions,
-              rkmce->rkmce_mtopic.partition_cnt,
-              sizeof(*rkmce->rkmce_mtopic.partitions),
-              rd_kafka_metadata_partition_id_cmp);
-
-        TAILQ_INSERT_TAIL(&rk->rk_metadata_cache.rkmc_expiry,
-                          rkmce, rkmce_link);
-        rk->rk_metadata_cache.rkmc_cnt++;
-        rkmce->rkmce_ts_expires = ts_expires;
-        rkmce->rkmce_ts_insert = now;
-
-        /* Insert (and replace existing) entry. */
-        old = RD_AVL_INSERT(&rk->rk_metadata_cache.rkmc_avl, rkmce,
-                            rkmce_avlnode);
-        if (old)
-                rd_kafka_metadata_cache_delete(rk, old, 0);
-
-        /* Explicitly not freeing the tmpabuf since rkmce points to its
-         * memory. */
-        return rkmce;
-}
-
-
-/**
- * @brief Purge the metadata cache
- *
- * @locks rd_kafka_wrlock()
- */
-static void rd_kafka_metadata_cache_purge (rd_kafka_t *rk) {
-        struct rd_kafka_metadata_cache_entry *rkmce;
-        int was_empty = TAILQ_EMPTY(&rk->rk_metadata_cache.rkmc_expiry);
-
-        while ((rkmce = TAILQ_FIRST(&rk->rk_metadata_cache.rkmc_expiry)))
-                rd_kafka_metadata_cache_delete(rk, rkmce, 1);
-
-        rd_kafka_timer_stop(&rk->rk_timers,
-                            &rk->rk_metadata_cache.rkmc_expiry_tmr, 1);
-
-        if (!was_empty)
-                rd_kafka_metadata_cache_propagate_changes(rk);
-}
-
-
-/**
- * @brief Start or update the cache expiry timer.
- *        Typically done after a series of cache_topic_update()
- *
- * @locks rd_kafka_wrlock()
- */
-void rd_kafka_metadata_cache_expiry_start (rd_kafka_t *rk) {
-        struct rd_kafka_metadata_cache_entry *rkmce;
-
-        if ((rkmce = TAILQ_FIRST(&rk->rk_metadata_cache.rkmc_expiry)))
-                rd_kafka_timer_start(&rk->rk_timers,
-                                     &rk->rk_metadata_cache.rkmc_expiry_tmr,
-                                     rkmce->rkmce_ts_expires - rd_clock(),
-                                     rd_kafka_metadata_cache_evict_tmr_cb,
-                                     rk);
-}
-
-/**
- * @brief Update the metadata cache for a single topic
- *        with the provided metadata.
- *        If the topic has an error the existing entry is removed
- *        and no new entry is added, which avoids the topic being
- *        suppressed in upcoming metadata requests just because it is cached.
- *        In other words: we want to re-query errored topics.
- *
- * @remark The cache expiry timer will not be updated/started,
- *         call rd_kafka_metadata_cache_expiry_start() instead.
- *
- * @locks rd_kafka_wrlock()
- */
-void
-rd_kafka_metadata_cache_topic_update (rd_kafka_t *rk,
-                                      const rd_kafka_metadata_topic_t *mdt) {
-        rd_ts_t now = rd_clock();
-        rd_ts_t ts_expires = now + (rk->rk_conf.metadata_max_age_ms * 1000);
-        int changed = 1;
-
-        if (!mdt->err)
-                rd_kafka_metadata_cache_insert(rk, mdt, now, ts_expires);
-        else
-                changed = rd_kafka_metadata_cache_delete_by_name(rk,
-                                                                 mdt->topic);
-
-        if (changed)
-                rd_kafka_metadata_cache_propagate_changes(rk);
-}
-
-
-/**
- * @brief Update the metadata cache with the provided metadata.
- *
- * @param abs_update int: absolute update: purge cache before updating.
- *
- * @locks rd_kafka_wrlock()
- */
-void rd_kafka_metadata_cache_update (rd_kafka_t *rk,
-                                     const rd_kafka_metadata_t *md,
-                                     int abs_update) {
-        struct rd_kafka_metadata_cache_entry *rkmce;
-        rd_ts_t now = rd_clock();
-        rd_ts_t ts_expires = now + (rk->rk_conf.metadata_max_age_ms * 1000);
-        int i;
-
-        rd_kafka_dbg(rk, METADATA, "METADATA",
-                     "%s of metadata cache with %d topic(s)",
-                     abs_update ? "Absolute update" : "Update",
-                     md->topic_cnt);
-
-        if (abs_update)
-                rd_kafka_metadata_cache_purge(rk);
-
-
-        for (i = 0 ; i < md->topic_cnt ; i++)
-                rd_kafka_metadata_cache_insert(rk, &md->topics[i], now,
-                                               ts_expires);
-
-        /* Update expiry timer */
-        if ((rkmce = TAILQ_FIRST(&rk->rk_metadata_cache.rkmc_expiry)))
-                rd_kafka_timer_start(&rk->rk_timers,
-                                     &rk->rk_metadata_cache.rkmc_expiry_tmr,
-                                     rkmce->rkmce_ts_expires - now,
-                                     rd_kafka_metadata_cache_evict_tmr_cb,
-                                     rk);
-
-        if (md->topic_cnt > 0)
-                rd_kafka_metadata_cache_propagate_changes(rk);
-}
-
-
-/**
- * @brief Remove cache hints for topics in \p topics
- *        This is done when the Metadata response has been parsed and
- *        hints have been replaced with actual topic information; thus this
- *        will only remove unmatched topics from the cache.
- *
- * @locks rd_kafka_wrlock()
- */
-void rd_kafka_metadata_cache_purge_hints (rd_kafka_t *rk,
-                                          const rd_list_t *topics) {
-        const char *topic;
-        int i;
-        int cnt = 0;
-
-        RD_LIST_FOREACH(topic, topics, i) {
-                struct rd_kafka_metadata_cache_entry *rkmce;
-
-                if (!(rkmce = rd_kafka_metadata_cache_find(rk, topic,
-                                                           0/*any*/)) ||
-                    RD_KAFKA_METADATA_CACHE_VALID(rkmce))
-                        continue;
-
-                rd_kafka_metadata_cache_delete(rk, rkmce, 1/*unlink avl*/);
-                cnt++;
-        }
-
-        if (cnt > 0) {
-                rd_kafka_dbg(rk, METADATA, "METADATA",
-                             "Purged %d/%d cached topic hint(s)",
-                             cnt, rd_list_cnt(topics));
-                rd_kafka_metadata_cache_propagate_changes(rk);
-        }
-}
-
-
-/**
- * @brief Inserts a non-valid entry for topics in \p topics indicating
- *        that a MetadataRequest is in progress.
- *        This avoids sending multiple MetadataRequests for the same topics
- *        if there are already outstanding requests, see
- *        \c rd_kafka_metadata_refresh_topics().
- *
- * @remark These non-valid cache entries' expire time is set to the
- *         MetadataRequest timeout.
- *
- * @param dst rd_list_t(char *topicname): if not NULL: populated with
- *        topics that were added as hints to the cache, e.g., topics to query.
- * @param topics rd_list_t(char *topicname)
- * @param replace int: replace existing valid entries
- *
- * @returns the number of topic hints inserted.
- *
- * @locks rd_kafka_wrlock()
- */
-int rd_kafka_metadata_cache_hint (rd_kafka_t *rk,
-                                  const rd_list_t *topics, rd_list_t *dst,
-                                  int replace) {
-        const char *topic;
-        rd_ts_t now = rd_clock();
-        rd_ts_t ts_expires = now + (rk->rk_conf.socket_timeout_ms * 1000);
-        int i;
-        int cnt = 0;
-
-        RD_LIST_FOREACH(topic, topics, i) {
-                rd_kafka_metadata_topic_t mtopic = {
-                        .topic = (char *)topic,
-                        .err = RD_KAFKA_RESP_ERR__WAIT_CACHE
-                };
-                const struct rd_kafka_metadata_cache_entry *rkmce;
-
-                /* !replace: Don't overwrite valid entries */
-                if (!replace &&
-                    (rkmce =
-                     rd_kafka_metadata_cache_find(rk, topic, 0/*any*/))) {
-                        if (RD_KAFKA_METADATA_CACHE_VALID(rkmce) || dst)
-                                continue;
-                        /* FALLTHRU */
-                }
-
-                rd_kafka_metadata_cache_insert(rk, &mtopic, now, ts_expires);
-                cnt++;
-
-                if (dst)
-                        rd_list_add(dst, rd_strdup(topic));
-
-        }
-
-        if (cnt > 0)
-                rd_kafka_dbg(rk, METADATA, "METADATA",
-                             "Hinted cache of %d/%d topic(s) being queried",
-                             cnt, rd_list_cnt(topics));
-
-        return cnt;
-}
-
-
-/**
- * @brief Same as rd_kafka_metadata_cache_hint() but takes
- *        a topic+partition list as input instead.
- */
-int rd_kafka_metadata_cache_hint_rktparlist (
-        rd_kafka_t *rk,
-        const rd_kafka_topic_partition_list_t *rktparlist,
-        rd_list_t *dst,
-        int replace) {
-        rd_list_t topics;
-        int r;
-
-        rd_list_init(&topics, rktparlist->cnt, rd_free);
-        rd_kafka_topic_partition_list_get_topic_names(rktparlist, &topics,
-                                                      0/*dont include regex*/);
-        r = rd_kafka_metadata_cache_hint(rk, &topics, dst, replace);
-        rd_list_destroy(&topics);
-        return r;
-}
-
-
-/**
- * @brief Cache entry comparator (on topic name)
- */
-static int rd_kafka_metadata_cache_entry_cmp (const void *_a, const void *_b) {
-        const struct rd_kafka_metadata_cache_entry *a = _a, *b = _b;
-        return strcmp(a->rkmce_mtopic.topic, b->rkmce_mtopic.topic);
-}
-
-
-/**
- * @brief Initialize the metadata cache
- *
- * @locks rd_kafka_wrlock()
- */
-void rd_kafka_metadata_cache_init (rd_kafka_t *rk) {
-        rd_avl_init(&rk->rk_metadata_cache.rkmc_avl,
-                    rd_kafka_metadata_cache_entry_cmp, 0);
-        TAILQ_INIT(&rk->rk_metadata_cache.rkmc_expiry);
-        mtx_init(&rk->rk_metadata_cache.rkmc_full_lock, mtx_plain);
-        mtx_init(&rk->rk_metadata_cache.rkmc_cnd_lock, mtx_plain);
-        cnd_init(&rk->rk_metadata_cache.rkmc_cnd);
-
-}
-
-/**
- * @brief Purge and destroy metadata cache
- *
- * @locks rd_kafka_wrlock()
- */
-void rd_kafka_metadata_cache_destroy (rd_kafka_t *rk) {
-        rd_kafka_timer_stop(&rk->rk_timers,
-                            &rk->rk_metadata_cache.rkmc_query_tmr, 1/*lock*/);
-        rd_kafka_metadata_cache_purge(rk);
-        mtx_destroy(&rk->rk_metadata_cache.rkmc_full_lock);
-        mtx_destroy(&rk->rk_metadata_cache.rkmc_cnd_lock);
-        cnd_destroy(&rk->rk_metadata_cache.rkmc_cnd);
-        rd_avl_destroy(&rk->rk_metadata_cache.rkmc_avl);
-}
-
-
-/**
- * @brief Wait for cache update, or timeout.
- *
- * @returns 1 on cache update or 0 on timeout.
- * @locks none
- * @locality any
- */
-int rd_kafka_metadata_cache_wait_change (rd_kafka_t *rk, int timeout_ms) {
-        int r;
-#if ENABLE_DEVEL
-        rd_ts_t ts_start = rd_clock();
-#endif
-        mtx_lock(&rk->rk_metadata_cache.rkmc_cnd_lock);
-        r = cnd_timedwait_ms(&rk->rk_metadata_cache.rkmc_cnd,
-                             &rk->rk_metadata_cache.rkmc_cnd_lock,
-                             timeout_ms);
-        mtx_unlock(&rk->rk_metadata_cache.rkmc_cnd_lock);
-
-#if ENABLE_DEVEL
-        rd_kafka_dbg(rk, METADATA, "CACHEWAIT",
-                     "%s wait took %dms: %s",
-                     __FUNCTION__, (int)((rd_clock() - ts_start)/1000),
-                     r == thrd_success ? "succeeded" : "timed out");
-#endif
-        return r == thrd_success;
-}
-
-/**
- * @brief Propagate that the cache changed (but not what changed) to
- *        any cnd listeners.
- * @locks none
- * @locality any
- */
-static void rd_kafka_metadata_cache_propagate_changes (rd_kafka_t *rk) {
-        mtx_lock(&rk->rk_metadata_cache.rkmc_cnd_lock);
-        cnd_broadcast(&rk->rk_metadata_cache.rkmc_cnd);
-        mtx_unlock(&rk->rk_metadata_cache.rkmc_cnd_lock);
-}
-
-/**
- * @returns the shared metadata for a topic, or NULL if not found in
- *          cache.
- *
- * @locks rd_kafka_*lock()
- */
-const rd_kafka_metadata_topic_t *
-rd_kafka_metadata_cache_topic_get (rd_kafka_t *rk, const char *topic,
-                                   int valid) {
-        struct rd_kafka_metadata_cache_entry *rkmce;
-
-        if (!(rkmce = rd_kafka_metadata_cache_find(rk, topic, valid)))
-                return NULL;
-
-        return &rkmce->rkmce_mtopic;
-}
-
-
-
-
-/**
- * @brief Looks up the shared metadata for a partition along with its topic.
- *
- * @param mtopicp: pointer to topic metadata
- * @param mpartp: pointer to partition metadata
- * @param valid: only return valid entries (no hints)
- *
- * @returns -1 if the topic was not found in the cache, 0 if the topic was
- *          found but not the partition, 1 if both topic and partition were found.
- *
- * @locks rd_kafka_*lock()
- */
-int rd_kafka_metadata_cache_topic_partition_get (
-        rd_kafka_t *rk,
-        const rd_kafka_metadata_topic_t **mtopicp,
-        const rd_kafka_metadata_partition_t **mpartp,
-        const char *topic, int32_t partition, int valid) {
-
-        const rd_kafka_metadata_topic_t *mtopic;
-        const rd_kafka_metadata_partition_t *mpart;
-        rd_kafka_metadata_partition_t skel = { .id = partition };
-
-        *mtopicp = NULL;
-        *mpartp = NULL;
-
-        if (!(mtopic = rd_kafka_metadata_cache_topic_get(rk, topic, valid)))
-                return -1;
-
-        *mtopicp = mtopic;
-
-        /* Partitions array may be sparse so use bsearch lookup. */
-        mpart = bsearch(&skel, mtopic->partitions,
-                        mtopic->partition_cnt,
-                        sizeof(*mtopic->partitions),
-                        rd_kafka_metadata_partition_id_cmp);
-
-        if (!mpart)
-                return 0;
-
-        *mpartp = mpart;
-
-        return 1;
-}
-
-
-/**
- * @returns the number of topics in \p topics that are in the cache.
- *
- * @param topics rd_list(const char *): topic names
- * @param metadata_agep: the age of the oldest entry is returned here.
- *
- * @locks rd_kafka_*lock()
- */
-int rd_kafka_metadata_cache_topics_count_exists (rd_kafka_t *rk,
-                                                 const rd_list_t *topics,
-                                                 int *metadata_agep) {
-        const char *topic;
-        int i;
-        int cnt = 0;
-        int max_age = -1;
-
-        RD_LIST_FOREACH(topic, topics, i) {
-                const struct rd_kafka_metadata_cache_entry *rkmce;
-                int age;
-
-                if (!(rkmce = rd_kafka_metadata_cache_find(rk, topic,
-                                                           1/*valid only*/)))
-                        continue;
-
-                age = (int)((rd_clock() - rkmce->rkmce_ts_insert)/1000);
-                if (age > max_age)
-                        max_age = age;
-                cnt++;
-        }
-
-        *metadata_agep = max_age;
-
-        return cnt;
-
-}
-
-
-/**
- * @brief Copies any topics in \p src to \p dst that either have a valid cache
- *        entry or are not in the cache at all.
- *
- *        In other words: hinted non-valid topics will not be copied to \p dst.
- *
- * @returns the number of topics copied
- *
- * @locks rd_kafka_*lock()
- */
-int rd_kafka_metadata_cache_topics_filter_hinted (rd_kafka_t *rk,
-                                                  rd_list_t *dst,
-                                                  const rd_list_t *src) {
-        const char *topic;
-        int i;
-        int cnt = 0;
-
-
-        RD_LIST_FOREACH(topic, src, i) {
-                const struct rd_kafka_metadata_cache_entry *rkmce;
-
-                rkmce = rd_kafka_metadata_cache_find(rk, topic, 0/*any sort*/);
-                if (rkmce && !RD_KAFKA_METADATA_CACHE_VALID(rkmce))
-                        continue;
-
-                rd_list_add(dst, rd_strdup(topic));
-                cnt++;
-        }
-
-        return cnt;
-}
-
-
-
-/**
- * @brief Dump cache to \p fp
- *
- * @locks rd_kafka_*lock()
- */
-void rd_kafka_metadata_cache_dump (FILE *fp, rd_kafka_t *rk) {
-        const struct rd_kafka_metadata_cache *rkmc = &rk->rk_metadata_cache;
-        const struct rd_kafka_metadata_cache_entry *rkmce;
-        rd_ts_t now = rd_clock();
-
-        fprintf(fp,
-                "Metadata cache with %d entries:\n",
-                rkmc->rkmc_cnt);
-        TAILQ_FOREACH(rkmce, &rkmc->rkmc_expiry, rkmce_link) {
-                fprintf(fp,
-                        "  %s (inserted %dms ago, expires in %dms, "
-                        "%d partition(s), %s)%s%s\n",
-                        rkmce->rkmce_mtopic.topic,
-                        (int)((now - rkmce->rkmce_ts_insert)/1000),
-                        (int)((rkmce->rkmce_ts_expires - now)/1000),
-                        rkmce->rkmce_mtopic.partition_cnt,
-                        RD_KAFKA_METADATA_CACHE_VALID(rkmce) ? "valid":"hint",
-                        rkmce->rkmce_mtopic.err ? " error: " : "",
-                        rkmce->rkmce_mtopic.err ?
-                        rd_kafka_err2str(rkmce->rkmce_mtopic.err) : "");
-        }
-}
-
-/**@}*/
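
For reference, the doc comments above spell out the removed cache module's
locking contract: lookups go through rd_kafka_metadata_cache_find()/..._topic_get()
while the caller holds rd_kafka_*lock(), and the returned entry may only be used
for the duration of the lock. A minimal lookup sketch along those lines, using
only functions from the deleted file (the wrapper function itself is hypothetical):

/* Hypothetical helper: returns the cached partition count for a topic,
 * or -1 if the topic has no valid (non-hint) cache entry. */
static int my_cached_partition_cnt (rd_kafka_t *rk, const char *topic) {
        const rd_kafka_metadata_topic_t *mtopic;
        int cnt = -1;

        rd_kafka_rdlock(rk);
        mtopic = rd_kafka_metadata_cache_topic_get(rk, topic, 1/*valid only*/);
        if (mtopic)
                cnt = mtopic->partition_cnt;
        rd_kafka_rdunlock(rk); /* entry must not be used past this point */

        return cnt;
}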

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdkafka_msg.c
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdkafka_msg.c b/thirdparty/librdkafka-0.11.1/src/rdkafka_msg.c
deleted file mode 100644
index 7a7f557..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdkafka_msg.c
+++ /dev/null
@@ -1,800 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012,2013 Magnus Edenhill
- * All rights reserved.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met: 
- * 
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer. 
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution. 
- * 
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "rd.h"
-#include "rdkafka_int.h"
-#include "rdkafka_msg.h"
-#include "rdkafka_topic.h"
-#include "rdkafka_partition.h"
-#include "rdkafka_interceptor.h"
-#include "rdcrc32.h"
-#include "rdrand.h"
-#include "rdtime.h"
-
-#include "rdsysqueue.h"
-
-#include <stdarg.h>
-
-void rd_kafka_msg_destroy (rd_kafka_t *rk, rd_kafka_msg_t *rkm) {
-
-	if (rkm->rkm_flags & RD_KAFKA_MSG_F_ACCOUNT) {
-		rd_dassert(rk || rkm->rkm_rkmessage.rkt);
-		rd_kafka_curr_msgs_sub(
-			rk ? rk :
-			rd_kafka_topic_a2i(rkm->rkm_rkmessage.rkt)->rkt_rk,
-			1, rkm->rkm_len);
-	}
-
-	if (likely(rkm->rkm_rkmessage.rkt != NULL))
-		rd_kafka_topic_destroy0(
-                        rd_kafka_topic_a2s(rkm->rkm_rkmessage.rkt));
-
-	if (rkm->rkm_flags & RD_KAFKA_MSG_F_FREE && rkm->rkm_payload)
-		rd_free(rkm->rkm_payload);
-
-	if (rkm->rkm_flags & RD_KAFKA_MSG_F_FREE_RKM)
-		rd_free(rkm);
-}
-
-
-
-
-/**
- * @brief Create a new message, copying the payload as indicated by msgflags.
- *
- * @returns the new message
- */
-static
-rd_kafka_msg_t *rd_kafka_msg_new00 (rd_kafka_itopic_t *rkt,
-				    int32_t partition,
-				    int msgflags,
-				    char *payload, size_t len,
-				    const void *key, size_t keylen,
-				    void *msg_opaque) {
-	rd_kafka_msg_t *rkm;
-	size_t mlen = sizeof(*rkm);
-	char *p;
-
-	/* If we are to make a copy of the payload, allocate space for it too */
-	if (msgflags & RD_KAFKA_MSG_F_COPY) {
-		msgflags &= ~RD_KAFKA_MSG_F_FREE;
-		mlen += len;
-	}
-
-	mlen += keylen;
-
-	/* Note: using rd_malloc here, not rd_calloc, so make sure all fields
-	 *       are properly set up. */
-	rkm                 = rd_malloc(mlen);
-	rkm->rkm_err        = 0;
-	rkm->rkm_flags      = RD_KAFKA_MSG_F_FREE_RKM | msgflags;
-	rkm->rkm_len        = len;
-	rkm->rkm_opaque     = msg_opaque;
-	rkm->rkm_rkmessage.rkt = rd_kafka_topic_keep_a(rkt);
-
-	rkm->rkm_partition  = partition;
-        rkm->rkm_offset     = 0;
-	rkm->rkm_timestamp  = 0;
-	rkm->rkm_tstype     = RD_KAFKA_TIMESTAMP_NOT_AVAILABLE;
-
-	p = (char *)(rkm+1);
-
-	if (payload && msgflags & RD_KAFKA_MSG_F_COPY) {
-		/* Copy payload to space following the ..msg_t */
-		rkm->rkm_payload = p;
-		memcpy(rkm->rkm_payload, payload, len);
-		p += len;
-
-	} else {
-		/* Just point to the provided payload. */
-		rkm->rkm_payload = payload;
-	}
-
-	if (key) {
-		rkm->rkm_key     = p;
-		rkm->rkm_key_len = keylen;
-		memcpy(rkm->rkm_key, key, keylen);
-	} else {
-		rkm->rkm_key = NULL;
-		rkm->rkm_key_len = 0;
-	}
-
-
-        return rkm;
-}
-
-
-
-
-/**
- * @brief Create a new message.
- *
- * @remark Must only be used by producer code.
- *
- * Returns the new message on success or NULL on error.
- * Both 'errp' and 'errnop' (if non-NULL) are set appropriately.
- */
-static rd_kafka_msg_t *rd_kafka_msg_new0 (rd_kafka_itopic_t *rkt,
-                                          int32_t force_partition,
-                                          int msgflags,
-                                          char *payload, size_t len,
-                                          const void *key, size_t keylen,
-                                          void *msg_opaque,
-                                          rd_kafka_resp_err_t *errp,
-                                          int *errnop,
-                                          int64_t timestamp,
-                                          rd_ts_t now) {
-	rd_kafka_msg_t *rkm;
-
-	if (unlikely(!payload))
-		len = 0;
-	if (!key)
-		keylen = 0;
-
-	if (unlikely(len + keylen >
-		     (size_t)rkt->rkt_rk->rk_conf.max_msg_size ||
-		     keylen > INT32_MAX)) {
-		*errp = RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE;
-		if (errnop)
-			*errnop = EMSGSIZE;
-		return NULL;
-	}
-
-	*errp = rd_kafka_curr_msgs_add(rkt->rkt_rk, 1, len,
-				       msgflags & RD_KAFKA_MSG_F_BLOCK);
-	if (unlikely(*errp)) {
-		if (errnop)
-			*errnop = ENOBUFS;
-		return NULL;
-	}
-
-
-	rkm = rd_kafka_msg_new00(rkt, force_partition,
-				 msgflags|RD_KAFKA_MSG_F_ACCOUNT /* curr_msgs_add() */,
-				 payload, len, key, keylen, msg_opaque);
-
-        if (timestamp)
-                rkm->rkm_timestamp  = timestamp;
-        else
-                rkm->rkm_timestamp = rd_uclock()/1000;
-        rkm->rkm_tstype     = RD_KAFKA_TIMESTAMP_CREATE_TIME;
-
-        rkm->rkm_ts_enq = now;
-
-	if (rkt->rkt_conf.message_timeout_ms == 0) {
-		rkm->rkm_ts_timeout = INT64_MAX;
-	} else {
-		rkm->rkm_ts_timeout = now +
-			rkt->rkt_conf.message_timeout_ms * 1000;
-	}
-
-        /* Call interceptor chain for on_send */
-        rd_kafka_interceptors_on_send(rkt->rkt_rk, &rkm->rkm_rkmessage);
-
-        return rkm;
-}
-
-
-/**
- * @brief Produce: creates a new message, runs the partitioner and enqueues
- *        it on the selected partition.
- *
- * @returns 0 on success or -1 on error.
- *
- * If the function returns -1 and RD_KAFKA_MSG_F_FREE was specified, then
- * the memory associated with the payload is still the caller's
- * responsibility.
- *
- * @locks none
- */
-int rd_kafka_msg_new (rd_kafka_itopic_t *rkt, int32_t force_partition,
-		      int msgflags,
-		      char *payload, size_t len,
-		      const void *key, size_t keylen,
-		      void *msg_opaque) {
-	rd_kafka_msg_t *rkm;
-	rd_kafka_resp_err_t err;
-	int errnox;
-
-        /* Create message */
-        rkm = rd_kafka_msg_new0(rkt, force_partition, msgflags, 
-                                payload, len, key, keylen, msg_opaque,
-                                &err, &errnox, 0, rd_clock());
-        if (unlikely(!rkm)) {
-                /* errno is already set by msg_new() */
-		rd_kafka_set_last_error(err, errnox);
-                return -1;
-        }
-
-
-        /* Partition the message */
-	err = rd_kafka_msg_partitioner(rkt, rkm, 1);
-	if (likely(!err)) {
-		rd_kafka_set_last_error(0, 0);
-		return 0;
-	}
-
-        /* Interceptor: unroll failing messages by triggering on_ack.. */
-        rkm->rkm_err = err;
-        rd_kafka_interceptors_on_acknowledgement(rkt->rkt_rk,
-                                                 &rkm->rkm_rkmessage);
-
-	/* Handle partitioner failures: it only fails when the application
-	 * attempts to force a destination partition that does not exist
-	 * in the cluster.  Note we must clear the RD_KAFKA_MSG_F_FREE
-	 * flag since our contract says we don't free the payload on
-	 * failure. */
-
-	rkm->rkm_flags &= ~RD_KAFKA_MSG_F_FREE;
-	rd_kafka_msg_destroy(rkt->rkt_rk, rkm);
-
-	/* Translate error codes to errnos. */
-	if (err == RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION)
-		rd_kafka_set_last_error(err, ESRCH);
-	else if (err == RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC)
-		rd_kafka_set_last_error(err, ENOENT);
-	else
-		rd_kafka_set_last_error(err, EINVAL); /* NOTREACHED */
-
-	return -1;
-}
-
-
-rd_kafka_resp_err_t rd_kafka_producev (rd_kafka_t *rk, ...) {
-        va_list ap;
-        rd_kafka_msg_t s_rkm = {
-                /* Message defaults */
-                .rkm_partition = RD_KAFKA_PARTITION_UA,
-                .rkm_timestamp = 0, /* current time */
-        };
-        rd_kafka_msg_t *rkm = &s_rkm;
-        rd_kafka_vtype_t vtype;
-        rd_kafka_topic_t *app_rkt;
-        shptr_rd_kafka_itopic_t *s_rkt = NULL;
-        rd_kafka_itopic_t *rkt;
-        rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;
-
-        va_start(ap, rk);
-        while ((vtype = va_arg(ap, rd_kafka_vtype_t)) != RD_KAFKA_VTYPE_END) {
-                switch (vtype)
-                {
-                case RD_KAFKA_VTYPE_TOPIC:
-                        s_rkt = rd_kafka_topic_new0(rk,
-                                                    va_arg(ap, const char *),
-                                                    NULL, NULL, 1);
-                        break;
-
-                case RD_KAFKA_VTYPE_RKT:
-                        app_rkt = va_arg(ap, rd_kafka_topic_t *);
-                        s_rkt = rd_kafka_topic_keep(
-                                rd_kafka_topic_a2i(app_rkt));
-                        break;
-
-                case RD_KAFKA_VTYPE_PARTITION:
-                        rkm->rkm_partition = va_arg(ap, int32_t);
-                        break;
-
-                case RD_KAFKA_VTYPE_VALUE:
-                        rkm->rkm_payload = va_arg(ap, void *);
-                        rkm->rkm_len = va_arg(ap, size_t);
-                        break;
-
-                case RD_KAFKA_VTYPE_KEY:
-                        rkm->rkm_key = va_arg(ap, void *);
-                        rkm->rkm_key_len = va_arg(ap, size_t);
-                        break;
-
-                case RD_KAFKA_VTYPE_OPAQUE:
-                        rkm->rkm_opaque = va_arg(ap, void *);
-                        break;
-
-                case RD_KAFKA_VTYPE_MSGFLAGS:
-                        rkm->rkm_flags = va_arg(ap, int);
-                        break;
-
-                case RD_KAFKA_VTYPE_TIMESTAMP:
-                        rkm->rkm_timestamp = va_arg(ap, int64_t);
-                        break;
-
-                default:
-                        err = RD_KAFKA_RESP_ERR__INVALID_ARG;
-                        break;
-                }
-        }
-
-        va_end(ap);
-
-        if (unlikely(!s_rkt))
-                return RD_KAFKA_RESP_ERR__INVALID_ARG;
-
-        rkt = rd_kafka_topic_s2i(s_rkt);
-
-        if (likely(!err))
-                rkm = rd_kafka_msg_new0(rkt,
-                                        rkm->rkm_partition,
-                                        rkm->rkm_flags,
-                                        rkm->rkm_payload, rkm->rkm_len,
-                                        rkm->rkm_key, rkm->rkm_key_len,
-                                        rkm->rkm_opaque,
-                                        &err, NULL,
-                                        rkm->rkm_timestamp, rd_clock());
-
-        if (unlikely(err))
-                return err;
-
-        /* Partition the message */
-        err = rd_kafka_msg_partitioner(rkt, rkm, 1);
-        if (unlikely(err)) {
-                /* Handle partitioner failures: it only fails when
-                 * the application attempts to force a destination
-                 * partition that does not exist in the cluster. */
-
-                /* Interceptors: Unroll on_send by on_ack.. */
-                rkm->rkm_err = err;
-                rd_kafka_interceptors_on_acknowledgement(rk,
-                                                         &rkm->rkm_rkmessage);
-
-                /* Note we must clear the RD_KAFKA_MSG_F_FREE
-                 * flag since our contract says we don't free the payload on
-                 * failure. */
-                rkm->rkm_flags &= ~RD_KAFKA_MSG_F_FREE;
-
-                rd_kafka_msg_destroy(rk, rkm);
-        }
-
-        rd_kafka_topic_destroy0(s_rkt);
-
-        return err;
-}
-
-/**
- * Produce a batch of messages.
- * Returns the number of messages successfully queued for producing.
- * Each message's .err will be set accordingly.
- */
-int rd_kafka_produce_batch (rd_kafka_topic_t *app_rkt, int32_t partition,
-                            int msgflags,
-                            rd_kafka_message_t *rkmessages, int message_cnt) {
-        rd_kafka_msgq_t tmpq = RD_KAFKA_MSGQ_INITIALIZER(tmpq);
-        int i;
-	int64_t utc_now = rd_uclock() / 1000;
-        rd_ts_t now = rd_clock();
-        int good = 0;
-        rd_kafka_resp_err_t all_err = 0;
-        rd_kafka_itopic_t *rkt = rd_kafka_topic_a2i(app_rkt);
-
-        /* For the partitioner: hold the lock for the entire run;
-         * for a single partition: only acquire it when needed at the end. */
-	if (partition == RD_KAFKA_PARTITION_UA)
-		rd_kafka_topic_rdlock(rkt);
-
-        for (i = 0 ; i < message_cnt ; i++) {
-                rd_kafka_msg_t *rkm;
-
-                /* Propagate error for all messages. */
-                if (unlikely(all_err)) {
-                        rkmessages[i].err = all_err;
-                        continue;
-                }
-
-                /* Create message */
-                rkm = rd_kafka_msg_new0(rkt,
-                                        partition , msgflags,
-                                        rkmessages[i].payload,
-                                        rkmessages[i].len,
-                                        rkmessages[i].key,
-                                        rkmessages[i].key_len,
-                                        rkmessages[i]._private,
-                                        &rkmessages[i].err,
-					NULL, utc_now, now);
-                if (unlikely(!rkm)) {
-			if (rkmessages[i].err == RD_KAFKA_RESP_ERR__QUEUE_FULL)
-				all_err = rkmessages[i].err;
-                        continue;
-		}
-
-                /* Two cases here:
-                 *  partition==UA:     run the partitioner (slow)
-                 *  fixed partition:   simply concatenate the queue onto the partition's queue */
-                if (partition == RD_KAFKA_PARTITION_UA) {
-                        /* Partition the message */
-                        rkmessages[i].err =
-                                rd_kafka_msg_partitioner(rkt, rkm,
-                                                         0/*already locked*/);
-
-                        if (unlikely(rkmessages[i].err)) {
-                                /* Interceptors: Unroll on_send by on_ack.. */
-                                rd_kafka_interceptors_on_acknowledgement(
-                                        rkt->rkt_rk, &rkmessages[i]);
-
-                                rd_kafka_msg_destroy(rkt->rkt_rk, rkm);
-                                continue;
-                        }
-
-
-                } else {
-                        /* Single destination partition, enqueue message
-                         * on temporary queue for later queue concat. */
-                        rd_kafka_msgq_enq(&tmpq, rkm);
-                }
-
-                rkmessages[i].err = RD_KAFKA_RESP_ERR_NO_ERROR;
-                good++;
-        }
-
-
-
-	/* Specific partition */
-        if (partition != RD_KAFKA_PARTITION_UA) {
-                shptr_rd_kafka_toppar_t *s_rktp;
-
-		rd_kafka_topic_rdlock(rkt);
-
-                s_rktp = rd_kafka_toppar_get_avail(rkt, partition,
-                                                   1/*ua on miss*/, &all_err);
-                /* Concatenate tmpq onto partition queue. */
-                if (likely(s_rktp != NULL)) {
-                        rd_kafka_toppar_t *rktp = rd_kafka_toppar_s2i(s_rktp);
-                        rd_atomic64_add(&rktp->rktp_c.msgs, good);
-                        rd_kafka_toppar_concat_msgq(rktp, &tmpq);
-                        rd_kafka_toppar_destroy(s_rktp);
-                }
-        }
-
-	rd_kafka_topic_rdunlock(rkt);
-
-        return good;
-}
-
-/**
- * Scan 'rkmq' for messages that have timed out and remove them from
- * 'rkmq' and add to 'timedout'.
- *
- * Returns the number of messages timed out.
- */
-int rd_kafka_msgq_age_scan (rd_kafka_msgq_t *rkmq,
-			    rd_kafka_msgq_t *timedout,
-			    rd_ts_t now) {
-	rd_kafka_msg_t *rkm, *tmp;
-	int cnt = rd_atomic32_get(&timedout->rkmq_msg_cnt);
-
-	/* Assume messages are added in time-sequential order */
-	TAILQ_FOREACH_SAFE(rkm, &rkmq->rkmq_msgs, rkm_link, tmp) {
-		if (likely(rkm->rkm_ts_timeout > now))
-			break;
-
-		rd_kafka_msgq_deq(rkmq, rkm, 1);
-		rd_kafka_msgq_enq(timedout, rkm);
-	}
-
-	return rd_atomic32_get(&timedout->rkmq_msg_cnt) - cnt;
-}
-
-
-
-
-
-int32_t rd_kafka_msg_partitioner_random (const rd_kafka_topic_t *rkt,
-					 const void *key, size_t keylen,
-					 int32_t partition_cnt,
-					 void *rkt_opaque,
-					 void *msg_opaque) {
-	int32_t p = rd_jitter(0, partition_cnt-1);
-	if (unlikely(!rd_kafka_topic_partition_available(rkt, p)))
-		return rd_jitter(0, partition_cnt-1);
-	else
-		return p;
-}
-
-int32_t rd_kafka_msg_partitioner_consistent (const rd_kafka_topic_t *rkt,
-                                             const void *key, size_t keylen,
-                                             int32_t partition_cnt,
-                                             void *rkt_opaque,
-                                             void *msg_opaque) {
-    return rd_crc32(key, keylen) % partition_cnt;
-}
-
-int32_t rd_kafka_msg_partitioner_consistent_random (const rd_kafka_topic_t *rkt,
-                                             const void *key, size_t keylen,
-                                             int32_t partition_cnt,
-                                             void *rkt_opaque,
-                                             void *msg_opaque) {
-    if (keylen == 0)
-      return rd_kafka_msg_partitioner_random(rkt,
-                                             key,
-                                             keylen,
-                                             partition_cnt,
-                                             rkt_opaque,
-                                             msg_opaque);
-    else
-      return rd_kafka_msg_partitioner_consistent(rkt,
-                                                 key,
-                                                 keylen,
-                                                 partition_cnt,
-                                                 rkt_opaque,
-                                                 msg_opaque);
-}
-
-
-/**
- * Assigns a message to a topic partition using a partitioner.
- * Returns RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION or .._UNKNOWN_TOPIC if
- * partitioning failed, or 0 on success.
- */
-int rd_kafka_msg_partitioner (rd_kafka_itopic_t *rkt, rd_kafka_msg_t *rkm,
-			      int do_lock) {
-	int32_t partition;
-	rd_kafka_toppar_t *rktp_new;
-        shptr_rd_kafka_toppar_t *s_rktp_new;
-	rd_kafka_resp_err_t err;
-
-	if (do_lock)
-		rd_kafka_topic_rdlock(rkt);
-
-        switch (rkt->rkt_state)
-        {
-        case RD_KAFKA_TOPIC_S_UNKNOWN:
-                /* No metadata received from cluster yet.
-                 * Put message in UA partition and re-run partitioner when
-                 * cluster comes up. */
-		partition = RD_KAFKA_PARTITION_UA;
-                break;
-
-        case RD_KAFKA_TOPIC_S_NOTEXISTS:
-                /* Topic not found in cluster.
-                 * Fail message immediately. */
-                err = RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC;
-		if (do_lock)
-			rd_kafka_topic_rdunlock(rkt);
-                return err;
-
-        case RD_KAFKA_TOPIC_S_EXISTS:
-                /* Topic exists in cluster. */
-
-                /* Topic exists but has no partitions.
-                 * This is usually a transient state following the
-                 * auto-creation of a topic. */
-                if (unlikely(rkt->rkt_partition_cnt == 0)) {
-                        partition = RD_KAFKA_PARTITION_UA;
-                        break;
-                }
-
-                /* Partition not assigned, run partitioner. */
-                if (rkm->rkm_partition == RD_KAFKA_PARTITION_UA) {
-                        rd_kafka_topic_t *app_rkt;
-                        /* Provide a temporary app_rkt instance to protect
-                         * from the case where the application decided to
-                         * destroy its topic object prior to delivery completion
-                         * (issue #502). */
-                        app_rkt = rd_kafka_topic_keep_a(rkt);
-                        partition = rkt->rkt_conf.
-                                partitioner(app_rkt,
-                                            rkm->rkm_key,
-					    rkm->rkm_key_len,
-                                            rkt->rkt_partition_cnt,
-                                            rkt->rkt_conf.opaque,
-                                            rkm->rkm_opaque);
-                        rd_kafka_topic_destroy0(
-                                rd_kafka_topic_a2s(app_rkt));
-                } else
-                        partition = rkm->rkm_partition;
-
-                /* Check that partition exists. */
-                if (partition >= rkt->rkt_partition_cnt) {
-                        err = RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION;
-                        if (do_lock)
-                                rd_kafka_topic_rdunlock(rkt);
-                        return err;
-                }
-                break;
-
-        default:
-                rd_kafka_assert(rkt->rkt_rk, !*"NOTREACHED");
-                break;
-        }
-
-	/* Get new partition */
-	s_rktp_new = rd_kafka_toppar_get(rkt, partition, 0);
-
-	if (unlikely(!s_rktp_new)) {
-		/* Unknown topic or partition */
-		if (rkt->rkt_state == RD_KAFKA_TOPIC_S_NOTEXISTS)
-			err = RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC;
-		else
-			err = RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION;
-
-		if (do_lock)
-			rd_kafka_topic_rdunlock(rkt);
-
-		return  err;
-	}
-
-        rktp_new = rd_kafka_toppar_s2i(s_rktp_new);
-        rd_atomic64_add(&rktp_new->rktp_c.msgs, 1);
-
-        /* Update message partition */
-        if (rkm->rkm_partition == RD_KAFKA_PARTITION_UA)
-                rkm->rkm_partition = partition;
-
-	/* Partition is available: enqueue msg on partition's queue */
-	rd_kafka_toppar_enq_msg(rktp_new, rkm);
-	if (do_lock)
-		rd_kafka_topic_rdunlock(rkt);
-	rd_kafka_toppar_destroy(s_rktp_new); /* from _get() */
-	return 0;
-}
-
-
-
-
-/**
- * @name Public message type (rd_kafka_message_t)
- */
-void rd_kafka_message_destroy (rd_kafka_message_t *rkmessage) {
-        rd_kafka_op_t *rko;
-
-        if (likely((rko = (rd_kafka_op_t *)rkmessage->_private) != NULL))
-                rd_kafka_op_destroy(rko);
-        else {
-                rd_kafka_msg_t *rkm = rd_kafka_message2msg(rkmessage);
-                rd_kafka_msg_destroy(NULL, rkm);
-        }
-}
-
-
-rd_kafka_message_t *rd_kafka_message_new (void) {
-        rd_kafka_msg_t *rkm = rd_calloc(1, sizeof(*rkm));
-        return (rd_kafka_message_t *)rkm;
-}
-
-
-/**
- * @brief Set up a rkmessage from an rko for passing to the application.
- * @remark Will trigger on_consume() interceptors if any.
- */
-static rd_kafka_message_t *
-rd_kafka_message_setup (rd_kafka_op_t *rko, rd_kafka_message_t *rkmessage) {
-        rd_kafka_itopic_t *rkt;
-        rd_kafka_toppar_t *rktp = NULL;
-
-        if (rko->rko_type == RD_KAFKA_OP_DR) {
-                rkt = rd_kafka_topic_s2i(rko->rko_u.dr.s_rkt);
-        } else {
-                if (rko->rko_rktp) {
-                        rktp = rd_kafka_toppar_s2i(rko->rko_rktp);
-                        rkt = rktp->rktp_rkt;
-                } else
-                        rkt = NULL;
-
-                rkmessage->_private = rko;
-        }
-
-
-        if (!rkmessage->rkt && rkt)
-                rkmessage->rkt = rd_kafka_topic_keep_a(rkt);
-
-        if (rktp)
-                rkmessage->partition = rktp->rktp_partition;
-
-        if (!rkmessage->err)
-                rkmessage->err = rko->rko_err;
-
-        /* Call on_consume interceptors */
-        switch (rko->rko_type)
-        {
-        case RD_KAFKA_OP_FETCH:
-                if (!rkmessage->err && rkt)
-                        rd_kafka_interceptors_on_consume(rkt->rkt_rk,
-                                                         rkmessage);
-                break;
-
-        default:
-                break;
-        }
-
-        return rkmessage;
-}
-
-
-
-/**
- * @brief Get rkmessage from rkm (for EVENT_DR)
- * @remark Must only be called just prior to passing a dr to the application.
- */
-rd_kafka_message_t *rd_kafka_message_get_from_rkm (rd_kafka_op_t *rko,
-                                                   rd_kafka_msg_t *rkm) {
-        return rd_kafka_message_setup(rko, &rkm->rkm_rkmessage);
-}
-
-/**
- * @brief Convert rko to rkmessage
- * @remark Must only be called just prior to passing a consumed message
- *         or event to the application.
- * @remark Will trigger on_consume() interceptors, if any.
- * @returns a rkmessage (bound to the rko).
- */
-rd_kafka_message_t *rd_kafka_message_get (rd_kafka_op_t *rko) {
-        rd_kafka_message_t *rkmessage;
-
-        if (!rko)
-                return rd_kafka_message_new(); /* empty */
-
-        switch (rko->rko_type)
-        {
-        case RD_KAFKA_OP_FETCH:
-                /* Use embedded rkmessage */
-                rkmessage = &rko->rko_u.fetch.rkm.rkm_rkmessage;
-                break;
-
-        case RD_KAFKA_OP_ERR:
-        case RD_KAFKA_OP_CONSUMER_ERR:
-                rkmessage = &rko->rko_u.err.rkm.rkm_rkmessage;
-                rkmessage->payload = rko->rko_u.err.errstr;
-                rkmessage->offset  = rko->rko_u.err.offset;
-                break;
-
-        default:
-                rd_kafka_assert(NULL, !*"unhandled optype");
-                RD_NOTREACHED();
-                return NULL;
-        }
-
-        return rd_kafka_message_setup(rko, rkmessage);
-}
-
-
-int64_t rd_kafka_message_timestamp (const rd_kafka_message_t *rkmessage,
-                                    rd_kafka_timestamp_type_t *tstype) {
-        rd_kafka_msg_t *rkm;
-
-        if (rkmessage->err) {
-                if (tstype)
-                        *tstype = RD_KAFKA_TIMESTAMP_NOT_AVAILABLE;
-                return -1;
-        }
-
-        rkm = rd_kafka_message2msg((rd_kafka_message_t *)rkmessage);
-
-        if (tstype)
-                *tstype = rkm->rkm_tstype;
-
-        return rkm->rkm_timestamp;
-}
-
-
-int64_t rd_kafka_message_latency (const rd_kafka_message_t *rkmessage) {
-        rd_kafka_msg_t *rkm;
-
-        rkm = rd_kafka_message2msg((rd_kafka_message_t *)rkmessage);
-
-        if (unlikely(!rkm->rkm_ts_enq))
-                return -1;
-
-        return rd_clock() - rkm->rkm_ts_enq;
-}
-
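
For context, rd_kafka_producev() above is the public varargs produce entry
point; applications drive it with the RD_KAFKA_V_* wrappers from rdkafka.h
rather than raw vtype/value pairs. A rough usage sketch (the producer handle
rk and the topic name are placeholder assumptions, not code from this commit):

#include <librdkafka/rdkafka.h>
#include <stdio.h>

/* Produce a single copied message and report any synchronous error. */
static void produce_one (rd_kafka_t *rk) {
        rd_kafka_resp_err_t err;

        err = rd_kafka_producev(rk,
                                RD_KAFKA_V_TOPIC("test"),
                                RD_KAFKA_V_KEY("key", 3),
                                RD_KAFKA_V_VALUE("hello", 5),
                                RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY),
                                RD_KAFKA_V_END);
        if (err)
                fprintf(stderr, "producev failed: %s\n",
                        rd_kafka_err2str(err));
}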

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdkafka_msg.h
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdkafka_msg.h b/thirdparty/librdkafka-0.11.1/src/rdkafka_msg.h
deleted file mode 100644
index 4f3f2f9..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdkafka_msg.h
+++ /dev/null
@@ -1,290 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012,2013 Magnus Edenhill
- * All rights reserved.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met: 
- * 
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer. 
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution. 
- * 
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#pragma once
-
-#include "rdsysqueue.h"
-
-#include "rdkafka_proto.h"
-
-
-/**
- * @brief Message.MsgAttributes for MsgVersion v0..v1,
- *        also used for MessageSet.Attributes for MsgVersion v2.
- */
-#define RD_KAFKA_MSG_ATTR_GZIP             (1 << 0)
-#define RD_KAFKA_MSG_ATTR_SNAPPY           (1 << 1)
-#define RD_KAFKA_MSG_ATTR_LZ4              (3)
-#define RD_KAFKA_MSG_ATTR_COMPRESSION_MASK 0x3
-#define RD_KAFKA_MSG_ATTR_CREATE_TIME      (0 << 3)
-#define RD_KAFKA_MSG_ATTR_LOG_APPEND_TIME  (1 << 3)
-
-
-/**
- * @brief MessageSet.Attributes for MsgVersion v2
- *
- * Attributes:
- *  -------------------------------------------------------------------------------------------------
- *  | Unused (6-15) | Control (5) | Transactional (4) | Timestamp Type (3) | Compression Type (0-2) |
- *  -------------------------------------------------------------------------------------------------
- */
-/* Compression types same as MsgVersion 0 above */
-/* Timestamp type same as MsgVersion 0 above */
-#define RD_KAFKA_MSGSET_V2_ATTR_TRANSACTIONAL (1 << 4)
-#define RD_KAFKA_MSGSET_V2_ATTR_CONTROL       (1 << 5)
-
-
-typedef struct rd_kafka_msg_s {
-	rd_kafka_message_t rkm_rkmessage;  /* MUST be first field */
-#define rkm_len               rkm_rkmessage.len
-#define rkm_payload           rkm_rkmessage.payload
-#define rkm_opaque            rkm_rkmessage._private
-#define rkm_partition         rkm_rkmessage.partition
-#define rkm_offset            rkm_rkmessage.offset
-#define rkm_key               rkm_rkmessage.key
-#define rkm_key_len           rkm_rkmessage.key_len
-#define rkm_err               rkm_rkmessage.err
-
-	TAILQ_ENTRY(rd_kafka_msg_s)  rkm_link;
-
-	int        rkm_flags;
-	/* @remark These additional flags must not collide with
-	 *         the RD_KAFKA_MSG_F_* flags in rdkafka.h */
-#define RD_KAFKA_MSG_F_FREE_RKM     0x10000 /* msg_t is allocated */
-#define RD_KAFKA_MSG_F_ACCOUNT      0x20000 /* accounted for in curr_msgs */
-
-	int64_t    rkm_timestamp;  /* Message format V1.
-				    * Meaning of timestamp depends on
-				    * message Attribute LogAppendtime (broker)
-				    * or CreateTime (producer).
-				    * Unit is milliseconds since epoch (UTC).*/
-	rd_kafka_timestamp_type_t rkm_tstype; /* rkm_timestamp type */
-
-        union {
-                struct {
-                        rd_ts_t ts_timeout; /* Message timeout */
-                        rd_ts_t ts_enq;     /* Enqueue/Produce time */
-                } producer;
-#define rkm_ts_timeout rkm_u.producer.ts_timeout
-#define rkm_ts_enq     rkm_u.producer.ts_enq
-        } rkm_u;
-} rd_kafka_msg_t;
-
-TAILQ_HEAD(rd_kafka_msg_head_s, rd_kafka_msg_s);
-
-
-/** @returns the absolute time a message was enqueued (producer) */
-#define rd_kafka_msg_enq_time(rkm) ((rkm)->rkm_ts_enq)
-
-/**
- * @returns the message's total maximum on-wire size.
- * @remark Depending on message version (MagicByte) the actual size
- *         may be smaller.
- */
-static RD_INLINE RD_UNUSED
-size_t rd_kafka_msg_wire_size (const rd_kafka_msg_t *rkm, int MsgVersion) {
-        static const size_t overheads[] = {
-                [0] = RD_KAFKAP_MESSAGE_V0_OVERHEAD,
-                [1] = RD_KAFKAP_MESSAGE_V1_OVERHEAD,
-                [2] = RD_KAFKAP_MESSAGE_V2_OVERHEAD
-        };
-        rd_dassert(MsgVersion >= 0 && MsgVersion <= 2);
-        return overheads[MsgVersion] + rkm->rkm_len + rkm->rkm_key_len;
-}
-
-
-/**
- * @returns the enveloping rd_kafka_msg_t pointer for a rd_kafka_msg_t
- *          wrapped rd_kafka_message_t.
- */
-static RD_INLINE RD_UNUSED
-rd_kafka_msg_t *rd_kafka_message2msg (rd_kafka_message_t *rkmessage) {
-	return (rd_kafka_msg_t *)rkmessage;
-}
-
-
-
-
-
-typedef struct rd_kafka_msgq_s {
-	TAILQ_HEAD(, rd_kafka_msg_s) rkmq_msgs;
-	rd_atomic32_t rkmq_msg_cnt;
-	rd_atomic64_t rkmq_msg_bytes;
-} rd_kafka_msgq_t;
-
-#define RD_KAFKA_MSGQ_INITIALIZER(rkmq) \
-	{ .rkmq_msgs = TAILQ_HEAD_INITIALIZER((rkmq).rkmq_msgs) }
-
-#define RD_KAFKA_MSGQ_FOREACH(elm,head) \
-	TAILQ_FOREACH(elm, &(head)->rkmq_msgs, rkm_link)
-
-/**
- * Returns the number of messages in the specified queue.
- */
-static RD_INLINE RD_UNUSED int rd_kafka_msgq_len (rd_kafka_msgq_t *rkmq) {
-	return (int)rd_atomic32_get(&rkmq->rkmq_msg_cnt);
-}
-
-/**
- * Returns the total number of bytes in the specified queue.
- */
-static RD_INLINE RD_UNUSED size_t rd_kafka_msgq_size (rd_kafka_msgq_t *rkmq) {
-	return (size_t)rd_atomic64_get(&rkmq->rkmq_msg_bytes);
-}
-
-
-void rd_kafka_msg_destroy (rd_kafka_t *rk, rd_kafka_msg_t *rkm);
-
-int rd_kafka_msg_new (rd_kafka_itopic_t *rkt, int32_t force_partition,
-		      int msgflags,
-		      char *payload, size_t len,
-		      const void *keydata, size_t keylen,
-		      void *msg_opaque);
-
-static RD_INLINE RD_UNUSED void rd_kafka_msgq_init (rd_kafka_msgq_t *rkmq) {
-	TAILQ_INIT(&rkmq->rkmq_msgs);
-	rd_atomic32_init(&rkmq->rkmq_msg_cnt, 0);
-	rd_atomic64_init(&rkmq->rkmq_msg_bytes, 0);
-}
-
-/**
- * Concat all elements of 'src' onto tail of 'dst'.
- * 'src' will be cleared.
- * Proper locks for 'src' and 'dst' must be held.
- */
-static RD_INLINE RD_UNUSED void rd_kafka_msgq_concat (rd_kafka_msgq_t *dst,
-						   rd_kafka_msgq_t *src) {
-	TAILQ_CONCAT(&dst->rkmq_msgs, &src->rkmq_msgs, rkm_link);
-	rd_atomic32_add(&dst->rkmq_msg_cnt, rd_atomic32_get(&src->rkmq_msg_cnt));
-	rd_atomic64_add(&dst->rkmq_msg_bytes, rd_atomic64_get(&src->rkmq_msg_bytes));
-	rd_kafka_msgq_init(src);
-}
-
-/**
- * Move queue 'src' to 'dst' (overwrites dst)
- * Source will be cleared.
- */
-static RD_INLINE RD_UNUSED void rd_kafka_msgq_move (rd_kafka_msgq_t *dst,
-						 rd_kafka_msgq_t *src) {
-	TAILQ_MOVE(&dst->rkmq_msgs, &src->rkmq_msgs, rkm_link);
-	rd_atomic32_set(&dst->rkmq_msg_cnt, rd_atomic32_get(&src->rkmq_msg_cnt));
-	rd_atomic64_set(&dst->rkmq_msg_bytes, rd_atomic64_get(&src->rkmq_msg_bytes));
-	rd_kafka_msgq_init(src);
-}
-
-
-/**
- * rd_free all msgs in msgq and reinitialize the msgq.
- */
-static RD_INLINE RD_UNUSED void rd_kafka_msgq_purge (rd_kafka_t *rk,
-                                                    rd_kafka_msgq_t *rkmq) {
-	rd_kafka_msg_t *rkm, *next;
-
-	next = TAILQ_FIRST(&rkmq->rkmq_msgs);
-	while (next) {
-		rkm = next;
-		next = TAILQ_NEXT(next, rkm_link);
-
-		rd_kafka_msg_destroy(rk, rkm);
-	}
-
-	rd_kafka_msgq_init(rkmq);
-}
-
-
-/**
- * Remove message from message queue
- */
-static RD_INLINE RD_UNUSED 
-rd_kafka_msg_t *rd_kafka_msgq_deq (rd_kafka_msgq_t *rkmq,
-				   rd_kafka_msg_t *rkm,
-				   int do_count) {
-	if (likely(do_count)) {
-		rd_kafka_assert(NULL, rd_atomic32_get(&rkmq->rkmq_msg_cnt) > 0);
-		rd_kafka_assert(NULL, rd_atomic64_get(&rkmq->rkmq_msg_bytes) >= (int64_t)(rkm->rkm_len+rkm->rkm_key_len));
-		rd_atomic32_sub(&rkmq->rkmq_msg_cnt, 1);
-		rd_atomic64_sub(&rkmq->rkmq_msg_bytes,
-				rkm->rkm_len+rkm->rkm_key_len);
-	}
-
-	TAILQ_REMOVE(&rkmq->rkmq_msgs, rkm, rkm_link);
-
-	return rkm;
-}
-
-static RD_INLINE RD_UNUSED
-rd_kafka_msg_t *rd_kafka_msgq_pop (rd_kafka_msgq_t *rkmq) {
-	rd_kafka_msg_t *rkm;
-
-	if (((rkm = TAILQ_FIRST(&rkmq->rkmq_msgs))))
-		rd_kafka_msgq_deq(rkmq, rkm, 1);
-
-	return rkm;
-}
-
-/**
- * Insert message at head of message queue.
- */
-static RD_INLINE RD_UNUSED void rd_kafka_msgq_insert (rd_kafka_msgq_t *rkmq,
-						   rd_kafka_msg_t *rkm) {
-	TAILQ_INSERT_HEAD(&rkmq->rkmq_msgs, rkm, rkm_link);
-	rd_atomic32_add(&rkmq->rkmq_msg_cnt, 1);
-	rd_atomic64_add(&rkmq->rkmq_msg_bytes, rkm->rkm_len+rkm->rkm_key_len);
-}
-
-/**
- * Append message to tail of message queue.
- */
-static RD_INLINE RD_UNUSED void rd_kafka_msgq_enq (rd_kafka_msgq_t *rkmq,
-						rd_kafka_msg_t *rkm) {
-	TAILQ_INSERT_TAIL(&rkmq->rkmq_msgs, rkm, rkm_link);
-	rd_atomic32_add(&rkmq->rkmq_msg_cnt, 1);
-	rd_atomic64_add(&rkmq->rkmq_msg_bytes, rkm->rkm_len+rkm->rkm_key_len);
-}
-
-
-/**
- * Scans a message queue for timed out messages and removes them from
- * 'rkmq' and adds them to 'timedout', returning the number of timed out
- * messages.
- * 'timedout' must be initialized.
- */
-int rd_kafka_msgq_age_scan (rd_kafka_msgq_t *rkmq,
-			    rd_kafka_msgq_t *timedout,
-			    rd_ts_t now);
-
-
-int rd_kafka_msg_partitioner (rd_kafka_itopic_t *rkt, rd_kafka_msg_t *rkm,
-                              int do_lock);
-
-
-rd_kafka_message_t *rd_kafka_message_get (struct rd_kafka_op_s *rko);
-rd_kafka_message_t *rd_kafka_message_get_from_rkm (struct rd_kafka_op_s *rko,
-                                                   rd_kafka_msg_t *rkm);
-rd_kafka_message_t *rd_kafka_message_new (void);
-

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdkafka_msgset.h
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdkafka_msgset.h b/thirdparty/librdkafka-0.11.1/src/rdkafka_msgset.h
deleted file mode 100644
index 6edee1b..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdkafka_msgset.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2017 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#pragma once
-
-
-/**
- * @name MessageSet writers
- */
-rd_kafka_buf_t *
-rd_kafka_msgset_create_ProduceRequest (rd_kafka_broker_t *rkb,
-                                       rd_kafka_toppar_t *rktp,
-                                       size_t *MessageSetSizep);
-
-/**
- * @name MessageSet readers
- */
-rd_kafka_resp_err_t
-rd_kafka_msgset_parse (rd_kafka_buf_t *rkbuf,
-                       rd_kafka_buf_t *request,
-                       rd_kafka_toppar_t *rktp,
-                       const struct rd_kafka_toppar_ver *tver);


[09/51] [partial] nifi-minifi-cpp git commit: MINIFICPP-512 - upgrade to librdkafka 0.11.4

Posted by ph...@apache.org.
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/INTRODUCTION.md
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/INTRODUCTION.md b/thirdparty/librdkafka-0.11.4/INTRODUCTION.md
new file mode 100644
index 0000000..9b712a5
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/INTRODUCTION.md
@@ -0,0 +1,735 @@
+//@file INTRODUCTION.md
+# Introduction to librdkafka - the Apache Kafka C/C++ client library
+
+
+librdkafka is a high performance C implementation of the Apache
+Kafka client, providing a reliable and performant client for production use.
+librdkafka also provides a native C++ interface.
+
+## Contents
+
+The following chapters are available in this document
+
+  * Performance
+    * Performance numbers
+    * High throughput
+    * Low latency
+    * Compression
+  * Message reliability
+  * Usage
+    * Documentation
+    * Initialization
+    * Configuration
+    * Threads and callbacks
+    * Brokers
+    * Producer API
+    * Consumer API
+  * Appendix
+    * Test details
+  
+
+
+
+## Performance
+
+librdkafka is a multi-threaded library designed for use on modern hardware and
+it attempts to keep memory copying to a minimum. The payload of produced or
+consumed messages may pass through without any copying
+(if so desired by the application), putting no limit on message sizes.
+
+librdkafka allows you to decide if high throughput is the name of the game,
+or if a low latency service is required, all through the configuration
+property interface.
+
+The two most important configuration properties for performance tuning are:
+
+  * `batch.num.messages` - the minimum number of messages to wait for to
+	  accumulate in the local queue before sending off a message set.
+  * `queue.buffering.max.ms` - how long to wait for batch.num.messages to
+	  fill up in the local queue. A lower value improves latency at the
+          cost of lower throughput and higher per-message overhead.
+          A higher value improves throughput at the expense of latency.
+          The recommended value for high throughput is > 50ms.
+
+
+### Performance numbers
+
+The following performance numbers stem from tests using the following setup:
+
+  * Intel Quad Core i7 at 3.4GHz, 8GB of memory
+  * Disk performance has been shortcut by setting the brokers' flush
+	configuration properties as so:
+	* `log.flush.interval.messages=10000000`
+	* `log.flush.interval.ms=100000`
+  * Two brokers running on the same machine as librdkafka.
+  * One topic with two partitions.
+  * Each broker is leader for one partition each.
+  * Using `rdkafka_performance` program available in the `examples` subdir.
+
+
+
+	
+
+**Test results**
+
+  * **Test1**: 2 brokers, 2 partitions, required.acks=2, 100 byte messages: 
+	  **850000 messages/second**, **85 MB/second**
+
+  * **Test2**: 1 broker, 1 partition, required.acks=0, 100 byte messages: 
+	  **710000 messages/second**, **71 MB/second**
+	  
+  * **Test3**: 2 brokers, 2 partitions, required.acks=2, 100 byte messages,
+	  snappy compression:
+	  **300000 messages/second**, **30 MB/second**
+
+  * **Test4**: 2 brokers, 2 partitions, required.acks=2, 100 byte messages,
+	  gzip compression:
+	  **230000 messages/second**, **23 MB/second**
+
+
+
+**Note**: See the *Test details* chapter at the end of this document for
+	information about the commands executed, etc.
+
+**Note**: Consumer performance tests will be announced soon.
+
+
+### High throughput
+
+The key to high throughput is message batching - waiting for a certain number
+of messages to accumulate in the local queue before sending them off in
+one large message set or batch to the peer. This amortizes the messaging
+overhead and eliminates the adverse effect of the round trip time (rtt).
+
+`queue.buffering.max.ms` (also called `linger.ms`) allows librdkafka to
+wait up to the specified amount of time to accumulate up to
+`batch.num.messages` in a single batch (MessageSet) before sending
+to the broker. The larger the batch the higher the throughput.
+Enabling `msg` debugging (set `debug` property to `msg`) will emit log
+messages for the accumulation process which lets you see what batch sizes
+are being produced.
+
+Example using `queue.buffering.max.ms=1`:
+
+```
+... test [0]: MessageSet with 1514 message(s) delivered
+... test [3]: MessageSet with 1690 message(s) delivered
+... test [0]: MessageSet with 1720 message(s) delivered
+... test [3]: MessageSet with 2 message(s) delivered
+... test [3]: MessageSet with 4 message(s) delivered
+... test [0]: MessageSet with 4 message(s) delivered
+... test [3]: MessageSet with 11 message(s) delivered
+```
+
+Example using `queue.buffering.max.ms=1000`:
+```
+... test [0]: MessageSet with 10000 message(s) delivered
+... test [0]: MessageSet with 10000 message(s) delivered
+... test [0]: MessageSet with 4667 message(s) delivered
+... test [3]: MessageSet with 10000 message(s) delivered
+... test [3]: MessageSet with 10000 message(s) delivered
+... test [3]: MessageSet with 4476 message(s) delivered
+
+```
+
+
+The default setting of `queue.buffering.max.ms=1` is not suitable for
+high throughput; it is recommended to set this value to >50ms, with
+throughput leveling out somewhere around 100-1000ms depending on
+message produce pattern and sizes.
+
+These settings are set globally (`rd_kafka_conf_t`) but apply on a
+per topic+partition basis.
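+
+A minimal sketch of raising these two properties (the values shown are only
+illustrative, not recommendations beyond the guidance above):
+
+    rd_kafka_conf_t *conf = rd_kafka_conf_new();
+    char errstr[512];
+
+    /* Wait up to 100 ms to fill batches of up to 10000 messages. */
+    rd_kafka_conf_set(conf, "queue.buffering.max.ms", "100",
+                      errstr, sizeof(errstr));
+    rd_kafka_conf_set(conf, "batch.num.messages", "10000",
+                      errstr, sizeof(errstr));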
+
+
+### Low latency
+
+When low latency messaging is required, `queue.buffering.max.ms` should be
+tuned to the maximum permitted producer-side latency.
+Setting `queue.buffering.max.ms` to 1 will make sure messages are sent as
+soon as possible. See [How to decrease message latency](https://github.com/edenhill/librdkafka/wiki/How-to-decrease-message-latency)
+for more details.
+Lower buffering time leads to smaller batches and larger per-message overheads,
+increasing network, memory and CPU usage for producers, brokers and consumers.
+
+
+### Compression
+
+Producer message compression is enabled through the `compression.codec`
+configuration property.
+
+Compression is performed on the batch of messages in the local queue; the
+larger the batch, the higher the likelihood of a better compression ratio.
+The local batch queue size is controlled through the `batch.num.messages` and
+`queue.buffering.max.ms` configuration properties as described in the
+**High throughput** chapter above.
+
+
+
+## Message reliability
+
+Message reliability is an important factor of librdkafka - an application
+can rely fully on librdkafka to deliver a message according to the specified
+configuration (`request.required.acks` and `message.send.max.retries`, etc).
+
+If the topic configuration property `request.required.acks` is set to wait
+for message commit acknowledgements from brokers (any value but 0, see
+[`CONFIGURATION.md`](https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md)
+for specifics) then librdkafka will hold on to the message until
+all expected acks have been received, gracefully handling the following events:
+
+  * Broker connection failure
+  * Topic leader change
+  * Produce errors signaled by the broker
+  * Network problems
+
+This is handled automatically by librdkafka and the application does not need
+to take any action at any of the above events.
+The message will be resent up to `message.send.max.retries` times before
+reporting a failure back to the application.
+
+The delivery report callback is used by librdkafka to signal the status of
+a message back to the application; it will be called once for each message
+to report the status of message delivery:
+
+  * If `error_code` is non-zero the message delivery failed and the error_code
+    indicates the nature of the failure (`rd_kafka_resp_err_t` enum).
+  * If `error_code` is zero the message has been successfully delivered.
+
+See the Producer API chapter for more details on delivery report callback usage.
+
+The delivery report callback is optional but highly recommended.
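+
+A minimal sketch of wiring this up (the callback name `dr_msg_cb` is an
+illustrative choice; includes and error handling are abbreviated):
+
+    static void dr_msg_cb (rd_kafka_t *rk,
+                           const rd_kafka_message_t *rkmessage, void *opaque) {
+            if (rkmessage->err)
+                    fprintf(stderr, "%% Delivery failed: %s\n",
+                            rd_kafka_err2str(rkmessage->err));
+            /* The rkmessage itself is freed by librdkafka after this returns. */
+    }
+
+    /* Register before creating the rd_kafka_t handle ... */
+    rd_kafka_conf_set_dr_msg_cb(conf, dr_msg_cb);
+
+    /* ... and serve the callbacks regularly from the application: */
+    rd_kafka_poll(rk, 0);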
+
+
+### Producer message delivery success
+
+When a ProduceRequest is successfully handled by the broker and a
+ProduceResponse is received (also called the ack) without an error code,
+the messages from the ProduceRequest are enqueued on the delivery report
+queue (if a delivery report callback has been set) and will be passed to
+the application on the next invocation of rd_kafka_poll().
+
+
+### Producer message delivery failure
+
+The following sub-chapters explain how different produce errors
+are handled.
+
+If the error is retryable and there are remaining retry attempts for
+the given message(s), an automatic retry will be scheduled by librdkafka;
+these retries are not visible to the application.
+
+Only permanent errors and temporary errors that have reached their maximum
+retry count will generate a delivery report event to the application with an
+error code set.
+
+The application should typically not attempt to retry producing the message
+on failure, but instead configure librdkafka to perform these retries
+using the `retries` and `retry.backoff.ms` configuration properties.
+
+
+#### Error: Timed out in transmission queue
+
+Internal error ERR__TIMED_OUT_QUEUE.
+
+The connectivity to the broker may be stalled due to networking contention,
+local or remote system issues, etc, and the request has not yet been sent.
+
+The producer can be certain that the message has not been sent to the broker.
+
+This is a retryable error, but is not counted as a retry attempt
+since the message was never actually transmitted.
+
+A retry by librdkafka at this point will not cause duplicate messages.
+
+
+#### Error: Timed out in flight to/from broker
+
+Internal error ERR__TIMED_OUT, ERR__TRANSPORT.
+
+Same reasons as for `Timed out in transmission queue` above, with the
+difference that the message may have been sent to the broker and might
+be stalling waiting for broker replicas to ack the message, or the response
+could be stalled due to networking issues.
+At this point the producer can't know if the message reached the broker,
+nor if the broker wrote the message to disk and replicas.
+
+This is a retryable error.
+
+A retry by librdkafka at this point may cause duplicate messages.
+
+
+#### Error: Temporary broker-side error
+
+Broker errors ERR_REQUEST_TIMED_OUT, ERR_NOT_ENOUGH_REPLICAS,
+ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND.
+
+These errors are considered temporary and librdkafka will retry them
+if permitted by configuration.
+
+
+#### Error: Temporary errors due to stale metadata
+
+Broker errors ERR_LEADER_NOT_AVAILABLE, ERR_NOT_LEADER_FOR_PARTITION.
+
+These errors are considered temporary and a retry is warranted; a metadata
+request is automatically sent to find a new leader for the partition.
+
+A retry by librdkafka at this point will not cause duplicate messages.
+
+
+#### Error: Local time out
+
+Internal error ERR__MSG_TIMED_OUT.
+
+The message could not be successfully transmitted before `message.timeout.ms`
+expired, typically due to no leader being available or no broker connection.
+The message may have been retried due to other errors but
+those error messages are abstracted by the ERR__MSG_TIMED_OUT error code.
+
+Since the `message.timeout.ms` has passed there will be no more retries
+by librdkafka.
+
+
+#### Error: Permanent errors
+
+Any other error is considered a permanent error and the message
+will fail immediately, generating a delivery report event with the
+distinctive error code.
+
+The full list of permanent errors depends on the broker version and
+will likely grow in the future.
+
+Typical permanent broker errors are:
+ * ERR_CORRUPT_MESSAGE
+ * ERR_MSG_SIZE_TOO_LARGE  - adjust client's or broker's `message.max.bytes`.
+ * ERR_UNKNOWN_TOPIC_OR_PART - topic or partition does not exist,
+                               automatic topic creation is disabled on the
+                               broker or the application is specifying a
+                               partition that does not exist.
+ * ERR_RECORD_LIST_TOO_LARGE
+ * ERR_INVALID_REQUIRED_ACKS
+ * ERR_TOPIC_AUTHORIZATION_FAILED
+ * ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT
+ * ERR_CLUSTER_AUTHORIZATION_FAILED
+
+
+### Producer retries
+
+The ProduceRequest itself is not retried; instead, the messages
+are put back on the internal partition queue by an insert sort
+that maintains their original position (the message order is defined
+at the time a message is initially appended to a partition queue, i.e., after
+partitioning).
+A backoff time (`retry.backoff.ms`) is set on the retried messages which
+effectively blocks retry attempts until the backoff time has expired.
+
+
+### Reordering
+
+As for all retries, if `max.in.flight` > 1 and `retries` > 0, retried messages
+may be produced out of order, since a subsequent message in a subsequent
+ProduceRequest may already be in-flight (and accepted by the broker)
+by the time the retry for the failing message is sent.
+
+
+
+
+## Usage
+
+### Documentation
+
+The librdkafka API is documented in the
+[`rdkafka.h`](https://github.com/edenhill/librdkafka/blob/master/src/rdkafka.h)
+header file, and the configuration properties are documented in
+[`CONFIGURATION.md`](https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md).
+
+### Initialization
+
+The application needs to instantiate a top-level object `rd_kafka_t` which is
+the base container, providing global configuration and shared state.
+It is created by calling `rd_kafka_new()`.
+
+It also needs to instantiate one or more topics (`rd_kafka_topic_t`) to be used
+for producing to or consuming from. The topic object holds topic-specific
+configuration and will be internally populated with a mapping of all available
+partitions and their leader brokers.
+It is created by calling `rd_kafka_topic_new()`.
+
+Both `rd_kafka_t` and `rd_kafka_topic_t` come with a configuration API which
+is optional.
+Not using the API will cause librdkafka to use its default values which are
+documented in [`CONFIGURATION.md`](https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md).
+
+**Note**: An application may create multiple `rd_kafka_t` objects and
+	they share no state.
+
+**Note**: An `rd_kafka_topic_t` object may only be used with the `rd_kafka_t`
+	object it was created from.
+
+
+
+### Configuration
+
+To ease integration with the official Apache Kafka software and lower
+the learning curve, librdkafka implements the same configuration
+properties as those found in the official Apache Kafka clients.
+
+Configuration is applied prior to object creation using the
+`rd_kafka_conf_set()` and `rd_kafka_topic_conf_set()` APIs.
+
+**Note**: The `rd_kafka.._conf_t` objects are not reusable after they have been
+	passed to `rd_kafka.._new()`.
+	The application does not need to free any config resources after a
+	`rd_kafka.._new()` call.
+
+#### Example
+
+    rd_kafka_conf_t *conf;
+    char errstr[512];
+
+    conf = rd_kafka_conf_new();
+    rd_kafka_conf_set(conf, "compression.codec", "snappy", errstr, sizeof(errstr));
+    rd_kafka_conf_set(conf, "batch.num.messages", "100", errstr, sizeof(errstr));
+
+    rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
+
+
+### Threads and callbacks
+
+librdkafka uses multiple threads internally to fully utilize modern hardware.
+The API is completely thread-safe and the calling application may call any
+of the API functions from any of its own threads at any time.
+
+A poll-based API is used to provide signaling back to the application;
+the application should call rd_kafka_poll() at regular intervals.
+The poll API will call the following configured callbacks (optional):
+
+  * message delivery report callback - signals that a message has been
+    delivered or failed delivery, allowing the application to take action
+    and to release any application resources used in the message.
+  * error callback - signals an error. These errors are usually of an
+    informational nature, e.g., failure to connect to a broker, and the
+    application usually does not need to take any action.
+    The type of error is passed as an rd_kafka_resp_err_t enum value,
+    including both remote broker errors as well as local failures.
+
+
+Optional callbacks not triggered by poll; these may be called from any thread:
+
+  * Logging callback - allows the application to output log messages
+	  generated by librdkafka.
+  * partitioner callback - application provided message partitioner.
+      The partitioner may be called in any thread at any time, and it may be
+      called multiple times for the same key. A minimal sketch is shown
+      after this list.
+      Partitioner function constraints:
+      * MUST NOT call any rd_kafka_*() functions
+      * MUST NOT block or execute for prolonged periods of time.
+      * MUST return a value between 0 and partition_cnt-1, or the
+        special RD_KAFKA_PARTITION_UA value if partitioning
+        could not be performed.
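+
+A minimal partitioner sketch under the constraints above (the function name
+`my_partitioner` and the `topic_conf` variable are illustrative, not part of
+the library API):
+
+    static int32_t my_partitioner (const rd_kafka_topic_t *rkt,
+                                   const void *keydata, size_t keylen,
+                                   int32_t partition_cnt,
+                                   void *rkt_opaque, void *msg_opaque) {
+            unsigned int sum = 0;
+            size_t i;
+
+            if (partition_cnt <= 0)
+                    return RD_KAFKA_PARTITION_UA; /* cannot partition */
+
+            /* Trivial byte-sum hash; keyless messages map to partition 0. */
+            for (i = 0 ; i < keylen ; i++)
+                    sum += ((const unsigned char *)keydata)[i];
+
+            return (int32_t)(sum % (unsigned int)partition_cnt);
+    }
+
+    /* Register on the topic configuration before rd_kafka_topic_new(): */
+    rd_kafka_topic_conf_set_partitioner_cb(topic_conf, my_partitioner);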
+
+
+
+### Brokers
+
+librdkafka only needs an initial list of brokers (at least one), called the
+bootstrap brokers.
+It will connect to all the bootstrap brokers, specified by the
+`metadata.broker.list` configuration property or by `rd_kafka_brokers_add()`,
+and query each one for Metadata information which contains the full list of
+brokers, topic, partitions and their leaders in the Kafka cluster.
+
+Broker names are specified as `host[:port]` where the port is optional 
+(default 9092) and the host is either a resolvable hostname or an IPv4 or IPv6
+address.
+If the host resolves to multiple addresses librdkafka will round-robin the
+addresses for each connection attempt.
+A DNS record containing all broker addresses can thus be used to provide a
+reliable bootstrap broker.
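+
+For illustration only (host names are placeholders), bootstrap brokers may be
+supplied either through configuration or at runtime:
+
+    rd_kafka_conf_set(conf, "metadata.broker.list",
+                      "broker1.example.com:9092,broker2.example.com:9092",
+                      errstr, sizeof(errstr));
+
+    /* or, on an already created rd_kafka_t handle: */
+    rd_kafka_brokers_add(rk, "broker3.example.com:9092");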
+
+### Feature discovery
+
+Apache Kafka broker version 0.10.0 added support for the ApiVersionRequest API
+which allows a client to query a broker for its range of supported API versions.
+
+librdkafka supports this functionality and will query each broker on connect
+for this information (if `api.version.request=true`) and use it to enable or disable
+various protocol features, such as MessageVersion 1 (timestamps), KafkaConsumer, etc.
+
+If the broker fails to respond to the ApiVersionRequest librdkafka will
+assume the broker is too old to support the API and fall back to an older
+broker version's API. These fallback versions are hardcoded in librdkafka
+and are controlled by the `broker.version.fallback` configuration property.
+
+
+
+### Producer API
+
+After setting up the `rd_kafka_t` object with type `RD_KAFKA_PRODUCER` and one
+or more `rd_kafka_topic_t` objects librdkafka is ready for accepting messages
+to be produced and sent to brokers.
+
+The `rd_kafka_produce()` function takes the following arguments:
+
+  * `rkt` - the topic to produce to, previously created with
+	  `rd_kafka_topic_new()`
+  * `partition` - partition to produce to. If this is set to
+	  `RD_KAFKA_PARTITION_UA` (UnAssigned) then the configured partitioner
+		  function will be used to select a target partition.
+  * `msgflags` - 0, or one of:
+	  * `RD_KAFKA_MSG_F_COPY` - librdkafka will immediately make a copy of
+	    the payload. Use this when the payload is in non-persistent
+	    memory, such as the stack.
+	  * `RD_KAFKA_MSG_F_FREE` - let librdkafka free the payload using
+	    `free(3)` when it is done with it.
+	
+    These two flags are mutually exclusive and neither needs to be set,
+    in which case the payload is neither copied nor freed by librdkafka.
+
+    If the `RD_KAFKA_MSG_F_COPY` flag is not set no data copying will be
+    performed and librdkafka will hold on to the payload pointer until
+    the message has been delivered or fails.
+    The delivery report callback will be called when librdkafka is done
+    with the message to let the application regain ownership of the
+    payload memory.
+    The application must not free the payload in the delivery report
+    callback if `RD_KAFKA_MSG_F_FREE` is set.
+  * `payload`,`len` - the message payload
+  * `key`,`keylen` - an optional message key which can be used for partitioning.
+	  It will be passed to the topic partitioner callback, if any, and
+	  will be attached to the message when sending to the broker.
+  * `msg_opaque` - an optional application-provided per-message opaque pointer
+	  that will be provided in the message delivery callback to let
+	  the application reference a specific message.
+
+
+`rd_kafka_produce()` is a non-blocking API; it will enqueue the message
+on an internal queue and return immediately.
+If the number of queued messages would exceed the `queue.buffering.max.messages`
+configuration property then `rd_kafka_produce()` returns -1 and sets errno
+to `ENOBUFS` and last_error to `RD_KAFKA_RESP_ERR__QUEUE_FULL`, thus
+providing a backpressure mechanism.
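+
+A compact sketch (assuming `rk` and `rkt` were created as described above and
+that the payload string is just a placeholder; includes are abbreviated):
+
+    const char *payload = "hello";
+
+    if (rd_kafka_produce(rkt, RD_KAFKA_PARTITION_UA, RD_KAFKA_MSG_F_COPY,
+                         (void *)payload, strlen(payload),
+                         NULL, 0,      /* no key */
+                         NULL) == -1)  /* no per-message opaque */
+            fprintf(stderr, "%% Produce failed: %s\n",
+                    rd_kafka_err2str(rd_kafka_last_error()));
+
+    /* Serve delivery reports and other queued callbacks. */
+    rd_kafka_poll(rk, 0);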
+
+
+**Note**: See `examples/rdkafka_performance.c` for a producer implementation.
+
+
+### Simple Consumer API (legacy)
+
+NOTE: For the high-level KafkaConsumer interface see rd_kafka_subscribe (rdkafka.h) or KafkaConsumer (rdkafkacpp.h)
+
+The consumer API is a bit more stateful than the producer API.
+After creating `rd_kafka_t` with type `RD_KAFKA_CONSUMER` and
+`rd_kafka_topic_t` instances the application must also start the consumer
+for a given partition by calling `rd_kafka_consume_start()`.
+
+`rd_kafka_consume_start()` arguments:
+
+  * `rkt` - the topic to start consuming from, previously created with
+    	  `rd_kafka_topic_new()`.
+  * `partition` - partition to consume from.
+  * `offset` - message offset to start consuming from. This may either be an
+    	     absolute message offset or one of the special offsets:
+	     `RD_KAFKA_OFFSET_BEGINNING` to start consuming from the beginning
+	     of the partition's queue (oldest message),
+	     `RD_KAFKA_OFFSET_END` to start consuming at the next message to be
+	     produced to the partition, or
+	     `RD_KAFKA_OFFSET_STORED` to use the offset store.
+
+After a topic+partition consumer has been started librdkafka will attempt
+to keep `queued.min.messages` messages in the local queue by repeatedly
+fetching batches of messages from the broker.
+
+This local message queue is then served to the application through three
+different consume APIs:
+
+  * `rd_kafka_consume()` - consumes a single message
+  * `rd_kafka_consume_batch()` - consumes one or more messages
+  * `rd_kafka_consume_callback()` - consumes all messages in the local
+    queue and calls a callback function for each one.
+
+These three APIs are listed above in ascending order of performance,
+`rd_kafka_consume()` being the slowest and `rd_kafka_consume_callback()` being
+the fastest. The different consume variants are provided to cater for different
+application needs.
+
+A consumed message, as provided or returned by each of the consume functions,
+is represented by the `rd_kafka_message_t` type.
+
+`rd_kafka_message_t` members:
+
+  * `err` - Error signaling back to the application. If this field is non-zero
+    	  the `payload` field should be considered an error message and
+	  `err` is an error code (`rd_kafka_resp_err_t`).
+    If `err` is zero then the message is a proper fetched message
+    and `payload` etc. contain message payload data.
+  * `rkt`,`partition` - Topic and partition for this message or error.
+  * `payload`,`len` - Message payload data or error message (err!=0).
+  * `key`,`key_len` - Optional message key as specified by the producer
+  * `offset` - Message offset
+
+Both the `payload` and `key` memory, as well as the message as a whole, are
+owned by librdkafka and must not be used after an `rd_kafka_message_destroy()`
+call. librdkafka will share the same messageset receive buffer memory for all
+message payloads of that messageset to avoid excessive copying, which means
+that if the application decides to hang on to a single `rd_kafka_message_t`
+it will prevent the backing memory from being released for all other messages
+from the same messageset.
+
+When the application is done consuming messages from a topic+partition it
+should call `rd_kafka_consume_stop()` to stop the consumer. This will also
+purge any messages currently in the local queue.
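+
+A compact sketch of the consume loop (assuming `rkt`, `partition` and the
+`run` flag are set up elsewhere; includes and error handling are abbreviated):
+
+    rd_kafka_consume_start(rkt, partition, RD_KAFKA_OFFSET_BEGINNING);
+
+    while (run) {
+            rd_kafka_message_t *rkmessage;
+
+            rkmessage = rd_kafka_consume(rkt, partition, 1000 /*ms*/);
+            if (!rkmessage)
+                    continue;  /* timeout or error: nothing to do yet */
+
+            if (rkmessage->err)
+                    fprintf(stderr, "%% Consume error: %s\n",
+                            rd_kafka_err2str(rkmessage->err));
+            else
+                    printf("Received %zu bytes at offset %lld\n",
+                           rkmessage->len, (long long)rkmessage->offset);
+
+            rd_kafka_message_destroy(rkmessage);
+    }
+
+    rd_kafka_consume_stop(rkt, partition);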
+
+
+**Note**: See `examples/rdkafka_performance.c` for a consumer implementation.
+
+
+#### Offset management
+
+Broker based offset management is available for broker version >= 0.9.0
+in conjunction with the high-level KafkaConsumer interface (see
+rdkafka.h or rdkafkacpp.h).
+
+Offset management is also available through a local offset file store, where the
+offset is periodically written to a local file for each topic+partition
+according to the following topic configuration properties:
+
+  * `auto.commit.enable`
+  * `auto.commit.interval.ms`
+  * `offset.store.path`
+  * `offset.store.sync.interval.ms`
+
+There is currently no support for offset management with ZooKeeper.
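+
+A minimal sketch of enabling the local offset file store (the path and
+interval are only illustrative values):
+
+    rd_kafka_topic_conf_t *topic_conf = rd_kafka_topic_conf_new();
+    char errstr[512];
+
+    rd_kafka_topic_conf_set(topic_conf, "auto.commit.enable", "true",
+                            errstr, sizeof(errstr));
+    rd_kafka_topic_conf_set(topic_conf, "auto.commit.interval.ms", "5000",
+                            errstr, sizeof(errstr));
+    rd_kafka_topic_conf_set(topic_conf, "offset.store.path",
+                            "/var/lib/myapp/offsets", errstr, sizeof(errstr));
+
+    /* topic_conf is then passed to rd_kafka_topic_new(). */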
+
+
+
+#### Consumer groups
+
+Broker based consumer groups (requires Apache Kafka broker >=0.9) are supported,
+see KafkaConsumer in rdkafka.h or rdkafkacpp.h
+
+
+### Topics
+
+#### Topic auto creation
+
+Topic auto creation is supported by librdkafka.
+The broker needs to be configured with `auto.create.topics.enable=true`.
+
+
+
+### Metadata
+
+#### < 0.9.3
+Prior to the 0.9.3 release librdkafka's metadata handling
+was chatty and excessive, which usually isn't a problem in small
+to medium-sized clusters, but in large clusters with a large number
+of librdkafka clients the metadata requests could hog broker CPU and bandwidth.
+
+#### >= 0.9.3
+
+The remaining Metadata sections describe the current behaviour.
+
+**Note:** "Known topics" in the following section means topics for
+          locally created `rd_kafka_topic_t` objects.
+
+
+#### Query reasons
+
+There are four reasons to query metadata:
+
+ * brokers - update/populate cluster broker list, so the client can
+             find and connect to any new brokers added.
+
+ * specific topic - find leader or partition count for specific topic
+
+ * known topics - same, but for all locally known topics.
+
+ * all topics - get topic names for consumer group wildcard subscription
+                matching
+
+The above list is sorted so that the subsequent entries contain the
+information above, e.g., 'known topics' contains enough information to
+also satisfy 'specific topic' and 'brokers'.
+
+
+#### Caching strategy
+
+The prevalent cache timeout is `metadata.max.age.ms`; any cached entry
+will remain authoritative for this long or until a relevant broker error
+is returned.
+
+
+ * brokers - eternally cached, the broker list is additive.
+
+ * topics - cached for `metadata.max.age.ms`
+
+
+
+
+## Appendix
+
+### Test details
+
+#### Test1: Produce to two brokers, two partitions, required.acks=2, 100 byte messages
+
+Each broker is leader for one of the two partitions.
+The random partitioner is used (default) and each broker and partition is
+assigned approximately 250000 messages.
+
+**Command:**
+
+    # examples/rdkafka_performance -P -t test2 -s 100 -c 500000 -m "_____________Test1:TwoBrokers:500kmsgs:100bytes" -S 1 -a 2
+	....
+    % 500000 messages and 50000000 bytes sent in 587ms: 851531 msgs/s and 85.15 Mb/s, 0 messages failed, no compression
+
+**Result:**
+
+Message transfer rate is approximately **850000 messages per second**,
+**85 megabytes per second**.
+
+
+
+#### Test2: Produce to one broker, one partition, required.acks=0, 100 byte messages
+
+**Command:**
+
+    # examples/rdkafka_performance -P -t test2 -s 100 -c 500000 -m "_____________Test2:OneBrokers:500kmsgs:100bytes" -S 1 -a 0 -p 1
+	....
+	% 500000 messages and 50000000 bytes sent in 698ms: 715994 msgs/s and 71.60 Mb/s, 0 messages failed, no compression
+
+**Result:**
+
+Message transfer rate is approximately **710000 messages per second**,
+**71 megabytes per second**.
+
+
+
+#### Test3: Produce to two brokers, two partitions, required.acks=2, 100 byte messages, snappy compression
+
+**Command:**
+
+	# examples/rdkafka_performance -P -t test2 -s 100 -c 500000 -m "_____________Test3:TwoBrokers:500kmsgs:100bytes:snappy" -S 1 -a 2 -z snappy
+	....
+	% 500000 messages and 50000000 bytes sent in 1672ms: 298915 msgs/s and 29.89 Mb/s, 0 messages failed, snappy compression
+
+**Result:**
+
+Message transfer rate is approximately **300000 messages per second**,
+**30 megabytes per second**.
+
+
+#### Test4: Produce to two brokers, two partitions, required.acks=2, 100 byte messages, gzip compression
+
+**Command:**
+
+	# examples/rdkafka_performance -P -t test2 -s 100 -c 500000 -m "_____________Test3:TwoBrokers:500kmsgs:100bytes:gzip" -S 1 -a 2 -z gzip
+	....
+	% 500000 messages and 50000000 bytes sent in 2111ms: 236812 msgs/s and 23.68 Mb/s, 0 messages failed, gzip compression
+
+**Result:**
+
+Message transfer rate is approximately **230000 messages per second**,
+**23 megabytes per second**.
+

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/LICENSE
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/LICENSE b/thirdparty/librdkafka-0.11.4/LICENSE
new file mode 100644
index 0000000..ba78cc2
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/LICENSE
@@ -0,0 +1,25 @@
+librdkafka - Apache Kafka C driver library
+
+Copyright (c) 2012, Magnus Edenhill
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met: 
+
+1. Redistributions of source code must retain the above copyright notice,
+   this list of conditions and the following disclaimer. 
+2. Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution. 
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/LICENSE.crc32c
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/LICENSE.crc32c b/thirdparty/librdkafka-0.11.4/LICENSE.crc32c
new file mode 100644
index 0000000..482a345
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/LICENSE.crc32c
@@ -0,0 +1,28 @@
+# For src/crc32c.c copied (with modifications) from
+# http://stackoverflow.com/a/17646775/1821055
+
+/* crc32c.c -- compute CRC-32C using the Intel crc32 instruction
+ * Copyright (C) 2013 Mark Adler
+ * Version 1.1  1 Aug 2013  Mark Adler
+ */
+
+/*
+  This software is provided 'as-is', without any express or implied
+  warranty.  In no event will the author be held liable for any damages
+  arising from the use of this software.
+
+  Permission is granted to anyone to use this software for any purpose,
+  including commercial applications, and to alter it and redistribute it
+  freely, subject to the following restrictions:
+
+  1. The origin of this software must not be misrepresented; you must not
+     claim that you wrote the original software. If you use this software
+     in a product, an acknowledgment in the product documentation would be
+     appreciated but is not required.
+  2. Altered source versions must be plainly marked as such, and must not be
+     misrepresented as being the original software.
+  3. This notice may not be removed or altered from any source distribution.
+
+  Mark Adler
+  madler@alumni.caltech.edu
+ */

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/LICENSE.lz4
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/LICENSE.lz4 b/thirdparty/librdkafka-0.11.4/LICENSE.lz4
new file mode 100644
index 0000000..353dfb4
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/LICENSE.lz4
@@ -0,0 +1,26 @@
+src/xxhash.[ch] src/lz4*.[ch]: git@github.com:lz4/lz4.git e2827775ee80d2ef985858727575df31fc60f1f3
+
+LZ4 Library
+Copyright (c) 2011-2016, Yann Collet
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+  list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice, this
+  list of conditions and the following disclaimer in the documentation and/or
+  other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/LICENSE.murmur2
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/LICENSE.murmur2 b/thirdparty/librdkafka-0.11.4/LICENSE.murmur2
new file mode 100644
index 0000000..296fffa
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/LICENSE.murmur2
@@ -0,0 +1,25 @@
+parts of src/rdmurmur2.c: git@github.com:abrandoned/murmur2.git
+
+
+MurMurHash2 Library
+//-----------------------------------------------------------------------------
+// MurmurHash2 was written by Austin Appleby, and is placed in the public
+// domain. The author hereby disclaims copyright to this source code.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/LICENSE.pycrc
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/LICENSE.pycrc b/thirdparty/librdkafka-0.11.4/LICENSE.pycrc
new file mode 100644
index 0000000..71baded
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/LICENSE.pycrc
@@ -0,0 +1,23 @@
+The following license applies to the files rdcrc32.c and rdcrc32.h which
+have been generated by the pycrc tool.
+============================================================================
+
+Copyright (c) 2006-2012, Thomas Pircher <te...@gmx.net>
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/LICENSE.queue
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/LICENSE.queue b/thirdparty/librdkafka-0.11.4/LICENSE.queue
new file mode 100644
index 0000000..14bbf93
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/LICENSE.queue
@@ -0,0 +1,31 @@
+For sys/queue.h:
+
+ * Copyright (c) 1991, 1993
+ *	The Regents of the University of California.  All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *	@(#)queue.h	8.5 (Berkeley) 8/20/94
+ * $FreeBSD$
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/LICENSE.regexp
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/LICENSE.regexp b/thirdparty/librdkafka-0.11.4/LICENSE.regexp
new file mode 100644
index 0000000..5fa0b10
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/LICENSE.regexp
@@ -0,0 +1,5 @@
+regexp.c and regexp.h from https://github.com/ccxvii/minilibs sha 875c33568b5a4aa4fb3dd0c52ea98f7f0e5ca684
+
+"
+These libraries are in the public domain (or the equivalent where that is not possible). You can do anything you want with them. You have no legal obligation to do anything else, although I appreciate attribution.
+"

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/LICENSE.snappy
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/LICENSE.snappy b/thirdparty/librdkafka-0.11.4/LICENSE.snappy
new file mode 100644
index 0000000..baa6cfe
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/LICENSE.snappy
@@ -0,0 +1,36 @@
+######################################################################
+# LICENSE.snappy covers files: snappy.c, snappy.h, snappy_compat.h   #
+# originally retrieved from http://github.com/andikleen/snappy-c     #
+# git revision 8015f2d28739b9a6076ebaa6c53fe27bc238d219              #
+######################################################################
+
+The snappy-c code is under the same license as the original snappy source
+
+Copyright 2011 Intel Corporation All Rights Reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+    * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+    * Neither the name of Intel Corporation nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/LICENSE.tinycthread
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/LICENSE.tinycthread b/thirdparty/librdkafka-0.11.4/LICENSE.tinycthread
new file mode 100644
index 0000000..0ceadef
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/LICENSE.tinycthread
@@ -0,0 +1,26 @@
+From https://github.com/tinycthread/tinycthread/README.txt c57166cd510ffb5022dd5f127489b131b61441b9
+
+License
+-------
+
+Copyright (c) 2012 Marcus Geelnard
+              2013-2014 Evan Nemerson
+
+This software is provided 'as-is', without any express or implied
+warranty. In no event will the authors be held liable for any damages
+arising from the use of this software.
+
+Permission is granted to anyone to use this software for any purpose,
+including commercial applications, and to alter it and redistribute it
+freely, subject to the following restrictions:
+
+    1. The origin of this software must not be misrepresented; you must not
+    claim that you wrote the original software. If you use this software
+    in a product, an acknowledgment in the product documentation would be
+    appreciated but is not required.
+
+    2. Altered source versions must be plainly marked as such, and must not be
+    misrepresented as being the original software.
+
+    3. This notice may not be removed or altered from any source
+    distribution.

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/LICENSE.wingetopt
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/LICENSE.wingetopt b/thirdparty/librdkafka-0.11.4/LICENSE.wingetopt
new file mode 100644
index 0000000..4c28701
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/LICENSE.wingetopt
@@ -0,0 +1,49 @@
+For the files wingetopt.c wingetopt.h downloaded from https://github.com/alex85k/wingetopt
+
+/*
+ * Copyright (c) 2002 Todd C. Miller <To...@courtesan.com>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ * Sponsored in part by the Defense Advanced Research Projects
+ * Agency (DARPA) and Air Force Research Laboratory, Air Force
+ * Materiel Command, USAF, under agreement number F39502-99-1-0512.
+ */
+/*-
+ * Copyright (c) 2000 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Dieter Baron and Thomas Klausner.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/LICENSES.txt
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/LICENSES.txt b/thirdparty/librdkafka-0.11.4/LICENSES.txt
new file mode 100644
index 0000000..ee8a6f4
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/LICENSES.txt
@@ -0,0 +1,313 @@
+LICENSE
+--------------------------------------------------------------
+librdkafka - Apache Kafka C driver library
+
+Copyright (c) 2012, Magnus Edenhill
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met: 
+
+1. Redistributions of source code must retain the above copyright notice,
+   this list of conditions and the following disclaimer. 
+2. Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution. 
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+
+LICENSE.crc32c
+--------------------------------------------------------------
+# For src/crc32c.c copied (with modifications) from
+# http://stackoverflow.com/a/17646775/1821055
+
+/* crc32c.c -- compute CRC-32C using the Intel crc32 instruction
+ * Copyright (C) 2013 Mark Adler
+ * Version 1.1  1 Aug 2013  Mark Adler
+ */
+
+/*
+  This software is provided 'as-is', without any express or implied
+  warranty.  In no event will the author be held liable for any damages
+  arising from the use of this software.
+
+  Permission is granted to anyone to use this software for any purpose,
+  including commercial applications, and to alter it and redistribute it
+  freely, subject to the following restrictions:
+
+  1. The origin of this software must not be misrepresented; you must not
+     claim that you wrote the original software. If you use this software
+     in a product, an acknowledgment in the product documentation would be
+     appreciated but is not required.
+  2. Altered source versions must be plainly marked as such, and must not be
+     misrepresented as being the original software.
+  3. This notice may not be removed or altered from any source distribution.
+
+  Mark Adler
+  madler@alumni.caltech.edu
+ */
+
+
+LICENSE.lz4
+--------------------------------------------------------------
+src/xxhash.[ch] src/lz4*.[ch]: git@github.com:lz4/lz4.git e2827775ee80d2ef985858727575df31fc60f1f3
+
+LZ4 Library
+Copyright (c) 2011-2016, Yann Collet
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+  list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice, this
+  list of conditions and the following disclaimer in the documentation and/or
+  other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+LICENSE.murmur2
+--------------------------------------------------------------
+parts of src/rdmurmur2.c: git@github.com:abrandoned/murmur2.git
+
+
+MurMurHash2 Library
+//-----------------------------------------------------------------------------
+// MurmurHash2 was written by Austin Appleby, and is placed in the public
+// domain. The author hereby disclaims copyright to this source code.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+
+LICENSE.pycrc
+--------------------------------------------------------------
+The following license applies to the files rdcrc32.c and rdcrc32.h which
+have been generated by the pycrc tool.
+============================================================================
+
+Copyright (c) 2006-2012, Thomas Pircher <te...@gmx.net>
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
+
+LICENSE.queue
+--------------------------------------------------------------
+For sys/queue.h:
+
+ * Copyright (c) 1991, 1993
+ *	The Regents of the University of California.  All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *	@(#)queue.h	8.5 (Berkeley) 8/20/94
+ * $FreeBSD$
+
+LICENSE.regexp
+--------------------------------------------------------------
+regexp.c and regexp.h from https://github.com/ccxvii/minilibs sha 875c33568b5a4aa4fb3dd0c52ea98f7f0e5ca684
+
+"
+These libraries are in the public domain (or the equivalent where that is not possible). You can do anything you want with them. You have no legal obligation to do anything else, although I appreciate attribution.
+"
+
+
+LICENSE.snappy
+--------------------------------------------------------------
+######################################################################
+# LICENSE.snappy covers files: snappy.c, snappy.h, snappy_compat.h   #
+# originally retrieved from http://github.com/andikleen/snappy-c     #
+# git revision 8015f2d28739b9a6076ebaa6c53fe27bc238d219              #
+######################################################################
+
+The snappy-c code is under the same license as the original snappy source
+
+Copyright 2011 Intel Corporation All Rights Reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+    * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+    * Neither the name of Intel Corporation nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+
+LICENSE.tinycthread
+--------------------------------------------------------------
+From https://github.com/tinycthread/tinycthread/README.txt c57166cd510ffb5022dd5f127489b131b61441b9
+
+License
+-------
+
+Copyright (c) 2012 Marcus Geelnard
+              2013-2014 Evan Nemerson
+
+This software is provided 'as-is', without any express or implied
+warranty. In no event will the authors be held liable for any damages
+arising from the use of this software.
+
+Permission is granted to anyone to use this software for any purpose,
+including commercial applications, and to alter it and redistribute it
+freely, subject to the following restrictions:
+
+    1. The origin of this software must not be misrepresented; you must not
+    claim that you wrote the original software. If you use this software
+    in a product, an acknowledgment in the product documentation would be
+    appreciated but is not required.
+
+    2. Altered source versions must be plainly marked as such, and must not be
+    misrepresented as being the original software.
+
+    3. This notice may not be removed or altered from any source
+    distribution.
+
+
+LICENSE.wingetopt
+--------------------------------------------------------------
+For the files wingetopt.c wingetopt.h downloaded from https://github.com/alex85k/wingetopt
+
+/*
+ * Copyright (c) 2002 Todd C. Miller <To...@courtesan.com>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ * Sponsored in part by the Defense Advanced Research Projects
+ * Agency (DARPA) and Air Force Research Laboratory, Air Force
+ * Materiel Command, USAF, under agreement number F39502-99-1-0512.
+ */
+/*-
+ * Copyright (c) 2000 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Dieter Baron and Thomas Klausner.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/Makefile
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/Makefile b/thirdparty/librdkafka-0.11.4/Makefile
new file mode 100755
index 0000000..e428c83
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/Makefile
@@ -0,0 +1,68 @@
+LIBSUBDIRS=	src src-cpp
+
+CHECK_FILES+=	CONFIGURATION.md \
+		examples/rdkafka_example examples/rdkafka_performance \
+		examples/rdkafka_example_cpp
+
+PACKAGE_NAME?=	librdkafka
+VERSION?=	$(shell python packaging/get_version.py src/rdkafka.h)
+
+# Jenkins CI integration
+BUILD_NUMBER ?= 1
+
+.PHONY:
+
+all: mklove-check libs CONFIGURATION.md check
+
+include mklove/Makefile.base
+
+libs:
+	@(for d in $(LIBSUBDIRS); do $(MAKE) -C $$d || exit $?; done)
+
+CONFIGURATION.md: src/rdkafka.h examples
+	@printf "$(MKL_YELLOW)Updating$(MKL_CLR_RESET)\n"
+	@echo '//@file' > CONFIGURATION.md.tmp
+	@(examples/rdkafka_performance -X list >> CONFIGURATION.md.tmp; \
+		cmp CONFIGURATION.md CONFIGURATION.md.tmp || \
+		mv CONFIGURATION.md.tmp CONFIGURATION.md; \
+		rm -f CONFIGURATION.md.tmp)
+
+file-check: CONFIGURATION.md LICENSES.txt examples
+check: file-check
+	@(for d in $(LIBSUBDIRS); do $(MAKE) -C $$d $@ || exit $?; done)
+
+install:
+	@(for d in $(LIBSUBDIRS); do $(MAKE) -C $$d $@ || exit $?; done)
+
+examples tests: .PHONY libs
+	$(MAKE) -C $@
+
+docs:
+	doxygen Doxyfile
+	@echo "Documentation generated in staging-docs"
+
+clean-docs:
+	rm -rf staging-docs
+
+clean:
+	@$(MAKE) -C tests $@
+	@$(MAKE) -C examples $@
+	@(for d in $(LIBSUBDIRS); do $(MAKE) -C $$d $@ ; done)
+
+distclean: clean
+	./configure --clean
+	rm -f config.log config.log.old
+
+archive:
+	git archive --prefix=$(PACKAGE_NAME)-$(VERSION)/ \
+		-o $(PACKAGE_NAME)-$(VERSION).tar.gz HEAD
+	git archive --prefix=$(PACKAGE_NAME)-$(VERSION)/ \
+		-o $(PACKAGE_NAME)-$(VERSION).zip HEAD
+
+rpm: distclean
+	$(MAKE) -C packaging/rpm
+
+LICENSES.txt: .PHONY
+	@(for i in LICENSE LICENSE.*[^~] ; do (echo "$$i" ; echo "--------------------------------------------------------------" ; cat $$i ; echo "" ; echo "") ; done) > $@.tmp
+	@cmp $@ $@.tmp || mv $@.tmp $@ ; rm -f $@.tmp
+

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/README.md
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/README.md b/thirdparty/librdkafka-0.11.4/README.md
new file mode 100644
index 0000000..1c3a804
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/README.md
@@ -0,0 +1,168 @@
+librdkafka - the Apache Kafka C/C++ client library
+==================================================
+
+Copyright (c) 2012-2018, [Magnus Edenhill](http://www.edenhill.se/).
+
+[https://github.com/edenhill/librdkafka](https://github.com/edenhill/librdkafka)
+
+[![Gitter chat](https://badges.gitter.im/edenhill/librdkafka.png)](https://gitter.im/edenhill/librdkafka) [![Build status](https://doozer.io/badge/edenhill/librdkafka/buildstatus/master)](https://doozer.io/user/edenhill/librdkafka)
+
+
+**librdkafka** is a C library implementation of the
+[Apache Kafka](http://kafka.apache.org/) protocol, containing both
+Producer and Consumer support. It was designed with message delivery reliability
+and high performance in mind; current figures exceed 1 million msgs/second for
+the producer and 3 million msgs/second for the consumer.
+
+**librdkafka** is licensed under the 2-clause BSD license.
+
+For an introduction to the performance and usage of librdkafka, see
+[INTRODUCTION.md](https://github.com/edenhill/librdkafka/blob/master/INTRODUCTION.md)
+
+See the [wiki](https://github.com/edenhill/librdkafka/wiki) for a FAQ.
+
+**NOTE**: The `master` branch is actively developed; use the latest release for production use.
+
+
+# Overview #
+  * High-level producer
+  * High-level balanced KafkaConsumer (requires broker >= 0.9)
+  * Simple (legacy) consumer
+  * Compression: snappy, gzip, lz4
+  * [SSL](https://github.com/edenhill/librdkafka/wiki/Using-SSL-with-librdkafka) support
+  * [SASL](https://github.com/edenhill/librdkafka/wiki/Using-SASL-with-librdkafka) (GSSAPI/Kerberos/SSPI, PLAIN, SCRAM) support
+  * Broker version support: >=0.8 (see [Broker version compatibility](https://github.com/edenhill/librdkafka/wiki/Broker-version-compatibility))
+  * Stable C & C++ APIs (ABI safety guaranteed for C)
+  * [Statistics](https://github.com/edenhill/librdkafka/wiki/Statistics) metrics
+  * Debian package: librdkafka1 and librdkafka-dev in Debian and Ubuntu
+  * RPM package: librdkafka and librdkafka-devel
+  * Gentoo package: dev-libs/librdkafka
+  * Portable: runs on Linux, OSX, Win32, Solaris, FreeBSD, AIX, ...
+
+
+# Language bindings #
+
+  * C#/.NET: [confluent-kafka-dotnet](https://github.com/confluentinc/confluent-kafka-dotnet) (based on [rdkafka-dotnet](https://github.com/ah-/rdkafka-dotnet))
+  * C++: [cppkafka](https://github.com/mfontanini/cppkafka)
+  * D (C-like): [librdkafka](https://github.com/DlangApache/librdkafka/)
+  * D (C++-like): [librdkafkad](https://github.com/tamediadigital/librdkafka-d)
+  * Erlang: [erlkaf](https://github.com/silviucpp/erlkaf)
+  * Go: [confluent-kafka-go](https://github.com/confluentinc/confluent-kafka-go)
+  * Haskell (kafka, conduit, avro, schema registry): [hw-kafka](https://github.com/haskell-works/hw-kafka)
+  * Haskell: [haskakafka](https://github.com/cosbynator/haskakafka)
+  * Haskell: [haskell-kafka](https://github.com/yanatan16/haskell-kafka)
+  * Lua: [luardkafka](https://github.com/mistsv/luardkafka)
+  * Node.js: [node-rdkafka](https://github.com/Blizzard/node-rdkafka)
+  * Node.js: [node-kafka](https://github.com/sutoiku/node-kafka)
+  * Node.js: [kafka-native](https://github.com/jut-io/node-kafka-native)
+  * OCaml: [ocaml-kafka](https://github.com/didier-wenzek/ocaml-kafka)
+  * PHP: [phpkafka](https://github.com/EVODelavega/phpkafka)
+  * PHP: [php-rdkafka](https://github.com/arnaud-lb/php-rdkafka)
+  * Python: [confluent-kafka-python](https://github.com/confluentinc/confluent-kafka-python)
+  * Python: [PyKafka](https://github.com/Parsely/pykafka)
+  * Ruby: [Hermann](https://github.com/reiseburo/hermann)
+  * Ruby: [rdkafka-ruby](https://github.com/appsignal/rdkafka-ruby)
+  * Rust: [rust-rdkafka](https://github.com/fede1024/rust-rdkafka)
+  * Tcl: [KafkaTcl](https://github.com/flightaware/kafkatcl)
+  * Swift: [Perfect-Kafka](https://github.com/PerfectlySoft/Perfect-Kafka)
+
+# Users of librdkafka #
+
+  * [kafkacat](https://github.com/edenhill/kafkacat) - Apache Kafka swiss army knife
+  * [Wikimedia's varnishkafka](https://github.com/wikimedia/varnishkafka) - Varnish cache web log producer
+  * [Wikimedia's kafkatee](https://github.com/wikimedia/analytics-kafkatee) - Kafka multi consumer with filtering and fanout
+  * [rsyslog](http://www.rsyslog.com)
+  * [syslog-ng](http://syslog-ng.org)
+  * [collectd](http://collectd.org)
+  * [logkafka](https://github.com/Qihoo360/logkafka) - Collect logs and send to Kafka
+  * [redBorder](http://www.redborder.net)
+  * [Headweb](http://www.headweb.com/)
+  * [Produban's log2kafka](https://github.com/Produban/log2kafka) - Web log producer
+  * [fuse_kafka](https://github.com/yazgoo/fuse_kafka) - FUSE file system layer
+  * [node-kafkacat](https://github.com/Rafflecopter/node-kafkacat)
+  * [OVH](http://ovh.com) - [AntiDDOS](http://www.slideshare.net/hugfrance/hugfr-6-oct2014ovhantiddos)
+  * [otto.de](http://otto.de)'s [trackrdrd](https://github.com/otto-de/trackrdrd) - Varnish log reader
+  * [Microwish](https://github.com/microwish) has a range of Kafka utilities for log aggregation, HDFS integration, etc.
+  * [aidp](https://github.com/weiboad/aidp) - kafka consumer with embedded Lua scripting in a data-processing framework
+  * [Yandex ClickHouse](https://github.com/yandex/ClickHouse)
+  * [NXLog](http://nxlog.co/) - Enterprise logging system, Kafka input/output plugin.
+  * large unnamed financial institutions
+  * and many more...
+  * *Let [me](mailto:rdkafka@edenhill.se) know if you are using librdkafka*
+
+
+
+# Usage
+
+## Requirements
+	The GNU toolchain
+	GNU make
+	pthreads
+	zlib (optional, for gzip compression support)
+	libssl-dev (optional, for SSL and SASL SCRAM support)
+	libsasl2-dev (optional, for SASL GSSAPI support)
+
+## Instructions
+
+### Building
+
+      ./configure
+      make
+      sudo make install
+
+
+**NOTE**: See [README.win32](README.win32) for instructions on how to build
+          on Windows with Microsoft Visual Studio.
+
+**NOTE**: See [CMake instructions](packaging/cmake/README.md) for the experimental
+          CMake build (unsupported).
+
+
+### Usage in code
+
+See [examples/rdkafka_example.c](https://github.com/edenhill/librdkafka/blob/master/examples/rdkafka_example.c) for an example producer and consumer.
+
+Link your program with `-lrdkafka -lz -lpthread -lrt`.
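A minimal producer sketch against this C API (illustrative only; the broker
address, topic name and build line are placeholders, not taken from the upstream
examples):

    /* sketch.c - minimal librdkafka producer sketch (placeholders throughout).
     * Build roughly as:  cc sketch.c -lrdkafka -lz -lpthread -lrt            */
    #include <stdio.h>
    #include <string.h>
    #include <librdkafka/rdkafka.h>

    int main (void) {
            char errstr[512];
            const char *payload = "hello librdkafka";

            /* Create a configuration object and point it at a placeholder broker. */
            rd_kafka_conf_t *conf = rd_kafka_conf_new();
            if (rd_kafka_conf_set(conf, "bootstrap.servers", "localhost:9092",
                                  errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) {
                    fprintf(stderr, "%s\n", errstr);
                    return 1;
            }

            /* Create the producer handle; it takes ownership of conf. */
            rd_kafka_t *rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf,
                                          errstr, sizeof(errstr));
            if (!rk) {
                    fprintf(stderr, "%s\n", errstr);
                    return 1;
            }

            /* Produce a single copied message to an unassigned partition. */
            rd_kafka_topic_t *rkt = rd_kafka_topic_new(rk, "test", NULL);
            if (rd_kafka_produce(rkt, RD_KAFKA_PARTITION_UA, RD_KAFKA_MSG_F_COPY,
                                 (void *)payload, strlen(payload),
                                 NULL, 0, NULL) == -1)
                    fprintf(stderr, "produce failed: %s\n",
                            rd_kafka_err2str(rd_kafka_last_error()));

            /* Serve delivery reports and wait for any outstanding messages. */
            rd_kafka_poll(rk, 0);
            rd_kafka_flush(rk, 10 * 1000);

            rd_kafka_topic_destroy(rkt);
            rd_kafka_destroy(rk);
            return 0;
    }

A real application would normally register a delivery-report callback with
rd_kafka_conf_set_dr_msg_cb() before creating the handle, and retry produce
calls that fail with RD_KAFKA_RESP_ERR__QUEUE_FULL.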
+
+
+## Documentation
+
+The public APIs are documented in their respective header files:
+ * The **C** API is documented in [src/rdkafka.h](src/rdkafka.h)
+ * The **C++** API is documented in [src-cpp/rdkafkacpp.h](src-cpp/rdkafkacpp.h)
+
+To generate Doxygen documents for the API, type:
+
+    make docs
+
+
+Configuration properties are documented in
+[CONFIGURATION.md](https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md)
+
+For a librdkafka introduction, see
+[INTRODUCTION.md](https://github.com/edenhill/librdkafka/blob/master/INTRODUCTION.md)
+
+
+## Examples
+
+See the `examples/` sub-directory.
+
+
+## Tests
+
+See the `tests/` sub-directory.
+
+
+## Support
+
+File bug reports, feature requests and questions using
+[GitHub Issues](https://github.com/edenhill/librdkafka/issues)
+
+
+Questions and discussions are also welcome on irc.freenode.org, #apache-kafka,
+nickname Snaps.
+
+
+### Commercial support
+
+Commercial support is available from [Edenhill services](http://www.edenhill.se)

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/README.win32
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/README.win32 b/thirdparty/librdkafka-0.11.4/README.win32
new file mode 100644
index 0000000..de9b5e4
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/README.win32
@@ -0,0 +1,28 @@
+
+Native win32 build instructions using Microsoft Visual Studio 2013 (MSVC).
+
+Requirements:
+ * zlib is installed automatically from NuGet,
+   but probably requires the NuGet VS extension.
+ * OpenSSL-win32 must be installed in C:\OpenSSL-win32.
+   Download and install the latest v1.0.2 non-light package from:
+   https://slproweb.com/products/Win32OpenSSL.html
+   (This would also use NuGet, but the current
+    OpenSSL packages are outdated and have broken
+    dependencies, so no luck.)
+
+The Visual Studio solution file for librdkafka resides in win32/librdkafka.sln
+
+Artifacts:
+ - C library
+ - C++ library
+ - rdkafka_example
+ - tests
+
+ Missing:
+  - remaining tools (rdkafka_performance, etc.)
+  - SASL support (no official Cyrus libsasl2 DLLs available)
+
+If you build librdkafka with an external tool (e.g. CMake), you can get rid of the
+__declspec(dllexport) / __declspec(dllimport) decorations by adding the define
+-DLIBRDKAFKA_STATICLIB to your CFLAGS.
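
As a rough illustration of what that define switches off (a generic sketch of the
usual dllexport/dllimport pattern, not a verbatim copy of rdkafka.h; the
LIBRDKAFKA_EXPORTS guard name below is an assumption):

    /* Generic sketch only; the real macro layout in rdkafka.h may differ. */
    #ifdef _MSC_VER
    #  ifdef LIBRDKAFKA_STATICLIB
    #    define RD_EXPORT                           /* static lib: no decoration */
    #  elif defined(LIBRDKAFKA_EXPORTS)
    #    define RD_EXPORT __declspec(dllexport)     /* building the DLL */
    #  else
    #    define RD_EXPORT __declspec(dllimport)     /* consuming the DLL */
    #  endif
    #else
    #  define RD_EXPORT
    #endif

    RD_EXPORT int rd_kafka_version (void);          /* example decorated prototype */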

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/configure
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/configure b/thirdparty/librdkafka-0.11.4/configure
new file mode 100755
index 0000000..a76452a
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/configure
@@ -0,0 +1,214 @@
+#!/usr/bin/env bash
+#
+
+BASHVER=$(expr ${BASH_VERSINFO[0]} \* 1000 + ${BASH_VERSINFO[1]})
+
+if [ "$BASHVER" -lt 3002 ]; then
+    echo "ERROR: mklove requires bash version 3.2 or later but you are using $BASH_VERSION ($BASHVER)"
+    echo "       See https://github.com/edenhill/mklove/issues/15"
+    exit 1
+fi
+
+MKL_CONFIGURE_ARGS="$0 $*"
+
+# Load base module
+source mklove/modules/configure.base
+
+# Read some special command line options right away that must be known prior to
+# sourcing modules.
+mkl_in_list "$*" "--no-download" && MKL_NO_DOWNLOAD=1
+# Disable downloads when --help is used to avoid blocking calls.
+mkl_in_list "$*" "--help" && MKL_NO_DOWNLOAD=1
+mkl_in_list "$*" "--debug" && MKL_DEBUG=1
+
+# This is the earliest possible time to check for color support in
+# terminal because mkl_check_terminal_color_support uses mkl_dbg which
+# needs to know if MKL_DEBUG is set
+mkl_check_terminal_color_support
+
+# Delete temporary Makefile and header files on exit.
+trap "{ rm -f $MKL_OUTMK $MKL_OUTH; }" EXIT
+
+
+
+##
+## Load builtin modules
+##
+
+# Builtin options, etc.
+mkl_require builtin
+
+# Host/target support
+mkl_require host
+
+# Compiler detection
+mkl_require cc
+
+
+# Load application provided modules (in current directory), if any.
+for fname in configure.* ; do
+    if [[ $fname = 'configure.*' ]]; then
+        continue
+    fi
+
+    # Skip temporary files
+    if [[ $fname = *~ ]]; then
+        continue
+    fi
+
+    mkl_require $fname
+done
+
+
+
+
+##
+## Argument parsing (options)
+##
+##
+
+_SAVE_ARGS="$*"
+
+# Parse arguments
+while [[ ! -z $@ ]]; do
+    if [[ $1 != --* ]]; then
+        mkl_err "Unknown non-option argument: $1"
+        mkl_usage
+        exit 1
+    fi
+
+    opt=${1#--}
+    shift
+
+    if [[ $opt = *=* ]]; then
+        name="${opt%=*}"
+        arg="${opt#*=}"
+        eqarg=1
+    else
+        name="$opt"
+        arg=""
+        eqarg=0
+    fi
+
+    safeopt="$(mkl_env_esc $name)"
+
+    if ! mkl_func_exists opt_$safeopt ; then
+        mkl_err "Unknown option $opt"
+        mkl_usage
+        exit 1
+    fi
+
+    # Check if this option needs an argument.
+    reqarg=$(mkl_meta_get "MKL_OPT_ARGS" "$(mkl_env_esc $name)")
+    if [[ ! -z $reqarg ]]; then
+        if [[ $eqarg == 0 && -z $arg ]]; then
+            arg=$1
+            shift
+
+            if [[ -z $arg ]]; then
+                mkl_err "Missing argument to option --$name $reqarg"
+                exit 1
+            fi
+        fi
+    else
+        if [[ ! -z $arg ]]; then
+            mkl_err "Option --$name expects no argument"
+            exit 1
+        fi
+        arg=y
+    fi
+
+    case $name in
+        re|reconfigure)
+            oldcmd=$(head -1 config.log | grep '^# configure exec: ' | \
+                sed -e 's/^\# configure exec: [^ ]*configure//')
+            echo "Reconfiguring: $0 $oldcmd"
+            exec $0 $oldcmd
+            ;;
+
+        list-modules)
+            echo "Modules loaded:"
+            for mod in $MKL_MODULES ; do
+                echo "  $mod"
+            done
+            exit 0
+            ;;
+
+        list-checks)
+            echo "Check functions in calling order:"
+            for mf in $MKL_CHECKS ; do
+                mod=${mf%:*}
+                func=${mf#*:}
+                echo -e "${MKL_GREEN}From module $mod:$MKL_CLR_RESET"
+                declare -f $func
+                echo ""
+            done
+            exit 0
+            ;;
+
+        update-modules)
+            fails=0
+            echo "Updating modules"
+            for mod in $MKL_MODULES ; do
+                echo -n "Updating $mod..."
+                if mkl_module_download "$mod" > /dev/null ; then
+                    echo -e "${MKL_GREEN}ok${MKL_CLR_RESET}"
+                else
+                    echo -e "${MKL_RED}failed${MKL_CLR_RESET}"
+                    fails=$(expr $fails + 1)
+                fi
+            done
+            exit $fails
+            ;;
+
+        help)
+            mkl_usage
+            exit 0
+            ;;
+
+        *)
+            opt_$safeopt $arg || exit 1
+            mkl_var_append MKL_OPTS_SET "$safeopt"
+            ;;
+    esac
+done
+
+if [[ ! -z $MKL_CLEAN ]]; then
+    mkl_clean
+    exit 0
+fi
+
+# Move away previous log file
+[[ -f $MKL_OUTDBG ]] && mv $MKL_OUTDBG ${MKL_OUTDBG}.old
+
+
+# Create output files
+echo "# configure exec: $0 $_SAVE_ARGS" >> $MKL_OUTDBG
+echo "# On $(date)" >> $MKL_OUTDBG
+
+rm -f $MKL_OUTMK $MKL_OUTH
+
+
+# Load cache file
+mkl_cache_read
+
+# Run checks
+mkl_checks_run
+
+# Check accumulated failures, will not return on failure.
+mkl_check_fails
+
+# Generate outputs
+mkl_generate
+
+# Summarize what happened
+mkl_summary
+
+# Write cache file
+mkl_cache_write
+
+
+echo ""
+echo "Now type 'make' to build"
+trap - EXIT
+exit 0

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/configure.librdkafka
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/configure.librdkafka b/thirdparty/librdkafka-0.11.4/configure.librdkafka
new file mode 100644
index 0000000..500d1e0
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/configure.librdkafka
@@ -0,0 +1,215 @@
+#!/bin/bash
+#
+
+mkl_meta_set "description" "name"      "librdkafka"
+mkl_meta_set "description" "oneline"   "The Apache Kafka C/C++ library"
+mkl_meta_set "description" "long"      "Full Apache Kafka protocol support, including producer and consumer"
+mkl_meta_set "description" "copyright" "Copyright (c) 2012-2015 Magnus Edenhill"
+
+# Enable generation of pkg-config .pc file
+mkl_mkvar_set "" GEN_PKG_CONFIG y
+
+
+mkl_require cxx
+mkl_require lib
+mkl_require pic
+mkl_require atomics
+mkl_require good_cflags
+mkl_require socket
+
+# Generate version variables from rdkafka.h hex version define
+# so we can use it as string version when generating a pkg-config file.
+
+verdef=$(grep '^#define  *RD_KAFKA_VERSION  *0x' src/rdkafka.h | sed 's/^#define  *RD_KAFKA_VERSION  *\(0x[a-f0-9]*\)\.*$/\1/')
+mkl_require parseversion hex2str "%d.%d.%d" "$verdef" RDKAFKA_VERSION_STR
+
+mkl_toggle_option "Development" ENABLE_DEVEL "--enable-devel" "Enable development asserts, checks, etc" "n"
+mkl_toggle_option "Development" ENABLE_VALGRIND "--enable-valgrind" "Enable in-code valgrind suppressions" "n"
+
+mkl_toggle_option "Development" ENABLE_REFCNT_DEBUG "--enable-refcnt-debug" "Enable refcnt debugging" "n"
+
+mkl_toggle_option "Development" ENABLE_SHAREDPTR_DEBUG "--enable-sharedptr-debug" "Enable sharedptr debugging" "n"
+
+mkl_toggle_option "Feature" ENABLE_LZ4_EXT "--enable-lz4" "Enable external LZ4 library support" "y"
+
+mkl_toggle_option "Feature" ENABLE_SSL "--enable-ssl" "Enable SSL support" "y"
+mkl_toggle_option "Feature" ENABLE_SASL "--enable-sasl" "Enable SASL support with Cyrus libsasl2" "y"
+
+
+function checks {
+
+    # -lrt is needed on linux for clock_gettime: link it if it exists.
+    mkl_lib_check "librt" "" cont CC "-lrt"
+
+    # required libs
+    mkl_lib_check "libpthread" "" fail CC "-lpthread" \
+                  "#include <pthread.h>"
+
+    # Check if dlopen() is available
+    mkl_lib_check "libdl" "WITH_LIBDL" disable CC "-ldl" \
+"
+#include <stdlib.h>
+#include <dlfcn.h>
+void foo (void) {
+   void *h = dlopen(\"__bad_lib\", 0);
+   void *p = dlsym(h, \"sym\");
+   if (p)
+     p = NULL;
+   dlclose(h);
+}"
+
+    if [[ $WITH_LIBDL == "y" ]]; then
+        mkl_allvar_set WITH_PLUGINS WITH_PLUGINS y
+    fi
+
+    # optional libs
+    mkl_lib_check "zlib" "WITH_ZLIB" disable CC "-lz" \
+                  "#include <zlib.h>"
+    mkl_lib_check "libcrypto" "" disable CC "-lcrypto"
+
+    if [[ "$ENABLE_LZ4_EXT" == "y" ]]; then
+        mkl_lib_check --static=-llz4 "liblz4" "WITH_LZ4_EXT" disable CC "-llz4" \
+                      "#include <lz4frame.h>"
+    fi
+
+    # Snappy support is built-in
+    mkl_allvar_set WITH_SNAPPY WITH_SNAPPY y
+
+    # Enable sockem (tests)
+    mkl_allvar_set WITH_SOCKEM WITH_SOCKEM y
+
+    if [[ "$ENABLE_SSL" == "y" ]]; then
+	mkl_meta_set "libssl" "deb" "libssl-dev"
+        if [[ $MKL_DISTRO == "osx" ]]; then
+            # Add brew's OpenSSL pkg-config path on OSX
+            export PKG_CONFIG_PATH="$PKG_CONFIG_PATH:/usr/local/opt/openssl/lib/pkgconfig"
+        fi
+	mkl_lib_check "libssl" "WITH_SSL" disable CC "-lssl" \
+                      "#include <openssl/ssl.h>"
+    fi
+
+    if [[ "$ENABLE_SASL" == "y" ]]; then
+        mkl_meta_set "libsasl2" "deb" "libsasl2-dev"
+        if ! mkl_lib_check "libsasl2" "WITH_SASL_CYRUS" disable CC "-lsasl2" "#include <sasl/sasl.h>" ; then
+	    mkl_lib_check "libsasl" "WITH_SASL_CYRUS" disable CC "-lsasl" \
+                          "#include <sasl/sasl.h>"
+        fi
+    fi
+
+    if [[ "$WITH_SSL" == "y" ]]; then
+        # SASL SCRAM requires base64 encoding from OpenSSL
+        mkl_allvar_set WITH_SASL_SCRAM WITH_SASL_SCRAM y
+    fi
+
+    # CRC32C: check for crc32 instruction support.
+    #         This is also checked during runtime using cpuid.
+    mkl_compile_check crc32chw WITH_CRC32C_HW disable CC "" \
+                      "
+#include <inttypes.h>
+#include <stdio.h>
+#define LONGx1 \"8192\"
+#define LONGx2 \"16384\"
+void foo (void) {
+   const char *n = \"abcdefghijklmnopqrstuvwxyz0123456789\";
+   uint64_t c0 = 0, c1 = 1, c2 = 2;
+   uint64_t s;
+   uint32_t eax = 1, ecx;
+   __asm__(\"cpuid\"
+           : \"=c\"(ecx)
+           : \"a\"(eax)
+           : \"%ebx\", \"%edx\");
+   __asm__(\"crc32b\t\" \"(%1), %0\"
+           : \"=r\"(c0)
+           : \"r\"(n), \"0\"(c0));
+   __asm__(\"crc32q\t\" \"(%3), %0\n\t\"
+           \"crc32q\t\" LONGx1 \"(%3), %1\n\t\"
+           \"crc32q\t\" LONGx2 \"(%3), %2\"
+           : \"=r\"(c0), \"=r\"(c1), \"=r\"(c2)
+           : \"r\"(n), \"0\"(c0), \"1\"(c1), \"2\"(c2));
+  s = c0 + c1 + c2;
+  printf(\"avoiding unused code removal by printing %d, %d, %d\n\", (int)s, (int)eax, (int)ecx);
+}
+"
+
+
+    # Check for libc regex
+    mkl_compile_check "regex" "HAVE_REGEX" disable CC "" \
+"
+#include <stddef.h>
+#include <regex.h>
+void foo (void) {
+   regcomp(NULL, NULL, 0);
+   regexec(NULL, NULL, 0, NULL, 0);
+   regerror(0, NULL, NULL, 0);
+   regfree(NULL);
+}"
+
+
+    # Older g++ (<=4.1?) gives invalid warnings for the C++ code.
+    mkl_mkvar_append CXXFLAGS CXXFLAGS "-Wno-non-virtual-dtor"
+
+    # Required on SunOS
+    if [[ $MKL_DISTRO == "SunOS" ]]; then
+	mkl_mkvar_append CPPFLAGS CPPFLAGS "-D_POSIX_PTHREAD_SEMANTICS -D_REENTRANT -D__EXTENSIONS__"
+	# Source defines _POSIX_C_SOURCE to 200809L for Solaris, and this is
+	# incompatible on that platform with compilers < c99.
+	mkl_mkvar_append CFLAGS CFLAGS "-std=c99"
+    fi
+
+    # Check if strndup() is available (isn't on Solaris 10)
+    mkl_compile_check "strndup" "HAVE_STRNDUP" disable CC "" \
+"#include <string.h>
+int foo (void) {
+   return strndup(\"hi\", 2) ? 0 : 1;
+}"
+
+    # Check if strerror_r() is available.
+    # The check for GNU vs XSI is done in rdposix.h since
+    # we can't rely on all defines to be set here (_GNU_SOURCE).
+    mkl_compile_check "strerror_r" "HAVE_STRERROR_R" disable CC "" \
+"#include <string.h>
+const char *foo (void) {
+   static char buf[64];
+   strerror_r(1, buf, sizeof(buf));
+   return buf;
+}"
+
+
+    # See if GNU's pthread_setname_np() is available, and in what form.
+    mkl_compile_check "pthread_setname_gnu" "HAVE_PTHREAD_SETNAME_GNU" disable CC "-D_GNU_SOURCE -lpthread" \
+'
+#include <pthread.h>
+
+void foo (void) {
+  pthread_setname_np(pthread_self(), "abc");
+}
+'
+
+    # Figure out what tool to use for dumping public symbols.
+    # We rely on configure.cc setting up $NM if it exists.
+    if mkl_env_check "nm" "" cont "NM" ; then
+	# nm by future mk var
+	if [[ $MKL_DISTRO == "osx" || $MKL_DISTRO == "AIX" ]]; then
+	    mkl_mkvar_set SYMDUMPER SYMDUMPER '$(NM) -g'
+	else
+	    mkl_mkvar_set SYMDUMPER SYMDUMPER '$(NM) -D'
+	fi
+    else
+	# Fake symdumper
+	mkl_mkvar_set SYMDUMPER SYMDUMPER 'echo'
+    fi
+
+    # The linker-script generator (lds-gen.py) requires python
+    if [[ $WITH_LDS == y ]]; then
+        if ! mkl_command_check python "HAVE_PYTHON" "disable" "python -V"; then
+            mkl_err "disabling linker-script since python is not available"
+            mkl_mkvar_set WITH_LDS WITH_LDS "n"
+        fi
+    fi
+
+    if [[ "$ENABLE_VALGRIND" == "y" ]]; then
+	mkl_compile_check valgrind WITH_VALGRIND disable CC "" \
+			  "#include <valgrind/memcheck.h>"
+    fi
+}
+

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/dev-conf.sh
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/dev-conf.sh b/thirdparty/librdkafka-0.11.4/dev-conf.sh
new file mode 100755
index 0000000..c334f97
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/dev-conf.sh
@@ -0,0 +1,45 @@
+#!/bin/bash
+#
+# Configure librdkafka for development
+
+set -e
+./configure --clean
+
+# enable pedantic
+#export CFLAGS='-std=c99 -pedantic -Wshadow'
+#export CXXFLAGS='-std=c++98 -pedantic'
+
+# enable FSAN
+#FSAN="-fsanitize=address"
+#export CPPFLAGS="$CPPFLAGS $FSAN"
+#export LDFLAGS="$LDFLAGS $FSAN"
+
+OPTS=""
+
+# enable devel asserts
+OPTS="$OPTS --enable-devel"
+
+# disable optimizations
+OPTS="$OPTS --disable-optimization"
+
+# gprof
+#OPTS="$OPTS --enable-profiling --disable-optimization"
+
+# disable lz4
+#OPTS="$OPTS --disable-lz4"
+
+# disable cyrus-sasl
+#OPTS="$OPTS --disable-sasl"
+
+# enable sharedptr debugging
+#OPTS="$OPTS --enable-sharedptr-debug"
+
+#enable refcnt debugging
+#OPTS="$OPTS --enable-refcnt-debug"
+
+echo "Devel configuration options: $OPTS"
+./configure $OPTS
+
+make clean
+make -j
+(cd tests ; make -j build)

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/examples/.gitignore
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/examples/.gitignore b/thirdparty/librdkafka-0.11.4/examples/.gitignore
new file mode 100644
index 0000000..3dc3aab
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/examples/.gitignore
@@ -0,0 +1,8 @@
+rdkafka_example
+rdkafka_performance
+rdkafka_example_cpp
+rdkafka_consumer_example
+rdkafka_consumer_example_cpp
+kafkatest_verifiable_client
+rdkafka_simple_producer
+rdkafka_consume_batch


[11/51] [partial] nifi-minifi-cpp git commit: MINIFICPP-512 - upgrade to librdkafka 0.11.4

Posted by ph...@apache.org.
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/win32/wingetopt.c
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/win32/wingetopt.c b/thirdparty/librdkafka-0.11.1/win32/wingetopt.c
deleted file mode 100644
index 50ed2f0..0000000
--- a/thirdparty/librdkafka-0.11.1/win32/wingetopt.c
+++ /dev/null
@@ -1,564 +0,0 @@
-/*	$OpenBSD: getopt_long.c,v 1.23 2007/10/31 12:34:57 chl Exp $	*/
-/*	$NetBSD: getopt_long.c,v 1.15 2002/01/31 22:43:40 tv Exp $	*/
-
-/*
- * Copyright (c) 2002 Todd C. Miller <To...@courtesan.com>
- *
- * Permission to use, copy, modify, and distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- *
- * Sponsored in part by the Defense Advanced Research Projects
- * Agency (DARPA) and Air Force Research Laboratory, Air Force
- * Materiel Command, USAF, under agreement number F39502-99-1-0512.
- */
-/*-
- * Copyright (c) 2000 The NetBSD Foundation, Inc.
- * All rights reserved.
- *
- * This code is derived from software contributed to The NetBSD Foundation
- * by Dieter Baron and Thomas Klausner.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
- * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <errno.h>
-#include <stdlib.h>
-#include <string.h>
-#include "wingetopt.h"
-#include <stdarg.h>
-#include <stdio.h>
-#include <windows.h>
-
-#define	REPLACE_GETOPT		/* use this getopt as the system getopt(3) */
-
-#ifdef REPLACE_GETOPT
-int	opterr = 1;		/* if error message should be printed */
-int	optind = 1;		/* index into parent argv vector */
-int	optopt = '?';		/* character checked for validity */
-#undef	optreset		/* see getopt.h */
-#define	optreset		__mingw_optreset
-int	optreset;		/* reset getopt */
-char    *optarg;		/* argument associated with option */
-#endif
-
-#define PRINT_ERROR	((opterr) && (*options != ':'))
-
-#define FLAG_PERMUTE	0x01	/* permute non-options to the end of argv */
-#define FLAG_ALLARGS	0x02	/* treat non-options as args to option "-1" */
-#define FLAG_LONGONLY	0x04	/* operate as getopt_long_only */
-
-/* return values */
-#define	BADCH		(int)'?'
-#define	BADARG		((*options == ':') ? (int)':' : (int)'?')
-#define	INORDER 	(int)1
-
-#ifndef __CYGWIN__
-#define __progname __argv[0]
-#else
-extern char __declspec(dllimport) *__progname;
-#endif
-
-#ifdef __CYGWIN__
-static char EMSG[] = "";
-#else
-#define	EMSG		""
-#endif
-
-static int getopt_internal(int, char * const *, const char *,
-			   const struct option *, int *, int);
-static int parse_long_options(char * const *, const char *,
-			      const struct option *, int *, int);
-static int gcd(int, int);
-static void permute_args(int, int, int, char * const *);
-
-static char *place = EMSG; /* option letter processing */
-
-/* XXX: set optreset to 1 rather than these two */
-static int nonopt_start = -1; /* first non option argument (for permute) */
-static int nonopt_end = -1;   /* first option after non options (for permute) */
-
-/* Error messages */
-static const char recargchar[] = "option requires an argument -- %c";
-static const char recargstring[] = "option requires an argument -- %s";
-static const char ambig[] = "ambiguous option -- %.*s";
-static const char noarg[] = "option doesn't take an argument -- %.*s";
-static const char illoptchar[] = "unknown option -- %c";
-static const char illoptstring[] = "unknown option -- %s";
-
-static void
-_vwarnx(const char *fmt,va_list ap)
-{
-  (void)fprintf(stderr,"%s: ",__progname);
-  if (fmt != NULL)
-    (void)vfprintf(stderr,fmt,ap);
-  (void)fprintf(stderr,"\n");
-}
-
-static void
-warnx(const char *fmt,...)
-{
-  va_list ap;
-  va_start(ap,fmt);
-  _vwarnx(fmt,ap);
-  va_end(ap);
-}
-
-/*
- * Compute the greatest common divisor of a and b.
- */
-static int
-gcd(int a, int b)
-{
-	int c;
-
-	c = a % b;
-	while (c != 0) {
-		a = b;
-		b = c;
-		c = a % b;
-	}
-
-	return (b);
-}
-
-/*
- * Exchange the block from nonopt_start to nonopt_end with the block
- * from nonopt_end to opt_end (keeping the same order of arguments
- * in each block).
- */
-static void
-permute_args(int panonopt_start, int panonopt_end, int opt_end,
-	char * const *nargv)
-{
-	int cstart, cyclelen, i, j, ncycle, nnonopts, nopts, pos;
-	char *swap;
-
-	/*
-	 * compute lengths of blocks and number and size of cycles
-	 */
-	nnonopts = panonopt_end - panonopt_start;
-	nopts = opt_end - panonopt_end;
-	ncycle = gcd(nnonopts, nopts);
-	cyclelen = (opt_end - panonopt_start) / ncycle;
-
-	for (i = 0; i < ncycle; i++) {
-		cstart = panonopt_end+i;
-		pos = cstart;
-		for (j = 0; j < cyclelen; j++) {
-			if (pos >= panonopt_end)
-				pos -= nnonopts;
-			else
-				pos += nopts;
-			swap = nargv[pos];
-			/* LINTED const cast */
-			((char **) nargv)[pos] = nargv[cstart];
-			/* LINTED const cast */
-			((char **)nargv)[cstart] = swap;
-		}
-	}
-}
-
-/*
- * parse_long_options --
- *	Parse long options in argc/argv argument vector.
- * Returns -1 if short_too is set and the option does not match long_options.
- */
-static int
-parse_long_options(char * const *nargv, const char *options,
-	const struct option *long_options, int *idx, int short_too)
-{
-	char *current_argv, *has_equal;
-	size_t current_argv_len;
-	int i, ambiguous, match;
-
-#define IDENTICAL_INTERPRETATION(_x, _y)                                \
-	(long_options[(_x)].has_arg == long_options[(_y)].has_arg &&    \
-	 long_options[(_x)].flag == long_options[(_y)].flag &&          \
-	 long_options[(_x)].val == long_options[(_y)].val)
-
-	current_argv = place;
-	match = -1;
-	ambiguous = 0;
-
-	optind++;
-
-	if ((has_equal = strchr(current_argv, '=')) != NULL) {
-		/* argument found (--option=arg) */
-		current_argv_len = has_equal - current_argv;
-		has_equal++;
-	} else
-		current_argv_len = strlen(current_argv);
-
-	for (i = 0; long_options[i].name; i++) {
-		/* find matching long option */
-		if (strncmp(current_argv, long_options[i].name,
-		    current_argv_len))
-			continue;
-
-		if (strlen(long_options[i].name) == current_argv_len) {
-			/* exact match */
-			match = i;
-			ambiguous = 0;
-			break;
-		}
-		/*
-		 * If this is a known short option, don't allow
-		 * a partial match of a single character.
-		 */
-		if (short_too && current_argv_len == 1)
-			continue;
-
-		if (match == -1)	/* partial match */
-			match = i;
-		else if (!IDENTICAL_INTERPRETATION(i, match))
-			ambiguous = 1;
-	}
-	if (ambiguous) {
-		/* ambiguous abbreviation */
-		if (PRINT_ERROR)
-			warnx(ambig, (int)current_argv_len,
-			     current_argv);
-		optopt = 0;
-		return (BADCH);
-	}
-	if (match != -1) {		/* option found */
-		if (long_options[match].has_arg == no_argument
-		    && has_equal) {
-			if (PRINT_ERROR)
-				warnx(noarg, (int)current_argv_len,
-				     current_argv);
-			/*
-			 * XXX: GNU sets optopt to val regardless of flag
-			 */
-			if (long_options[match].flag == NULL)
-				optopt = long_options[match].val;
-			else
-				optopt = 0;
-			return (BADARG);
-		}
-		if (long_options[match].has_arg == required_argument ||
-		    long_options[match].has_arg == optional_argument) {
-			if (has_equal)
-				optarg = has_equal;
-			else if (long_options[match].has_arg ==
-			    required_argument) {
-				/*
-				 * optional argument doesn't use next nargv
-				 */
-				optarg = nargv[optind++];
-			}
-		}
-		if ((long_options[match].has_arg == required_argument)
-		    && (optarg == NULL)) {
-			/*
-			 * Missing argument; leading ':' indicates no error
-			 * should be generated.
-			 */
-			if (PRINT_ERROR)
-				warnx(recargstring,
-				    current_argv);
-			/*
-			 * XXX: GNU sets optopt to val regardless of flag
-			 */
-			if (long_options[match].flag == NULL)
-				optopt = long_options[match].val;
-			else
-				optopt = 0;
-			--optind;
-			return (BADARG);
-		}
-	} else {			/* unknown option */
-		if (short_too) {
-			--optind;
-			return (-1);
-		}
-		if (PRINT_ERROR)
-			warnx(illoptstring, current_argv);
-		optopt = 0;
-		return (BADCH);
-	}
-	if (idx)
-		*idx = match;
-	if (long_options[match].flag) {
-		*long_options[match].flag = long_options[match].val;
-		return (0);
-	} else
-		return (long_options[match].val);
-#undef IDENTICAL_INTERPRETATION
-}
-
-/*
- * getopt_internal --
- *	Parse argc/argv argument vector.  Called by user level routines.
- */
-static int
-getopt_internal(int nargc, char * const *nargv, const char *options,
-	const struct option *long_options, int *idx, int flags)
-{
-	char *oli;				/* option letter list index */
-	int optchar, short_too;
-	static int posixly_correct = -1;
-
-	if (options == NULL)
-		return (-1);
-
-	/*
-	 * XXX Some GNU programs (like cvs) set optind to 0 instead of
-	 * XXX using optreset.  Work around this braindamage.
-	 */
-	if (optind == 0)
-		optind = optreset = 1;
-
-	/*
-	 * Disable GNU extensions if POSIXLY_CORRECT is set or options
-	 * string begins with a '+'.
-	 *
-	 * CV, 2009-12-14: Check POSIXLY_CORRECT anew if optind == 0 or
-	 *                 optreset != 0 for GNU compatibility.
-	 */
-#ifndef _MSC_VER
-	if (posixly_correct == -1 || optreset != 0)
-		posixly_correct = (getenv("POSIXLY_CORRECT") != NULL);
-#endif
-	if (*options == '-')
-		flags |= FLAG_ALLARGS;
-	else if (posixly_correct || *options == '+')
-		flags &= ~FLAG_PERMUTE;
-	if (*options == '+' || *options == '-')
-		options++;
-
-	optarg = NULL;
-	if (optreset)
-		nonopt_start = nonopt_end = -1;
-start:
-	if (optreset || !*place) {		/* update scanning pointer */
-		optreset = 0;
-		if (optind >= nargc) {          /* end of argument vector */
-			place = EMSG;
-			if (nonopt_end != -1) {
-				/* do permutation, if we have to */
-				permute_args(nonopt_start, nonopt_end,
-				    optind, nargv);
-				optind -= nonopt_end - nonopt_start;
-			}
-			else if (nonopt_start != -1) {
-				/*
-				 * If we skipped non-options, set optind
-				 * to the first of them.
-				 */
-				optind = nonopt_start;
-			}
-			nonopt_start = nonopt_end = -1;
-			return (-1);
-		}
-		if (*(place = nargv[optind]) != '-' ||
-		    (place[1] == '\0' && strchr(options, '-') == NULL)) {
-			place = EMSG;		/* found non-option */
-			if (flags & FLAG_ALLARGS) {
-				/*
-				 * GNU extension:
-				 * return non-option as argument to option 1
-				 */
-				optarg = nargv[optind++];
-				return (INORDER);
-			}
-			if (!(flags & FLAG_PERMUTE)) {
-				/*
-				 * If no permutation wanted, stop parsing
-				 * at first non-option.
-				 */
-				return (-1);
-			}
-			/* do permutation */
-			if (nonopt_start == -1)
-				nonopt_start = optind;
-			else if (nonopt_end != -1) {
-				permute_args(nonopt_start, nonopt_end,
-				    optind, nargv);
-				nonopt_start = optind -
-				    (nonopt_end - nonopt_start);
-				nonopt_end = -1;
-			}
-			optind++;
-			/* process next argument */
-			goto start;
-		}
-		if (nonopt_start != -1 && nonopt_end == -1)
-			nonopt_end = optind;
-
-		/*
-		 * If we have "-" do nothing, if "--" we are done.
-		 */
-		if (place[1] != '\0' && *++place == '-' && place[1] == '\0') {
-			optind++;
-			place = EMSG;
-			/*
-			 * We found an option (--), so if we skipped
-			 * non-options, we have to permute.
-			 */
-			if (nonopt_end != -1) {
-				permute_args(nonopt_start, nonopt_end,
-				    optind, nargv);
-				optind -= nonopt_end - nonopt_start;
-			}
-			nonopt_start = nonopt_end = -1;
-			return (-1);
-		}
-	}
-
-	/*
-	 * Check long options if:
-	 *  1) we were passed some
-	 *  2) the arg is not just "-"
-	 *  3) either the arg starts with -- we are getopt_long_only()
-	 */
-	if (long_options != NULL && place != nargv[optind] &&
-	    (*place == '-' || (flags & FLAG_LONGONLY))) {
-		short_too = 0;
-		if (*place == '-')
-			place++;		/* --foo long option */
-		else if (*place != ':' && strchr(options, *place) != NULL)
-			short_too = 1;		/* could be short option too */
-
-		optchar = parse_long_options(nargv, options, long_options,
-		    idx, short_too);
-		if (optchar != -1) {
-			place = EMSG;
-			return (optchar);
-		}
-	}
-
-	if ((optchar = (int)*place++) == (int)':' ||
-	    (optchar == (int)'-' && *place != '\0') ||
-	    (oli = strchr(options, optchar)) == NULL) {
-		/*
-		 * If the user specified "-" and  '-' isn't listed in
-		 * options, return -1 (non-option) as per POSIX.
-		 * Otherwise, it is an unknown option character (or ':').
-		 */
-		if (optchar == (int)'-' && *place == '\0')
-			return (-1);
-		if (!*place)
-			++optind;
-		if (PRINT_ERROR)
-			warnx(illoptchar, optchar);
-		optopt = optchar;
-		return (BADCH);
-	}
-	if (long_options != NULL && optchar == 'W' && oli[1] == ';') {
-		/* -W long-option */
-		if (*place)			/* no space */
-			/* NOTHING */;
-		else if (++optind >= nargc) {	/* no arg */
-			place = EMSG;
-			if (PRINT_ERROR)
-				warnx(recargchar, optchar);
-			optopt = optchar;
-			return (BADARG);
-		} else				/* white space */
-			place = nargv[optind];
-		optchar = parse_long_options(nargv, options, long_options,
-		    idx, 0);
-		place = EMSG;
-		return (optchar);
-	}
-	if (*++oli != ':') {			/* doesn't take argument */
-		if (!*place)
-			++optind;
-	} else {				/* takes (optional) argument */
-		optarg = NULL;
-		if (*place)			/* no white space */
-			optarg = place;
-		else if (oli[1] != ':') {	/* arg not optional */
-			if (++optind >= nargc) {	/* no arg */
-				place = EMSG;
-				if (PRINT_ERROR)
-					warnx(recargchar, optchar);
-				optopt = optchar;
-				return (BADARG);
-			} else
-				optarg = nargv[optind];
-		}
-		place = EMSG;
-		++optind;
-	}
-	/* dump back option letter */
-	return (optchar);
-}
-
-#ifdef REPLACE_GETOPT
-/*
- * getopt --
- *	Parse argc/argv argument vector.
- *
- * [eventually this will replace the BSD getopt]
- */
-int
-getopt(int nargc, char * const *nargv, const char *options)
-{
-
-	/*
-	 * We don't pass FLAG_PERMUTE to getopt_internal() since
-	 * the BSD getopt(3) (unlike GNU) has never done this.
-	 *
-	 * Furthermore, since many privileged programs call getopt()
-	 * before dropping privileges it makes sense to keep things
-	 * as simple (and bug-free) as possible.
-	 */
-	return (getopt_internal(nargc, nargv, options, NULL, NULL, 0));
-}
-#endif /* REPLACE_GETOPT */
-
-/*
- * getopt_long --
- *	Parse argc/argv argument vector.
- */
-int
-getopt_long(int nargc, char * const *nargv, const char *options,
-    const struct option *long_options, int *idx)
-{
-
-	return (getopt_internal(nargc, nargv, options, long_options, idx,
-	    FLAG_PERMUTE));
-}
-
-/*
- * getopt_long_only --
- *	Parse argc/argv argument vector.
- */
-int
-getopt_long_only(int nargc, char * const *nargv, const char *options,
-    const struct option *long_options, int *idx)
-{
-
-	return (getopt_internal(nargc, nargv, options, long_options, idx,
-	    FLAG_PERMUTE|FLAG_LONGONLY));
-}

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/win32/wingetopt.h
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/win32/wingetopt.h b/thirdparty/librdkafka-0.11.1/win32/wingetopt.h
deleted file mode 100644
index 260915b..0000000
--- a/thirdparty/librdkafka-0.11.1/win32/wingetopt.h
+++ /dev/null
@@ -1,95 +0,0 @@
-#ifndef __GETOPT_H__
-/**
- * DISCLAIMER
- * This file has no copyright assigned and is placed in the Public Domain.
- * This file is a part of the w64 mingw-runtime package.
- *
- * The w64 mingw-runtime package and its code is distributed in the hope that it 
- * will be useful but WITHOUT ANY WARRANTY.  ALL WARRANTIES, EXPRESSED OR 
- * IMPLIED ARE HEREBY DISCLAIMED.  This includes but is not limited to 
- * warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- */
-
-#define __GETOPT_H__
-
-/* All the headers include this file. */
-#include <crtdefs.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-extern int optind;		/* index of first non-option in argv      */
-extern int optopt;		/* single option character, as parsed     */
-extern int opterr;		/* flag to enable built-in diagnostics... */
-				/* (user may set to zero, to suppress)    */
-
-extern char *optarg;		/* pointer to argument of current option  */
-
-extern int getopt(int nargc, char * const *nargv, const char *options);
-
-#ifdef _BSD_SOURCE
-/*
- * BSD adds the non-standard `optreset' feature, for reinitialisation
- * of `getopt' parsing.  We support this feature, for applications which
- * proclaim their BSD heritage, before including this header; however,
- * to maintain portability, developers are advised to avoid it.
- */
-# define optreset  __mingw_optreset
-extern int optreset;
-#endif
-#ifdef __cplusplus
-}
-#endif
-/*
- * POSIX requires the `getopt' API to be specified in `unistd.h';
- * thus, `unistd.h' includes this header.  However, we do not want
- * to expose the `getopt_long' or `getopt_long_only' APIs, when
- * included in this manner.  Thus, close the standard __GETOPT_H__
- * declarations block, and open an additional __GETOPT_LONG_H__
- * specific block, only when *not* __UNISTD_H_SOURCED__, in which
- * to declare the extended API.
- */
-#endif /* !defined(__GETOPT_H__) */
-
-#if !defined(__UNISTD_H_SOURCED__) && !defined(__GETOPT_LONG_H__)
-#define __GETOPT_LONG_H__
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-struct option		/* specification for a long form option...	*/
-{
-  const char *name;		/* option name, without leading hyphens */
-  int         has_arg;		/* does it take an argument?		*/
-  int        *flag;		/* where to save its status, or NULL	*/
-  int         val;		/* its associated status value		*/
-};
-
-enum    		/* permitted values for its `has_arg' field...	*/
-{
-  no_argument = 0,      	/* option never takes an argument	*/
-  required_argument,		/* option always requires an argument	*/
-  optional_argument		/* option may take an argument		*/
-};
-
-extern int getopt_long(int nargc, char * const *nargv, const char *options,
-    const struct option *long_options, int *idx);
-extern int getopt_long_only(int nargc, char * const *nargv, const char *options,
-    const struct option *long_options, int *idx);
-/*
- * Previous MinGW implementation had...
- */
-#ifndef HAVE_DECL_GETOPT
-/*
- * ...for the long form API only; keep this for compatibility.
- */
-# define HAVE_DECL_GETOPT	1
-#endif
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* !defined(__UNISTD_H_SOURCED__) && !defined(__GETOPT_LONG_H__) */
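
The header removed above declares the classic getopt(3)/getopt_long(3) interface that the Windows build previously bundled. For reference, a minimal usage sketch of that API (a hypothetical stand-alone program, not librdkafka code; on POSIX systems the same declarations come from the system <getopt.h>):

    #include <stdio.h>
    #include <getopt.h>    /* the bundled wingetopt.h provided these declarations on Windows */

    int main(int argc, char **argv) {
            int opt, idx = 0, verbose = 0;
            const char *file = NULL;
            static const struct option longopts[] = {
                    { "verbose", no_argument,       NULL, 'v' },
                    { "file",    required_argument, NULL, 'f' },
                    { NULL,      0,                 NULL, 0   }
            };

            while ((opt = getopt_long(argc, argv, "vf:", longopts, &idx)) != -1) {
                    switch (opt) {
                    case 'v': verbose = 1;   break;
                    case 'f': file = optarg; break;
                    default:  return 1;      /* getopt_long() already printed a diagnostic */
                    }
            }
            printf("verbose=%d file=%s\n", verbose, file ? file : "(none)");
            return 0;
    }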

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/win32/wintime.h
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/win32/wintime.h b/thirdparty/librdkafka-0.11.1/win32/wintime.h
deleted file mode 100644
index 9db7c7e..0000000
--- a/thirdparty/librdkafka-0.11.1/win32/wintime.h
+++ /dev/null
@@ -1,29 +0,0 @@
-#pragma once
-
-/**
- * gettimeofday() for Win32 from http://stackoverflow.com/questions/10905892/equivalent-of-gettimeday-for-windows
- */
-#define WIN32_LEAN_AND_MEAN
-#include <Windows.h>
-#include <stdint.h> // portable: uint64_t   MSVC: __int64 
-
-static int gettimeofday(struct timeval * tp, struct timezone * tzp)
-{
-        // Note: some broken versions only have 8 trailing zero's, the correct epoch has 9 trailing zero's
-        // This magic number is the number of 100 nanosecond intervals since January 1, 1601 (UTC)
-        // until 00:00:00 January 1, 1970 
-        static const uint64_t EPOCH = ((uint64_t)116444736000000000ULL);
-
-        SYSTEMTIME  system_time;
-        FILETIME    file_time;
-        uint64_t    time;
-
-        GetSystemTime(&system_time);
-        SystemTimeToFileTime(&system_time, &file_time);
-        time = ((uint64_t)file_time.dwLowDateTime);
-        time += ((uint64_t)file_time.dwHighDateTime) << 32;
-
-        tp->tv_sec = (long)((time - EPOCH) / 10000000L);
-        tp->tv_usec = (long)(system_time.wMilliseconds * 1000);
-        return 0;
-}
\ No newline at end of file
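
A quick sanity check on the EPOCH constant in the shim above: 1601-01-01 to 1970-01-01 spans 11,644,473,600 seconds, and FILETIME counts 100-nanosecond ticks, so the offset is that number of seconds times 10^7. An illustrative stand-alone C check (not part of the tree):

    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
            /* Seconds between 1601-01-01 and 1970-01-01, leap days included */
            const uint64_t secs  = 11644473600ULL;
            /* FILETIME resolution is 100 ns, i.e. 10,000,000 ticks per second */
            const uint64_t ticks = secs * 10000000ULL;

            printf("%llu\n", (unsigned long long)ticks);  /* prints 116444736000000000 */
            return 0;
    }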

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/.appveyor.yml
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/.appveyor.yml b/thirdparty/librdkafka-0.11.4/.appveyor.yml
new file mode 100644
index 0000000..2cb8722
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/.appveyor.yml
@@ -0,0 +1,88 @@
+version: 0.11.4-R-pre{build}
+pull_requests:
+  do_not_increment_build_number: true
+image: Visual Studio 2013
+configuration: Release
+environment:
+  matrix:
+  - platform: x64
+  - platform: win32
+install:
+- ps: "$OpenSSLVersion = \"1_0_2o\"\n$OpenSSLExe = \"OpenSSL-$OpenSSLVersion.exe\"\n\nRemove-Item C:\\OpenSSL-Win32 -recurse\nRemove-Item C:\\OpenSSL-Win64 -recurse\n\nWrite-Host \"Installing OpenSSL v1.0 32-bit ...\" -ForegroundColor Cyan\nWrite-Host \"Downloading...\"\n$exePath = \"$($env:USERPROFILE)\\Win32OpenSSL-1_0_2o.exe\"\n(New-Object Net.WebClient).DownloadFile('https://slproweb.com/download/Win32OpenSSL-1_0_2o.exe', $exePath)\nWrite-Host \"Installing...\"\ncmd /c start /wait $exePath /silent /verysilent /sp- /suppressmsgboxes /DIR=C:\\OpenSSL-Win32\nWrite-Host \"Installed\" -ForegroundColor Green\n\nWrite-Host \"Installing OpenSSL v1.0 64-bit ...\" -ForegroundColor Cyan\nWrite-Host \"Downloading...\"\n$exePath = \"$($env:USERPROFILE)\\Win64OpenSSL-1_0_2o.exe\"\n(New-Object Net.WebClient).DownloadFile('https://slproweb.com/download/Win64OpenSSL-1_0_2o.exe', $exePath)\nWrite-Host \"Installing...\"\ncmd /c start /wait $exePath /silent /verysilent /sp- /suppressmsgboxes /DIR=C
 :\\OpenSSL-Win64\nWrite-Host \"Installed\" -ForegroundColor Green\n\nif (!(Test-Path(\"C:\\OpenSSL-Win32\"))) {\n  echo \"Downloading https://slproweb.com/download/Win32$OpenSSLExe\"\n  Start-FileDownload 'https://slproweb.com/download/Win32$OpenSSLExe'\n  Start-Process \"Win32$OpenSSLExe\" -ArgumentList \"/silent /verysilent /sp- /suppressmsgboxes\" -Wait\n} else {\n   echo \"OpenSSL-Win32 already exists: not downloading\"\n}\n\nif (!(Test-Path(\"C:\\OpenSSL-Win64\"))) {\n  echo \"Downloading https://slproweb.com/download/Win64$OpenSSLExe\"\n  Start-FileDownload 'https://slproweb.com/download/Win64$OpenSSLExe' \n  Start-Process \"Win64$OpenSSLExe\" -ArgumentList \"/silent /verysilent /sp- /suppressmsgboxes\" -Wait\n} else {\n   echo \"OpenSSL-Win64 already exists: not downloading\"\n}\n\n\n\n# Download the CoApp tools.\n$msiPath = \"$($env:USERPROFILE)\\CoApp.Tools.Powershell.msi\"\n(New-Object Net.WebClient).DownloadFile('http://coapp.org/files/CoApp.Tools.Powershell.msi', $msiPat
 h)\n\n# Install the CoApp tools from the downloaded .msi.\nStart-Process -FilePath msiexec -ArgumentList /i, $msiPath, /quiet -Wait\n\n# Make the tools available for later PS scripts to use.\n$env:PSModulePath = $env:PSModulePath + ';C:\\Program Files (x86)\\Outercurve Foundation\\Modules'\nImport-Module CoApp\n\n# Install NuGet\n#Install-PackageProvider NuGet -MinimumVersion '2.8.5.201' -Force\n#Import-PackageProvider NuGet -MinimumVersion '2.8.5.201' -Force\n\n# Install CoApp for creating nuget packages\n#$msiPath = \"$($env:USERPROFILE)\\CoApp.Tools.Powershell.msi\"\n#(New-Object #Net.WebClient).DownloadFile('http://downloads.coapp.org/files/CoApp.Tools.Powershell.msi', $msiPath)\n#cmd /c start /wait msiexec /i \"$msiPath\" /quiet\n\n# Install CoApp module\n#Install-Module CoApp -Force"
+cache:
+- c:\OpenSSL-Win32
+- c:\OpenSSL-Win64
+nuget:
+  account_feed: true
+  project_feed: true
+  disable_publish_on_pr: true
+before_build:
+- cmd: nuget restore win32/librdkafka.sln
+build:
+  project: win32/librdkafka.sln
+  publish_nuget: true
+  publish_nuget_symbols: true
+  include_nuget_references: true
+  parallel: true
+  verbosity: normal
+test_script:
+- cmd: if exist DISABLED\win32\outdir\v140 ( win32\outdir\v140\%PLATFORM%\%CONFIGURATION%\tests.exe -l -p1 ) else ( win32\outdir\v120\%PLATFORM%\%CONFIGURATION%\tests.exe -l -p1 )
+artifacts:
+- path: test_report*.json
+  name: Test report
+- path: '*.nupkg'
+  name: Packages
+- path: '**\*.dll'
+  name: Libraries
+- path: '**\*.lib'
+  name: Libraries
+- path: '**\*.pdb'
+  name: Libraries
+- path: '**\*.exe'
+  name: Executables
+before_deploy:
+- ps: >-
+    # FIXME: Add to Deployment condition above:
+
+    # APPVEYOR_REPO_TAG = true
+
+
+
+    # This is the CoApp .autopkg file to create.
+
+    $autopkgFile = "win32/librdkafka.autopkg"
+
+
+    # Get the ".autopkg.template" file, replace "@version" with the Appveyor version number, then save to the ".autopkg" file.
+
+    cat ($autopkgFile + ".template") | % { $_ -replace "@version", $env:appveyor_build_version } > $autopkgFile
+
+
+    # Use the CoApp tools to create NuGet native packages from the .autopkg.
+
+    Write-NuGetPackage $autopkgFile
+
+
+    # Push all newly created .nupkg files as Appveyor artifacts for later deployment.
+
+    Get-ChildItem .\*.nupkg | % { Push-AppveyorArtifact $_.FullName -FileName $_.Name }
+deploy:
+- provider: S3
+  access_key_id:
+    secure: t+Xo4x1mYVbqzvUDlnuMgFGp8LjQJNOfsDUAMxBsVH4=
+  secret_access_key:
+    secure: SNziQPPJs4poCHM7dk6OxufUYcGQhMWiNPx6Y1y6DYuWGjPc3K0APGeousLHsbLv
+  region: us-west-1
+  bucket: librdkafka-ci-packages
+  folder: librdkafka/p-librdkafka__bld-appveyor__plat-windows__arch-$(platform)__bldtype-$(configuration)__tag-$(APPVEYOR_REPO_TAG_NAME)__sha-$(APPVEYOR_REPO_COMMIT)__bid-$(APPVEYOR_BUILD_ID)
+  artifact: /.*\.(nupkg)/
+  max_error_retry: 3
+  on:
+    APPVEYOR_REPO_TAG: true
+notifications:
+- provider: Email
+  to:
+  - magnus@edenhill.se
+  on_build_success: false
+  on_build_failure: true
+  on_build_status_changed: true

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/.dir-locals.el
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/.dir-locals.el b/thirdparty/librdkafka-0.11.4/.dir-locals.el
new file mode 100644
index 0000000..22ca922
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/.dir-locals.el
@@ -0,0 +1,3 @@
+( (c-mode . ((c-file-style . "linux"))) )
+((nil . ((compile-command . "LC_ALL=C make -C $(git rev-parse --show-toplevel) -k"))))
+

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/.doozer.json
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/.doozer.json b/thirdparty/librdkafka-0.11.4/.doozer.json
new file mode 100644
index 0000000..27252da
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/.doozer.json
@@ -0,0 +1,110 @@
+{
+  "targets": {
+    "xenial-amd64": {
+
+      "buildenv": "xenial-amd64",
+      "builddeps": [
+        "build-essential",
+        "python",
+        "zlib1g-dev",
+        "libssl-dev",
+        "libsasl2-dev"
+      ],
+      "buildcmd": [
+        "./configure",
+          "make -j ${PARALLEL}",
+          "make -C tests build"
+      ],
+      "testcmd": [
+          "make -C tests run_local"
+      ]
+    },
+
+    "xenial-i386": {
+      "_comment": "including liblz4-dev here to verify that WITH_LZ4_EXT works",
+      "buildenv": "xenial-i386",
+      "builddeps": [
+        "build-essential",
+        "python",
+        "zlib1g-dev",
+        "libssl-dev",
+        "libsasl2-dev",
+        "liblz4-dev"
+      ],
+      "buildcmd": [
+        "./configure",
+        "make -j ${PARALLEL}",
+        "make -C tests build"
+      ],
+      "testcmd": [
+        "make -C tests run_local"
+      ]
+    },
+
+    "xenial-armhf": {
+
+      "buildenv": "xenial-armhf",
+      "builddeps": [
+        "build-essential",
+        "python",
+        "zlib1g-dev",
+        "libssl-dev",
+        "libsasl2-dev"
+      ],
+      "buildcmd": [
+        "./configure",
+        "make -j ${PARALLEL}",
+          "make -j ${PARALLEL} -C tests build",
+      ],
+      "testcmd": [
+        "cd tests",
+        "./run-test.sh -p1 -l ./merged",
+        "cd .."
+      ]
+    },
+
+    "stretch-mips": {
+
+      "buildenv": "stretch-mips",
+      "builddeps": [
+        "build-essential",
+        "python",
+        "zlib1g-dev",
+        "libssl-dev",
+        "libsasl2-dev"
+      ],
+      "buildcmd": [
+        "./configure",
+        "make -j ${PARALLEL}",
+          "make -j ${PARALLEL} -C tests build",
+      ],
+      "testcmd": [
+        "cd tests",
+        "./run-test.sh -p1 -l ./merged",
+        "cd .."
+      ]
+    },
+
+    "cmake-xenial-amd64": {
+
+      "buildenv": "xenial-amd64",
+      "builddeps": [
+        "build-essential",
+        "python",
+        "zlib1g-dev",
+        "libssl-dev",
+        "libsasl2-dev",
+        "cmake"
+      ],
+      "buildcmd": [
+        "cmake -H. -B_builds -DCMAKE_VERBOSE_MAKEFILE=ON -DCMAKE_BUILD_TYPE=Debug",
+        "cmake --build _builds",
+      ],
+      "testcmd": [
+        "cd _builds",
+        "ctest -VV -R RdKafkaTestBrokerLess"
+      ]
+    }
+  },
+  "artifacts": ["config.log", "Makefile.config", "config.h"]
+}

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/.github/ISSUE_TEMPLATE
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/.github/ISSUE_TEMPLATE b/thirdparty/librdkafka-0.11.4/.github/ISSUE_TEMPLATE
new file mode 100644
index 0000000..eb538b3
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/.github/ISSUE_TEMPLATE
@@ -0,0 +1,32 @@
+Read the FAQ first: https://github.com/edenhill/librdkafka/wiki/FAQ
+
+
+
+Description
+===========
+<your issue description goes here>
+
+
+How to reproduce
+================
+<your steps how to reproduce goes here, or remove section if not relevant>
+
+
+**IMPORTANT**: Always try to reproduce the issue on the latest released version (see https://github.com/edenhill/librdkafka/releases); if it can't be reproduced on the latest version, the issue has most likely already been fixed.
+
+
+Checklist
+=========
+
+**IMPORTANT**: We will close issues where the checklist has not been completed.
+
+Please provide the following information:
+
+ - [x] librdkafka version (release number or git tag): `<REPLACE with e.g., v0.10.5 or a git sha. NOT "latest" or "current">`
+ - [ ] Apache Kafka version: `<REPLACE with e.g., 0.10.2.3>`
+ - [ ] librdkafka client configuration: `<REPLACE with e.g., message.timeout.ms=123, auto.reset.offset=earliest, ..>`
+ - [ ] Operating system: `<REPLACE with e.g., Centos 5 (x64)>`
+ - [ ] Provide logs (with `debug=..` as necessary) from librdkafka
+ - [ ] Provide broker log excerpts
+ - [ ] Critical issue
+

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/.gitignore
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/.gitignore b/thirdparty/librdkafka-0.11.4/.gitignore
new file mode 100644
index 0000000..0598bca
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/.gitignore
@@ -0,0 +1,28 @@
+config.h
+config.log*
+config.cache
+Makefile.config
+rdkafka*.pc
+*~
+\#*
+*.o
+*.so
+*.so.?
+*.dylib
+*.a
+*.d
+librdkafka*.lds
+core
+vgcore.*
+*dSYM/
+*.offset
+SOURCES
+gmon.out
+*.gz
+*.bz2
+*.deb
+*.rpm
+staging-docs
+tmp
+stats*.json
+test_report*.json

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/.travis.yml
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/.travis.yml b/thirdparty/librdkafka-0.11.4/.travis.yml
new file mode 100644
index 0000000..4154de5
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/.travis.yml
@@ -0,0 +1,42 @@
+language: c
+cache: ccache
+env:
+- ARCH=x64
+compiler:
+- gcc
+- clang
+os:
+- linux
+- osx
+dist: trusty
+sudo: false
+before_install:
+  - if [[ "${TRAVIS_OS_NAME}_$CC" == "linux_gcc" ]]; then sudo make -C packaging/rpm MOCK_CONFIG=el7-x86_64 prepare_ubuntu ; fi
+before_script:
+ - ccache -s || echo "CCache is not available."
+script:
+- rm -rf artifacts dest
+- mkdir dest artifacts
+- if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then CPPFLAGS="-I/usr/local/opt/openssl/include
+  -L/usr/local/opt/openssl/lib" ./configure --enable-static --disable-lz4 --prefix="$PWD/dest" ; else ./configure --enable-static --disable-lz4 --prefix="$PWD/dest" ; fi
+- make -j2 all examples check && make -C tests run_local
+- make install
+- (cd dest && tar cvzf ../artifacts/librdkafka.tar.gz .)
+- if [[ "${TRAVIS_OS_NAME}_$CC" == "linux_gcc" ]]; then sudo make -C packaging/rpm MOCK_CONFIG=el7-x86_64 all copy-artifacts ; fi
+- if [[ "${TRAVIS_OS_NAME}_$CC" == "linux_gcc" ]]; then docker run -it -v $PWD:/v microsoft/dotnet:2-sdk /v/packaging/tools/build-debian.sh /v /v/artifacts/librdkafka-debian9.tgz; fi
+deploy:
+  provider: s3
+  access_key_id:
+    secure: "nGcknL5JZ5XYCEJ96UeDtnLOOidWsfXrk2x91Z9Ip2AyrUtdfZBc8BX16C7SAQbBeb4PQu/OjRBQWTIRqU64ZEQU1Z0lHjxCiGEt5HO0YlXWvZ8OJGAQ0wSmrQED850lWjGW2z5MpDqqxbZyATE8VksW5dtGiHgNuITinVW8Lok="
+  secret_access_key:
+    secure: "J+LygNeoXQImN9E7EARNmcgLpqm6hoRjxwHJaen9opeuSDowKDpZxP7ixSml3BEn2pJJ4kpsdj5A8t5uius+qC4nu9mqSAZcmdKeSmliCbH7kj4J9MR7LBcXk3Uf515QGm7y4nzw+c1PmpteYL5S06Kgqp+KkPRLKTS2NevVZuY="
+  bucket: librdkafka-ci-packages
+  region: us-west-1
+  skip_cleanup: true
+  local-dir: artifacts
+  upload-dir: librdkafka/p-librdkafka__bld-travis__plat-${TRAVIS_OS_NAME}__arch-${ARCH}__tag-${TRAVIS_TAG}__sha-${TRAVIS_COMMIT}__bid-${TRAVIS_JOB_NUMBER}
+  on:
+    condition: "$CC = gcc"
+    repo: edenhill/librdkafka
+    all_branches: true
+    tags: true

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/CMakeLists.txt
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/CMakeLists.txt b/thirdparty/librdkafka-0.11.4/CMakeLists.txt
new file mode 100644
index 0000000..93379e2
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/CMakeLists.txt
@@ -0,0 +1,182 @@
+cmake_minimum_required(VERSION 3.2)
+project(RdKafka)
+
+# Options. No 'RDKAFKA_' prefix to match old C++ code. {
+
+# This option does not actually affect the build, only the C code
+# (see 'rd_kafka_version_str'); in CMake the build type (Debug, Release,
+# etc.) is normally used for this purpose instead.
+option(WITHOUT_OPTIMIZATION "Disable optimization" OFF)
+
+option(ENABLE_DEVEL "Enable development asserts, checks, etc" OFF)
+option(ENABLE_REFCNT_DEBUG "Enable refcnt debugging" OFF)
+option(ENABLE_SHAREDPTR_DEBUG "Enable sharedptr debugging" OFF)
+
+set(TRYCOMPILE_SRC_DIR "${CMAKE_CURRENT_LIST_DIR}/packaging/cmake/try_compile")
+
+# ZLIB {
+find_package(ZLIB QUIET)
+if(ZLIB_FOUND)
+  set(with_zlib_default ON)
+else()
+  set(with_zlib_default OFF)
+endif()
+option(WITH_ZLIB "With ZLIB" ${with_zlib_default})
+# }
+
+# LibDL {
+try_compile(
+    WITH_LIBDL
+    "${CMAKE_CURRENT_BINARY_DIR}/try_compile"
+    "${TRYCOMPILE_SRC_DIR}/dlopen_test.c"
+    LINK_LIBRARIES "${CMAKE_DL_LIBS}"
+)
+# }
+
+# WITH_PLUGINS {
+if(WITH_LIBDL)
+  set(with_plugins_default ON)
+else()
+  set(with_plugins_default OFF)
+endif()
+option(WITH_PLUGINS "With plugin support" ${with_plugins_default})
+# }
+
+# OpenSSL {
+if(WITH_BUNDLED_SSL) # option from 'h2o' parent project
+  set(with_ssl_default ON)
+else()
+  find_package(OpenSSL QUIET)
+  if(OpenSSL_FOUND)
+    set(with_ssl_default ON)
+  else()
+    set(with_ssl_default OFF)
+  endif()
+endif()
+option(WITH_SSL "With SSL" ${with_ssl_default})
+# }
+
+# SASL {
+if(WIN32)
+  set(with_sasl_default ON)
+else()
+  include(FindPkgConfig)
+  pkg_check_modules(SASL libsasl2)
+  if(SASL_FOUND)
+    set(with_sasl_default ON)
+  else()
+    try_compile(
+        WITH_SASL_CYRUS_BOOL
+        "${CMAKE_CURRENT_BINARY_DIR}/try_compile"
+        "${TRYCOMPILE_SRC_DIR}/libsasl2_test.c"
+        LINK_LIBRARIES "-lsasl2"
+        )
+     if(WITH_SASL_CYRUS_BOOL)
+        set(with_sasl_default ON)
+        set(SASL_LIBRARIES "-lsasl2")
+     else()
+        set(with_sasl_default OFF)
+     endif()
+  endif()
+endif()
+option(WITH_SASL "With SASL" ${with_sasl_default})
+if(WITH_SASL)
+  if(WITH_SSL)
+    set(WITH_SASL_SCRAM ON)
+  endif()
+  if(NOT WIN32)
+    set(WITH_SASL_CYRUS ON)
+  endif()
+endif()
+# }
+
+# }
+
+option(RDKAFKA_BUILD_EXAMPLES "Build examples" OFF)
+option(RDKAFKA_BUILD_TESTS "Build tests" OFF)
+if(WIN32)
+    option(WITHOUT_WIN32_CONFIG "Avoid including win32_config.h on cmake builds" ON)
+endif(WIN32)
+
+# In:
+# * TRYCOMPILE_SRC_DIR
+# Out:
+# * HAVE_ATOMICS_32
+# * HAVE_ATOMICS_32_SYNC
+# * HAVE_ATOMICS_64
+# * HAVE_ATOMICS_64_SYNC
+# * HAVE_REGEX
+# * HAVE_STRNDUP
+# * LINK_ATOMIC
+include("packaging/cmake/try_compile/rdkafka_setup.cmake")
+
+set(GENERATED_DIR "${CMAKE_CURRENT_BINARY_DIR}/generated")
+
+# In:
+# * WITHOUT_OPTIMIZATION
+# * ENABLE_DEVEL
+# * ENABLE_REFCNT_DEBUG
+# * ENABLE_SHAREDPTR_DEBUG
+# * HAVE_ATOMICS_32
+# * HAVE_ATOMICS_32_SYNC
+# * HAVE_ATOMICS_64
+# * HAVE_ATOMICS_64_SYNC
+# * WITH_ZLIB
+# * WITH_SSL
+# * WITH_SASL
+# * HAVE_REGEX
+# * HAVE_STRNDUP
+configure_file("packaging/cmake/config.h.in" "${GENERATED_DIR}/config.h")
+
+# Installation (https://github.com/forexample/package-example) {
+
+include(GNUInstallDirs)
+
+set(config_install_dir "lib/cmake/${PROJECT_NAME}")
+
+set(generated_dir "${CMAKE_CURRENT_BINARY_DIR}/generated")
+
+set(project_config "${generated_dir}/${PROJECT_NAME}Config.cmake")
+set(targets_export_name "${PROJECT_NAME}Targets")
+set(namespace "${PROJECT_NAME}::")
+
+include(CMakePackageConfigHelpers)
+
+# In:
+#   * targets_export_name
+#   * PROJECT_NAME
+configure_package_config_file(
+    "packaging/cmake/Config.cmake.in"
+    "${project_config}"
+    INSTALL_DESTINATION "${config_install_dir}"
+)
+
+install(
+    FILES "${project_config}"
+    DESTINATION "${config_install_dir}"
+)
+
+install(
+    EXPORT "${targets_export_name}"
+    NAMESPACE "${namespace}"
+    DESTINATION "${config_install_dir}"
+)
+
+install(
+    FILES LICENSES.txt
+    DESTINATION "share/licenses/librdkafka"
+)
+
+# }
+
+add_subdirectory(src)
+add_subdirectory(src-cpp)
+
+if(RDKAFKA_BUILD_EXAMPLES)
+  add_subdirectory(examples)
+endif()
+
+if(RDKAFKA_BUILD_TESTS)
+  enable_testing()
+  add_subdirectory(tests)
+endif()
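
The HAVE_ATOMICS_* and LINK_ATOMIC results consumed above are produced by the try_compile probes under packaging/cmake/try_compile/. A representative sketch of what such a probe can look like, assuming the GCC/Clang __atomic builtins (the actual probe sources in that directory may differ):

    #include <stdint.h>

    int main(void) {
            int64_t v = 0;

            /* If this compiles and links as-is, 64-bit __atomic builtins are
             * natively available; if it only links with -latomic, the build
             * needs to add that library to the link line. */
            __atomic_store_n(&v, 1, __ATOMIC_SEQ_CST);
            return (int)__atomic_add_fetch(&v, 1, __ATOMIC_SEQ_CST);
    }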

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/CODE_OF_CONDUCT.md
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/CODE_OF_CONDUCT.md b/thirdparty/librdkafka-0.11.4/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000..dbbde19
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/CODE_OF_CONDUCT.md
@@ -0,0 +1,46 @@
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation.
+
+## Our Standards
+
+Examples of behavior that contributes to creating a positive environment include:
+
+* Using welcoming and inclusive language
+* Being respectful of differing viewpoints and experiences
+* Gracefully accepting constructive criticism
+* Focusing on what is best for the community
+* Showing empathy towards other community members
+
+Examples of unacceptable behavior by participants include:
+
+* The use of sexualized language or imagery and unwelcome sexual attention or advances
+* Trolling, insulting/derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or electronic address, without explicit permission
+* Other conduct which could reasonably be considered inappropriate in a professional setting
+
+## Our Responsibilities
+
+Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.
+
+## Scope
+
+This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at rdkafka@edenhill.se. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
+
+Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]
+
+[homepage]: http://contributor-covenant.org
+[version]: http://contributor-covenant.org/version/1/4/

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/CONFIGURATION.md
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/CONFIGURATION.md b/thirdparty/librdkafka-0.11.4/CONFIGURATION.md
new file mode 100644
index 0000000..7bc060f
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/CONFIGURATION.md
@@ -0,0 +1,138 @@
+//@file
+## Global configuration properties
+
+Property                                 | C/P | Range           |       Default | Description              
+-----------------------------------------|-----|-----------------|--------------:|--------------------------
+builtin.features                         |  *  |                 | gzip, snappy, ssl, sasl, regex, lz4, sasl_gssapi, sasl_plain, sasl_scram, plugins | Indicates the builtin features for this build of librdkafka. An application can either query this value or attempt to set it with its list of required features to check for library support. <br>*Type: CSV flags*
+client.id                                |  *  |                 |       rdkafka | Client identifier. <br>*Type: string*
+metadata.broker.list                     |  *  |                 |               | Initial list of brokers as a CSV list of broker host or host:port. The application may also use `rd_kafka_brokers_add()` to add brokers during runtime. <br>*Type: string*
+bootstrap.servers                        |  *  |                 |               | Alias for `metadata.broker.list`
+message.max.bytes                        |  *  | 1000 .. 1000000000 |       1000000 | Maximum Kafka protocol request message size. <br>*Type: integer*
+message.copy.max.bytes                   |  *  | 0 .. 1000000000 |         65535 | Maximum size for message to be copied to buffer. Messages larger than this will be passed by reference (zero-copy) at the expense of larger iovecs. <br>*Type: integer*
+receive.message.max.bytes                |  *  | 1000 .. 2147483647 |     100000000 | Maximum Kafka protocol response message size. This serves as a safety precaution to avoid memory exhaustion in case of protocol hiccups. This value is automatically adjusted upwards to be at least `fetch.max.bytes` + 512 to allow for protocol overhead. <br>*Type: integer*
+max.in.flight.requests.per.connection    |  *  | 1 .. 1000000    |       1000000 | Maximum number of in-flight requests per broker connection. This is a generic property applied to all broker communication, however it is primarily relevant to produce requests. In particular, note that other mechanisms limit the number of outstanding consumer fetch request per broker to one. <br>*Type: integer*
+max.in.flight                            |  *  |                 |               | Alias for `max.in.flight.requests.per.connection`
+metadata.request.timeout.ms              |  *  | 10 .. 900000    |         60000 | Non-topic request timeout in milliseconds. This is for metadata requests, etc. <br>*Type: integer*
+topic.metadata.refresh.interval.ms       |  *  | -1 .. 3600000   |        300000 | Topic metadata refresh interval in milliseconds. The metadata is automatically refreshed on error and connect. Use -1 to disable the intervalled refresh. <br>*Type: integer*
+metadata.max.age.ms                      |  *  | 1 .. 86400000   |            -1 | Metadata cache max age. Defaults to topic.metadata.refresh.interval.ms * 3 <br>*Type: integer*
+topic.metadata.refresh.fast.interval.ms  |  *  | 1 .. 60000      |           250 | When a topic loses its leader a new metadata request will be enqueued with this initial interval, exponentially increasing until the topic metadata has been refreshed. This is used to recover quickly from transitioning leader brokers. <br>*Type: integer*
+topic.metadata.refresh.fast.cnt          |  *  | 0 .. 1000       |            10 | *Deprecated: No longer used.* <br>*Type: integer*
+topic.metadata.refresh.sparse            |  *  | true, false     |          true | Sparse metadata requests (consumes less network bandwidth) <br>*Type: boolean*
+topic.blacklist                          |  *  |                 |               | Topic blacklist, a comma-separated list of regular expressions for matching topic names that should be ignored in broker metadata information as if the topics did not exist. <br>*Type: pattern list*
+debug                                    |  *  | generic, broker, topic, metadata, feature, queue, msg, protocol, cgrp, security, fetch, interceptor, plugin, consumer, all |               | A comma-separated list of debug contexts to enable. Detailed Producer debugging: broker,topic,msg. Consumer: consumer,cgrp,topic,fetch <br>*Type: CSV flags*
+socket.timeout.ms                        |  *  | 10 .. 300000    |         60000 | Default timeout for network requests. Producer: ProduceRequests will use the lesser value of socket.timeout.ms and remaining message.timeout.ms for the first message in the batch. Consumer: FetchRequests will use fetch.wait.max.ms + socket.timeout.ms.  <br>*Type: integer*
+socket.blocking.max.ms                   |  *  | 1 .. 60000      |          1000 | Maximum time a broker socket operation may block. A lower value improves responsiveness at the expense of slightly higher CPU usage. **Deprecated** <br>*Type: integer*
+socket.send.buffer.bytes                 |  *  | 0 .. 100000000  |             0 | Broker socket send buffer size. System default is used if 0. <br>*Type: integer*
+socket.receive.buffer.bytes              |  *  | 0 .. 100000000  |             0 | Broker socket receive buffer size. System default is used if 0. <br>*Type: integer*
+socket.keepalive.enable                  |  *  | true, false     |         false | Enable TCP keep-alives (SO_KEEPALIVE) on broker sockets <br>*Type: boolean*
+socket.nagle.disable                     |  *  | true, false     |         false | Disable the Nagle algorithm (TCP_NODELAY). <br>*Type: boolean*
+socket.max.fails                         |  *  | 0 .. 1000000    |             1 | Disconnect from broker when this number of send failures (e.g., timed out requests) is reached. Disable with 0. NOTE: The connection is automatically re-established. <br>*Type: integer*
+broker.address.ttl                       |  *  | 0 .. 86400000   |          1000 | How long to cache the broker address resolving results (milliseconds). <br>*Type: integer*
+broker.address.family                    |  *  | any, v4, v6     |           any | Allowed broker IP address families: any, v4, v6 <br>*Type: enum value*
+reconnect.backoff.jitter.ms              |  *  | 0 .. 3600000    |           500 | Throttle broker reconnection attempts by this value +-50%. <br>*Type: integer*
+statistics.interval.ms                   |  *  | 0 .. 86400000   |             0 | librdkafka statistics emit interval. The application also needs to register a stats callback using `rd_kafka_conf_set_stats_cb()`. The granularity is 1000ms. A value of 0 disables statistics. <br>*Type: integer*
+enabled_events                           |  *  | 0 .. 2147483647 |             0 | See `rd_kafka_conf_set_events()` <br>*Type: integer*
+error_cb                                 |  *  |                 |               | Error callback (set with rd_kafka_conf_set_error_cb()) <br>*Type: pointer*
+throttle_cb                              |  *  |                 |               | Throttle callback (set with rd_kafka_conf_set_throttle_cb()) <br>*Type: pointer*
+stats_cb                                 |  *  |                 |               | Statistics callback (set with rd_kafka_conf_set_stats_cb()) <br>*Type: pointer*
+log_cb                                   |  *  |                 |               | Log callback (set with rd_kafka_conf_set_log_cb()) <br>*Type: pointer*
+log_level                                |  *  | 0 .. 7          |             6 | Logging level (syslog(3) levels) <br>*Type: integer*
+log.queue                                |  *  | true, false     |         false | Disable spontaneous log_cb from internal librdkafka threads, instead enqueue log messages on queue set with `rd_kafka_set_log_queue()` and serve log callbacks or events through the standard poll APIs. **NOTE**: Log messages will linger in a temporary queue until the log queue has been set. <br>*Type: boolean*
+log.thread.name                          |  *  | true, false     |          true | Print internal thread name in log messages (useful for debugging librdkafka internals) <br>*Type: boolean*
+log.connection.close                     |  *  | true, false     |          true | Log broker disconnects. It might be useful to turn this off when interacting with 0.9 brokers with an aggressive `connection.max.idle.ms` value. <br>*Type: boolean*
+socket_cb                                |  *  |                 |               | Socket creation callback to provide race-free CLOEXEC <br>*Type: pointer*
+connect_cb                               |  *  |                 |               | Socket connect callback <br>*Type: pointer*
+closesocket_cb                           |  *  |                 |               | Socket close callback <br>*Type: pointer*
+open_cb                                  |  *  |                 |               | File open callback to provide race-free CLOEXEC <br>*Type: pointer*
+opaque                                   |  *  |                 |               | Application opaque (set with rd_kafka_conf_set_opaque()) <br>*Type: pointer*
+default_topic_conf                       |  *  |                 |               | Default topic configuration for automatically subscribed topics <br>*Type: pointer*
+internal.termination.signal              |  *  | 0 .. 128        |             0 | Signal that librdkafka will use to quickly terminate on rd_kafka_destroy(). If this signal is not set then there will be a delay before rd_kafka_wait_destroyed() returns true as internal threads are timing out their system calls. If this signal is set however the delay will be minimal. The application should mask this signal as an internal signal handler is installed. <br>*Type: integer*
+api.version.request                      |  *  | true, false     |          true | Request broker's supported API versions to adjust functionality to available protocol features. If set to false, or the ApiVersionRequest fails, the fallback version `broker.version.fallback` will be used. **NOTE**: Depends on broker version >=0.10.0. If the request is not supported by (an older) broker the `broker.version.fallback` fallback is used. <br>*Type: boolean*
+api.version.request.timeout.ms           |  *  | 1 .. 300000     |         10000 | Timeout for broker API version requests. <br>*Type: integer*
+api.version.fallback.ms                  |  *  | 0 .. 604800000  |       1200000 | Dictates how long the `broker.version.fallback` fallback is used in the case the ApiVersionRequest fails. **NOTE**: The ApiVersionRequest is only issued when a new connection to the broker is made (such as after an upgrade). <br>*Type: integer*
+broker.version.fallback                  |  *  |                 |         0.9.0 | Older broker versions (<0.10.0) provide no way for a client to query for supported protocol features (ApiVersionRequest, see `api.version.request`), making it impossible for the client to know what features it may use. As a workaround, a user may set this property to the expected broker version and the client will automatically adjust its feature set accordingly if the ApiVersionRequest fails (or is disabled). The fallback broker version will be used for `api.version.fallback.ms`. Valid values are: 0.9.0, 0.8.2, 0.8.1, 0.8.0. Any other value, such as 0.10.2.1, enables ApiVersionRequests. <br>*Type: string*
+security.protocol                        |  *  | plaintext, ssl, sasl_plaintext, sasl_ssl |     plaintext | Protocol used to communicate with brokers. <br>*Type: enum value*
+ssl.cipher.suites                        |  *  |                 |               | A cipher suite is a named combination of authentication, encryption, MAC and key exchange algorithm used to negotiate the security settings for a network connection using TLS or SSL network protocol. See manual page for `ciphers(1)` and `SSL_CTX_set_cipher_list(3)`. <br>*Type: string*
+ssl.key.location                         |  *  |                 |               | Path to client's private key (PEM) used for authentication. <br>*Type: string*
+ssl.key.password                         |  *  |                 |               | Private key passphrase <br>*Type: string*
+ssl.certificate.location                 |  *  |                 |               | Path to client's public key (PEM) used for authentication. <br>*Type: string*
+ssl.ca.location                          |  *  |                 |               | File or directory path to CA certificate(s) for verifying the broker's key. <br>*Type: string*
+ssl.crl.location                         |  *  |                 |               | Path to CRL for verifying broker's certificate validity. <br>*Type: string*
+ssl.keystore.location                    |  *  |                 |               | Path to client's keystore (PKCS#12) used for authentication. <br>*Type: string*
+ssl.keystore.password                    |  *  |                 |               | Client's keystore (PKCS#12) password. <br>*Type: string*
+sasl.mechanisms                          |  *  |                 |        GSSAPI | SASL mechanism to use for authentication. Supported: GSSAPI, PLAIN, SCRAM-SHA-256, SCRAM-SHA-512. **NOTE**: Despite the name only one mechanism must be configured. <br>*Type: string*
+sasl.mechanism                           |  *  |                 |               | Alias for `sasl.mechanisms`
+sasl.kerberos.service.name               |  *  |                 |         kafka | Kerberos principal name that Kafka runs as, not including /hostname@REALM <br>*Type: string*
+sasl.kerberos.principal                  |  *  |                 |   kafkaclient | This client's Kerberos principal name. (Not supported on Windows, will use the logon user's principal). <br>*Type: string*
+sasl.kerberos.kinit.cmd                  |  *  |                 | kinit -S "%{sasl.kerberos.service.name}/%{broker.name}" -k -t "%{sasl.kerberos.keytab}" %{sasl.kerberos.principal} | Full kerberos kinit command string, %{config.prop.name} is replaced by corresponding config object value, %{broker.name} returns the broker's hostname. <br>*Type: string*
+sasl.kerberos.keytab                     |  *  |                 |               | Path to Kerberos keytab file. Uses system default if not set. **NOTE**: This is not automatically used but must be added to the template in sasl.kerberos.kinit.cmd as ` ... -t %{sasl.kerberos.keytab}`. <br>*Type: string*
+sasl.kerberos.min.time.before.relogin    |  *  | 1 .. 86400000   |         60000 | Minimum time in milliseconds between key refresh attempts. <br>*Type: integer*
+sasl.username                            |  *  |                 |               | SASL username for use with the PLAIN and SASL-SCRAM-.. mechanisms <br>*Type: string*
+sasl.password                            |  *  |                 |               | SASL password for use with the PLAIN and SASL-SCRAM-.. mechanism <br>*Type: string*
+plugin.library.paths                     |  *  |                 |               | List of plugin libraries to load (; separated). The library search path is platform dependent (see dlopen(3) for Unix and LoadLibrary() for Windows). If no filename extension is specified the platform-specific extension (such as .dll or .so) will be appended automatically. <br>*Type: string*
+interceptors                             |  *  |                 |               | Interceptors added through rd_kafka_conf_interceptor_add_..() and any configuration handled by interceptors. <br>*Type: *
+group.id                                 |  *  |                 |               | Client group id string. All clients sharing the same group.id belong to the same group. <br>*Type: string*
+partition.assignment.strategy            |  *  |                 | range,roundrobin | Name of partition assignment strategy to use when elected group leader assigns partitions to group members. <br>*Type: string*
+session.timeout.ms                       |  *  | 1 .. 3600000    |         30000 | Client group session and failure detection timeout. <br>*Type: integer*
+heartbeat.interval.ms                    |  *  | 1 .. 3600000    |          1000 | Group session keepalive heartbeat interval. <br>*Type: integer*
+group.protocol.type                      |  *  |                 |      consumer | Group protocol type <br>*Type: string*
+coordinator.query.interval.ms            |  *  | 1 .. 3600000    |        600000 | How often to query for the current client group coordinator. If the currently assigned coordinator is down the configured query interval will be divided by ten to more quickly recover in case of coordinator reassignment. <br>*Type: integer*
+enable.auto.commit                       |  C  | true, false     |          true | Automatically and periodically commit offsets in the background. Note: setting this to false does not prevent the consumer from fetching previously committed start offsets. To circumvent this behaviour set specific start offsets per partition in the call to assign(). <br>*Type: boolean*
+auto.commit.interval.ms                  |  C  | 0 .. 86400000   |          5000 | The frequency in milliseconds that the consumer offsets are committed (written) to offset storage. (0 = disable). This setting is used by the high-level consumer. <br>*Type: integer*
+enable.auto.offset.store                 |  C  | true, false     |          true | Automatically store offset of last message provided to application. <br>*Type: boolean*
+queued.min.messages                      |  C  | 1 .. 10000000   |        100000 | Minimum number of messages per topic+partition librdkafka tries to maintain in the local consumer queue. <br>*Type: integer*
+queued.max.messages.kbytes               |  C  | 1 .. 2097151    |       1048576 | Maximum number of kilobytes per topic+partition in the local consumer queue. This value may be overshot by fetch.message.max.bytes. This property has higher priority than queued.min.messages. <br>*Type: integer*
+fetch.wait.max.ms                        |  C  | 0 .. 300000     |           100 | Maximum time the broker may wait to fill the response with fetch.min.bytes. <br>*Type: integer*
+fetch.message.max.bytes                  |  C  | 1 .. 1000000000 |       1048576 | Initial maximum number of bytes per topic+partition to request when fetching messages from the broker. If the client encounters a message larger than this value it will gradually try to increase it until the entire message can be fetched. <br>*Type: integer*
+max.partition.fetch.bytes                |  C  |                 |               | Alias for `fetch.message.max.bytes`
+fetch.max.bytes                          |  C  | 0 .. 2147483135 |      52428800 | Maximum amount of data the broker shall return for a Fetch request. Messages are fetched in batches by the consumer and if the first message batch in the first non-empty partition of the Fetch request is larger than this value, then the message batch will still be returned to ensure the consumer can make progress. The maximum message batch size accepted by the broker is defined via `message.max.bytes` (broker config) or `max.message.bytes` (broker topic config). `fetch.max.bytes` is automatically adjusted upwards to be at least `message.max.bytes` (consumer config). <br>*Type: integer*
+fetch.min.bytes                          |  C  | 1 .. 100000000  |             1 | Minimum number of bytes the broker responds with. If fetch.wait.max.ms expires the accumulated data will be sent to the client regardless of this setting. <br>*Type: integer*
+fetch.error.backoff.ms                   |  C  | 0 .. 300000     |           500 | How long to postpone the next fetch request for a topic+partition in case of a fetch error. <br>*Type: integer*
+offset.store.method                      |  C  | none, file, broker |        broker | Offset commit store method: 'file' - local file store (offset.store.path, et.al), 'broker' - broker commit store (requires Apache Kafka 0.8.2 or later on the broker). <br>*Type: enum value*
+consume_cb                               |  C  |                 |               | Message consume callback (set with rd_kafka_conf_set_consume_cb()) <br>*Type: pointer*
+rebalance_cb                             |  C  |                 |               | Called after consumer group has been rebalanced (set with rd_kafka_conf_set_rebalance_cb()) <br>*Type: pointer*
+offset_commit_cb                         |  C  |                 |               | Offset commit result propagation callback. (set with rd_kafka_conf_set_offset_commit_cb()) <br>*Type: pointer*
+enable.partition.eof                     |  C  | true, false     |          true | Emit RD_KAFKA_RESP_ERR__PARTITION_EOF event whenever the consumer reaches the end of a partition. <br>*Type: boolean*
+check.crcs                               |  C  | true, false     |         false | Verify CRC32 of consumed messages, ensuring no on-the-wire or on-disk corruption to the messages occurred. This check comes at slightly increased CPU usage. <br>*Type: boolean*
+queue.buffering.max.messages             |  P  | 1 .. 10000000   |        100000 | Maximum number of messages allowed on the producer queue. <br>*Type: integer*
+queue.buffering.max.kbytes               |  P  | 1 .. 2097151    |       1048576 | Maximum total message size sum allowed on the producer queue. This property has higher priority than queue.buffering.max.messages. <br>*Type: integer*
+queue.buffering.max.ms                   |  P  | 0 .. 900000     |             0 | Delay in milliseconds to wait for messages in the producer queue to accumulate before constructing message batches (MessageSets) to transmit to brokers. A higher value allows larger and more effective (less overhead, improved compression) batches of messages to accumulate at the expense of increased message delivery latency. <br>*Type: integer*
+linger.ms                                |  P  |                 |               | Alias for `queue.buffering.max.ms`
+message.send.max.retries                 |  P  | 0 .. 10000000   |             2 | How many times to retry sending a failing MessageSet. **Note:** retrying may cause reordering. <br>*Type: integer*
+retries                                  |  P  |                 |               | Alias for `message.send.max.retries`
+retry.backoff.ms                         |  P  | 1 .. 300000     |           100 | The backoff time in milliseconds before retrying a protocol request. <br>*Type: integer*
+queue.buffering.backpressure.threshold   |  P  | 0 .. 1000000    |            10 | The threshold of outstanding not yet transmitted requests needed to backpressure the producer's message accumulator. A lower number yields larger and more effective batches. <br>*Type: integer*
+compression.codec                        |  P  | none, gzip, snappy, lz4 |          none | Compression codec to use for compressing message sets. This is the default value for all topics, may be overridden by the topic configuration property `compression.codec`.  <br>*Type: enum value*
+compression.type                         |  P  |                 |               | Alias for `compression.codec`
+batch.num.messages                       |  P  | 1 .. 1000000    |         10000 | Maximum number of messages batched in one MessageSet. The total MessageSet size is also limited by message.max.bytes. <br>*Type: integer*
+delivery.report.only.error               |  P  | true, false     |         false | Only provide delivery reports for failed messages. <br>*Type: boolean*
+dr_cb                                    |  P  |                 |               | Delivery report callback (set with rd_kafka_conf_set_dr_cb()) <br>*Type: pointer*
+dr_msg_cb                                |  P  |                 |               | Delivery report callback (set with rd_kafka_conf_set_dr_msg_cb()) <br>*Type: pointer*
+
+
+## Topic configuration properties
+
+Property                                 | C/P | Range           |       Default | Description              
+-----------------------------------------|-----|-----------------|--------------:|--------------------------
+request.required.acks                    |  P  | -1 .. 1000      |             1 | This field indicates how many acknowledgements the leader broker must receive from ISR brokers before responding to the request: *0*=Broker does not send any response/ack to client, *1*=Only the leader broker will need to ack the message, *-1* or *all*=broker will block until message is committed by all in sync replicas (ISRs) or broker's `min.insync.replicas` setting before sending response.  <br>*Type: integer*
+acks                                     |  P  |                 |               | Alias for `request.required.acks`
+request.timeout.ms                       |  P  | 1 .. 900000     |          5000 | The ack timeout of the producer request in milliseconds. This value is only enforced by the broker and relies on `request.required.acks` being != 0. <br>*Type: integer*
+message.timeout.ms                       |  P  | 0 .. 900000     |        300000 | Local message timeout. This value is only enforced locally and limits the time a produced message waits for successful delivery. A time of 0 is infinite. <br>*Type: integer*
+queuing.strategy                         |  P  | fifo, lifo      |          fifo | Producer queuing strategy. FIFO preserves produce ordering, while LIFO prioritizes new messages. WARNING: `lifo` is experimental and subject to change or removal. <br>*Type: enum value*
+produce.offset.report                    |  P  | true, false     |         false | Report offset of produced message back to application. The application must use the `dr_msg_cb` to retrieve the offset from `rd_kafka_message_t.offset`. <br>*Type: boolean*
+partitioner                              |  P  |                 | consistent_random | Partitioner: `random` - random distribution, `consistent` - CRC32 hash of key (Empty and NULL keys are mapped to single partition), `consistent_random` - CRC32 hash of key (Empty and NULL keys are randomly partitioned), `murmur2` - Java Producer compatible Murmur2 hash of key (NULL keys are mapped to single partition), `murmur2_random` - Java Producer compatible Murmur2 hash of key (NULL keys are randomly partitioned. This is functionally equivalent to the default partitioner in the Java Producer.). <br>*Type: string*
+partitioner_cb                           |  P  |                 |               | Custom partitioner callback (set with rd_kafka_topic_conf_set_partitioner_cb()) <br>*Type: pointer*
+msg_order_cmp                            |  P  |                 |               | Message queue ordering comparator (set with rd_kafka_topic_conf_set_msg_order_cmp()). Also see `queuing.strategy`. <br>*Type: pointer*
+opaque                                   |  *  |                 |               | Application opaque (set with rd_kafka_topic_conf_set_opaque()) <br>*Type: pointer*
+compression.codec                        |  P  | none, gzip, snappy, lz4, inherit |       inherit | Compression codec to use for compressing message sets. inherit = inherit global compression.codec configuration. <br>*Type: enum value*
+compression.type                         |  P  |                 |               | Alias for `compression.codec`
+auto.commit.enable                       |  C  | true, false     |          true | If true, periodically commit offset of the last message handed to the application. This committed offset will be used when the process restarts to pick up where it left off. If false, the application will have to call `rd_kafka_offset_store()` to store an offset (optional). **NOTE:** This property should only be used with the simple legacy consumer, when using the high-level KafkaConsumer the global `enable.auto.commit` property must be used instead. **NOTE:** There is currently no zookeeper integration, offsets will be written to broker or local file according to offset.store.method. <br>*Type: boolean*
+enable.auto.commit                       |  C  |                 |               | Alias for `auto.commit.enable`
+auto.commit.interval.ms                  |  C  | 10 .. 86400000  |         60000 | The frequency in milliseconds that the consumer offsets are committed (written) to offset storage. This setting is used by the low-level legacy consumer. <br>*Type: integer*
+auto.offset.reset                        |  C  | smallest, earliest, beginning, largest, latest, end, error |       largest | Action to take when there is no initial offset in offset store or the desired offset is out of range: 'smallest','earliest' - automatically reset the offset to the smallest offset, 'largest','latest' - automatically reset the offset to the largest offset, 'error' - trigger an error which is retrieved by consuming messages and checking 'message->err'. <br>*Type: enum value*
+offset.store.path                        |  C  |                 |             . | Path to local file for storing offsets. If the path is a directory a filename will be automatically generated in that directory based on the topic and partition. <br>*Type: string*
+offset.store.sync.interval.ms            |  C  | -1 .. 86400000  |            -1 | fsync() interval for the offset file, in milliseconds. Use -1 to disable syncing, and 0 for immediate sync after each write. <br>*Type: integer*
+offset.store.method                      |  C  | file, broker    |        broker | Offset commit store method: 'file' - local file store (offset.store.path, et.al), 'broker' - broker commit store (requires "group.id" to be configured and Apache Kafka 0.8.2 or later on the broker.). <br>*Type: enum value*
+consume.callback.max.messages            |  C  | 0 .. 1000000    |             0 | Maximum number of messages to dispatch in one `rd_kafka_consume_callback*()` call (0 = unlimited) <br>*Type: integer*
+
+### C/P legend: C = Consumer, P = Producer, * = both
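
To show how properties from both tables are applied through the C API, here is a minimal producer-setup sketch assuming an installed librdkafka; the broker address "broker1:9092" and topic "example-topic" are placeholders and error handling is reduced to stderr messages:

    #include <stdio.h>
    #include <librdkafka/rdkafka.h>

    int main(void) {
            char errstr[512];
            rd_kafka_conf_t *conf        = rd_kafka_conf_new();
            rd_kafka_topic_conf_t *tconf = rd_kafka_topic_conf_new();
            rd_kafka_t *rk;
            rd_kafka_topic_t *rkt;

            /* Global configuration properties (first table) */
            if (rd_kafka_conf_set(conf, "bootstrap.servers", "broker1:9092",
                                  errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK ||
                rd_kafka_conf_set(conf, "compression.codec", "snappy",
                                  errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) {
                    fprintf(stderr, "conf_set failed: %s\n", errstr);
                    return 1;
            }

            /* Topic configuration property (second table) */
            if (rd_kafka_topic_conf_set(tconf, "request.required.acks", "-1",
                                        errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) {
                    fprintf(stderr, "topic_conf_set failed: %s\n", errstr);
                    return 1;
            }

            rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
            if (!rk) {
                    fprintf(stderr, "rd_kafka_new failed: %s\n", errstr);
                    return 1;
            }
            rkt = rd_kafka_topic_new(rk, "example-topic", tconf);

            /* ... produce messages here ... */

            if (rkt)
                    rd_kafka_topic_destroy(rkt);
            rd_kafka_destroy(rk);
            return 0;
    }

Topic-level properties can also be attached globally through the default_topic_conf property listed in the first table; the separate rd_kafka_topic_conf_t used here is just one way to do it.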

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/CONTRIBUTING.md
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/CONTRIBUTING.md b/thirdparty/librdkafka-0.11.4/CONTRIBUTING.md
new file mode 100644
index 0000000..5da7c77
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/CONTRIBUTING.md
@@ -0,0 +1,271 @@
+# Contributing to librdkafka
+
+(This document is based on [curl's CONTRIBUTE.md](https://github.com/curl/curl/blob/master/docs/CONTRIBUTE.md) - thank you!)
+
+This document is intended to offer guidelines on how to best contribute to the
+librdkafka project. This concerns new features as well as bug fixes and
+general improvements.
+
+### License and copyright
+
+When contributing with code, you agree to put your changes and new code under
+the same license librdkafka is already using unless stated and agreed
+otherwise.
+
+When changing existing source code, you do not alter the copyright of the
+original file(s). The copyright will still be owned by the original creator(s)
+or those who have been assigned copyright by the original author(s).
+
+By submitting a patch to the librdkafka project, you are assumed to have the
+right to the code and to be allowed by your employer or whoever else to hand
+over that patch/code to us. We will credit you for your changes as far as
+possible, both to give credit and to keep a record of who made what changes.
+Please always provide us with your full real name when contributing!
+
+Official librdkafka project maintainer(s) assume ownership of all accepted
+submissions.
+
+## Write a good patch
+
+### Follow code style
+
+When writing C code, follow the code style already established in
+the project. Consistent style makes code easier to read and mistakes less
+likely to happen.
+
+See the end of this document for the C style guide to use in librdkafka.
+
+
+### Write Separate Changes
+
+It is annoying when you get a huge patch from someone that is said to fix 511
+odd problems, but discussions and opinions don't agree with 510 of them - or
+509 of them were already fixed in a different way. Then the person merging
+this change needs to extract the single interesting patch from somewhere
+within the huge pile of source, and that gives a lot of extra work.
+
+Preferably, each fix that corrects a problem should be in its own patch/commit
+with its own description/commit message stating exactly what it corrects so
+that all changes can be selectively applied by the maintainer or other
+interested parties.
+
+Also, separate changes enable bisecting much better when we track problems
+and regressions in the future.
+
+### Patch Against Recent Sources
+
+Please try to make your patches against the latest master branch.
+
+### Test Cases
+
+Bugfixes should also include a new test case in the regression test suite
+that verifies the bug is fixed.
+Create a new tests/00<freenumber>-<short_bug_description>.c file and
+try to reproduce the issue in its simplest form (a rough skeleton is
+sketched at the end of this section).
+Verify that the test case fails for earlier versions and passes with your
+bugfix in place.
+
+New features and APIs should also result in an added test case.
+
+Submitted patches must pass all existing tests.
+For more information on the test suite see [tests/README]
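+
+As a rough, illustrative sketch only (the exact entry-point naming and helper
+macros are defined by the harness in tests/test.h; the file and function names
+below are placeholders):
+
+    /* tests/0123-short_bug_description.c */
+    #include "test.h"
+
+    int main_0123_short_bug_description (int argc, char **argv) {
+            /* Arrange: set up a client/topic that reproduces the issue. */
+            /* Act:     perform the operation that used to misbehave. */
+            /* Assert:  fail the test if the old, broken behaviour shows. */
+            return 0;
+    }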
+
+
+
+## How to get your changes into the main sources
+
+File a [pull request on github](https://github.com/edenhill/librdkafka/pulls)
+
+Your change will be reviewed and discussed there, and you will be
+expected to correct the flaws pointed out and update accordingly, or the
+change risks stalling and eventually just being deleted without action. As
+the submitter of a change, you are the owner of that change until it has
+been merged.
+
+Make sure to monitor your PR on github and answer questions and/or
+fix nits/flaws. This is very important. We will take a lack of replies as a
+sign that you're not very eager to get your patch accepted, and we tend to
+simply drop such changes.
+
+When you adjust your pull requests after review, please squash the
+commits so that we can review the full updated version more easily
+and keep history cleaner.
+
+For example:
+
+    # Interactive rebase to let you squash/fixup commits
+    $ git rebase -i master
+
+    # Mark fixes-on-fixes commits as 'fixup' (or just 'f') in the
+    # first column. These will be silently integrated into the
+    # previous commit, so make sure to move the fixup-commit to
+    # the line beneath the parent commit.
+
+    # Since this probably rewrote the history of previously pushed
+    # commits, you will need to force push, which is usually
+    # a bad idea but works well for pull requests.
+    $ git push --force origin your_feature_branch
+
+
+### Write good commit messages
+
+A short guide to writing commit messages in the librdkafka project.
+
+    ---- start ----
+    [area]: [short line describing the main effect] [(#issuenumber)]
+           -- empty line --
+    [full description, no wider than 72 columns, that describes as much as
+    possible why this change is made, and possibly what things
+    it fixes and everything else that is related]
+    ---- stop ----
+
+Example:
+
+    cgrp: restart query timer on all heartbeat failures (#10023)
+    
+    If unhandled errors were received in HeartbeatResponse
+    the cgrp could get stuck in a state where it would not
+    refresh its coordinator.
+
+
+
+# librdkafka C style guide
+
+## Function and globals naming
+
+Use self-explanatory hierarchical snake-case naming.
+Pretty much all symbols should start with `rd_kafka_`, followed by
+their subsystem (e.g., `cgrp`, `broker`, `buf`, etc.), followed by an
+action (e.g., `find`, `get`, `clear`, ..).
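+
+For example (an illustrative, hypothetical symbol, not an existing librdkafka
+function), a broker-subsystem helper that clears a wait list could be
+declared as:
+
+    /* prefix: rd_kafka_, subsystem: broker, action: clear */
+    static void rd_kafka_broker_waitlist_clear (rd_kafka_broker_t *rkb);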
+
+
+## Variable naming
+
+For existing types use the type prefix as variable name.
+The type prefix is typically the first part of struct member fields.
+Example:
+
+  * `rd_kafka_broker_t` has field names starting with `rkb_..`, thus broker
+    variables should be named `rkb`
+
+
+For other types use reasonably concise but descriptive names.
+`i` and `j` are typical int iterators.
+
+## Variable declaration
+
+Variables must be declared at the head of a scope; no in-line variable
+declarations are allowed.
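+
+A minimal sketch:
+
+    int some_func (rd_kafka_t *rk) {
+            int cnt = 0;          /* declared at the head of the scope */
+            const char *name;     /* ...not in-line next to first use */
+
+            name = rd_kafka_name(rk);
+            ...
+    }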
+
+## Indenting
+
+Use an 8-space indent, same as the Linux kernel.
+In emacs, use `c-set-style "linux"`.
+For C++, use Google's C++ style.
+
+## Comments
+
+Use `/* .. */` comments, not `// ..`
+
+For functions, use doxygen syntax, e.g.:
+
+    /**
+     * @brief <short description>
+     * ..
+     * @returns <something..>
+     */
+
+
+Make sure to comment non-obvious code and situations where the full
+context of an operation is not easy to grasp.
+
+Also make sure to update existing comments when the code changes.
+
+
+## Line length
+
+Try hard to keep line length below 80 characters; when this is not possible,
+exceed it with reason.
+
+
+## Braces
+
+Braces go on the same line as their enveloping statement:
+
+    int some_func (..) {
+      while (1) {
+        if (1) {
+          do something;
+          ..
+        } else {
+          do something else;
+          ..
+        }
+      }
+ 
+      /* Single line scopes should not have braces */
+      if (1)
+        hi();
+      else if (2)
+        /* Say hello */
+        hello();
+      else
+        bye();
+
+
+## Spaces
+
+All expression parentheses should be prefixed and suffixed with a single space:
+
+    int some_func (int a) {
+
+        if (1)
+          ....;
+
+        for (i = 0 ; i < 19 ; i++) {
+
+
+        }
+    }
+
+
+Use space around operators:
+
+    int a = 2;
+  
+    if (b >= 3)
+       c += 2;
+
+Except for these:
+  
+    d++;
+    --e;
+
+
+## New block on new line
+
+New blocks should be on a new line:
+
+    if (1)
+      new();
+    else
+      old();
+
+
+## Parentheses
+
+Don't assume the reader knows C operator precedence by heart for complex
+statements; add parentheses to ease readability.
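+
+For example (illustrative only):
+
+    /* Explicit grouping: no need to recall that '&' binds more loosely
+     * than '!='. */
+    if ((flags & WANTED_FLAG) != 0 && (len > 0 || allow_empty))
+            do_something();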
+
+
+## ifdef hell
+
+Avoid ifdefs as much as possible.
+Platform support checking should be performed in configure.librdkafka.
+
+
+
+
+
+# librdkafka C++ style guide
+
+Follow [Google's C++ style guide](https://google.github.io/styleguide/cppguide.html)


[21/51] [partial] nifi-minifi-cpp git commit: MINIFICPP-512 - upgrade to librdkafka 0.11.4

Posted by ph...@apache.org.
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdkafka_partition.h
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdkafka_partition.h b/thirdparty/librdkafka-0.11.1/src/rdkafka_partition.h
deleted file mode 100644
index 8721f67..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdkafka_partition.h
+++ /dev/null
@@ -1,636 +0,0 @@
-/*
- * librdkafka - The Apache Kafka C/C++ library
- *
- * Copyright (c) 2015 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-#pragma once
-
-#include "rdkafka_topic.h"
-#include "rdkafka_cgrp.h"
-#include "rdkafka_broker.h"
-
-extern const char *rd_kafka_fetch_states[];
-
-
-/**
- * @brief Offset statistics
- */
-struct offset_stats {
-        int64_t fetch_offset; /**< Next offset to fetch */
-        int64_t eof_offset;   /**< Last offset we reported EOF for */
-        int64_t hi_offset;    /**< Current broker hi offset */
-};
-
-/**
- * @brief Reset offset_stats struct to default values
- */
-static RD_UNUSED void rd_kafka_offset_stats_reset (struct offset_stats *offs) {
-        offs->fetch_offset = 0;
-        offs->eof_offset = RD_KAFKA_OFFSET_INVALID;
-        offs->hi_offset = RD_KAFKA_OFFSET_INVALID;
-}
-
-
-
-/**
- * Topic + Partition combination
- */
-struct rd_kafka_toppar_s { /* rd_kafka_toppar_t */
-	TAILQ_ENTRY(rd_kafka_toppar_s) rktp_rklink;  /* rd_kafka_t link */
-	TAILQ_ENTRY(rd_kafka_toppar_s) rktp_rkblink; /* rd_kafka_broker_t link*/
-        CIRCLEQ_ENTRY(rd_kafka_toppar_s) rktp_fetchlink; /* rkb_fetch_toppars */
-	TAILQ_ENTRY(rd_kafka_toppar_s) rktp_rktlink; /* rd_kafka_itopic_t link*/
-        TAILQ_ENTRY(rd_kafka_toppar_s) rktp_cgrplink;/* rd_kafka_cgrp_t link */
-        rd_kafka_itopic_t       *rktp_rkt;
-        shptr_rd_kafka_itopic_t *rktp_s_rkt;  /* shared pointer for rktp_rkt */
-	int32_t            rktp_partition;
-        //LOCK: toppar_lock() + topic_wrlock()
-        //LOCK: .. in partition_available()
-        int32_t            rktp_leader_id;   /**< Current leader broker id.
-                                              *   This is updated directly
-                                              *   from metadata. */
-	rd_kafka_broker_t *rktp_leader;      /**< Current leader broker
-                                              *   This updated asynchronously
-                                              *   by issuing JOIN op to
-                                              *   broker thread, so be careful
-                                              *   in using this since it
-                                              *   may lag. */
-        rd_kafka_broker_t *rktp_next_leader; /**< Next leader broker after
-                                              *   async migration op. */
-	rd_refcnt_t        rktp_refcnt;
-	mtx_t              rktp_lock;
-
-        //LOCK: toppar_lock. Should move the lock inside the msgq instead
-        //LOCK: toppar_lock. toppar_insert_msg(), concat_msgq()
-        //LOCK: toppar_lock. toppar_enq_msg(), deq_msg(), insert_msgq()
-        int                rktp_msgq_wakeup_fd; /* Wake-up fd */
-	rd_kafka_msgq_t    rktp_msgq;      /* application->rdkafka queue.
-					    * protected by rktp_lock */
-	rd_kafka_msgq_t    rktp_xmit_msgq; /* internal broker xmit queue */
-
-        int                rktp_fetch;     /* On rkb_fetch_toppars list */
-
-	/* Consumer */
-	rd_kafka_q_t      *rktp_fetchq;          /* Queue of fetched messages
-						  * from broker.
-                                                  * Broker thread -> App */
-        rd_kafka_q_t      *rktp_ops;             /* * -> Main thread */
-
-
-	/**
-	 * rktp version barriers
-	 *
-	 * rktp_version is the application/controller side's
-	 * authoritative version, it depicts the most up to date state.
-	 * This is what q_filter() matches an rko_version to.
-	 *
-	 * rktp_op_version is the last/current received state handled
-	 * by the toppar in the broker thread. It is updated to rktp_version
-	 * when receiving a new op.
-	 *
-	 * rktp_fetch_version is the current fetcher decision version.
-	 * It is used in fetch_decide() to see if the fetch decision
-	 * needs to be updated by comparing to rktp_op_version.
-	 *
-	 * Example:
-	 *   App thread   : Send OP_START (v1 bump): rktp_version=1
-	 *   Broker thread: Recv OP_START (v1): rktp_op_version=1
-	 *   Broker thread: fetch_decide() detects that
-	 *                  rktp_op_version != rktp_fetch_version and
-	 *                  sets rktp_fetch_version=1.
-	 *   Broker thread: next Fetch request has it's tver state set to
-	 *                  rktp_fetch_verison (v1).
-	 *
-	 *   App thread   : Send OP_SEEK (v2 bump): rktp_version=2
-	 *   Broker thread: Recv OP_SEEK (v2): rktp_op_version=2
-	 *   Broker thread: Recv IO FetchResponse with tver=1,
-	 *                  when enqueued on rktp_fetchq they're discarded
-	 *                  due to old version (tver<rktp_version).
-	 *   Broker thread: fetch_decide() detects version change and
-	 *                  sets rktp_fetch_version=2.
-	 *   Broker thread: next Fetch request has tver=2
-	 *   Broker thread: Recv IO FetchResponse with tver=2 which
-	 *                  is same as rktp_version so message is forwarded
-	 *                  to app.
-	 */
-        rd_atomic32_t      rktp_version;         /* Latest op version.
-                                                  * Authoritative (app thread)*/
-	int32_t            rktp_op_version;      /* Op version of curr command
-						  * state from.
-						  * (broker thread) */
-        int32_t            rktp_fetch_version;   /* Op version of curr fetch.
-                                                    (broker thread) */
-
-	enum {
-		RD_KAFKA_TOPPAR_FETCH_NONE = 0,
-                RD_KAFKA_TOPPAR_FETCH_STOPPING,
-                RD_KAFKA_TOPPAR_FETCH_STOPPED,
-		RD_KAFKA_TOPPAR_FETCH_OFFSET_QUERY,
-		RD_KAFKA_TOPPAR_FETCH_OFFSET_WAIT,
-		RD_KAFKA_TOPPAR_FETCH_ACTIVE,
-	} rktp_fetch_state;           /* Broker thread's state */
-
-#define RD_KAFKA_TOPPAR_FETCH_IS_STARTED(fetch_state) \
-        ((fetch_state) >= RD_KAFKA_TOPPAR_FETCH_OFFSET_QUERY)
-
-	int32_t            rktp_fetch_msg_max_bytes; /* Max number of bytes to
-                                                      * fetch.
-                                                      * Locality: broker thread
-                                                      */
-
-        rd_ts_t            rktp_ts_fetch_backoff; /* Back off fetcher for
-                                                   * this partition until this
-                                                   * absolute timestamp
-                                                   * expires. */
-
-	int64_t            rktp_query_offset;    /* Offset to query broker for*/
-	int64_t            rktp_next_offset;     /* Next offset to start
-                                                  * fetching from.
-                                                  * Locality: toppar thread */
-	int64_t            rktp_last_next_offset; /* Last next_offset handled
-						   * by fetch_decide().
-						   * Locality: broker thread */
-	int64_t            rktp_app_offset;      /* Last offset delivered to
-						  * application + 1 */
-	int64_t            rktp_stored_offset;   /* Last stored offset, but
-						  * maybe not committed yet. */
-        int64_t            rktp_committing_offset; /* Offset currently being
-                                                    * committed */
-	int64_t            rktp_committed_offset; /* Last committed offset */
-	rd_ts_t            rktp_ts_committed_offset; /* Timestamp of last
-                                                      * commit */
-
-        struct offset_stats rktp_offsets; /* Current offsets.
-                                           * Locality: broker thread*/
-        struct offset_stats rktp_offsets_fin; /* Finalized offset for stats.
-                                               * Updated periodically
-                                               * by broker thread.
-                                               * Locks: toppar_lock */
-
-	int64_t rktp_hi_offset;              /* Current high offset.
-					      * Locks: toppar_lock */
-        int64_t rktp_lo_offset;              /* Current broker low offset.
-                                              * This is outside of the stats
-                                              * struct due to this field
-                                              * being populated by the
-                                              * toppar thread rather than
-                                              * the broker thread.
-                                              * Locality: toppar thread
-                                              * Locks: toppar_lock */
-
-        rd_ts_t            rktp_ts_offset_lag;
-
-	char              *rktp_offset_path;     /* Path to offset file */
-	FILE              *rktp_offset_fp;       /* Offset file pointer */
-        rd_kafka_cgrp_t   *rktp_cgrp;            /* Belongs to this cgrp */
-
-        int                rktp_assigned;   /* Partition in cgrp assignment */
-
-        rd_kafka_replyq_t  rktp_replyq; /* Current replyq+version
-					 * for propagating
-					 * major operations, e.g.,
-					 * FETCH_STOP. */
-        //LOCK: toppar_lock().  RD_KAFKA_TOPPAR_F_DESIRED
-        //LOCK: toppar_lock().  RD_KAFKA_TOPPAR_F_UNKNOWN
-	int                rktp_flags;
-#define RD_KAFKA_TOPPAR_F_DESIRED  0x1      /* This partition is desired
-					     * by a consumer. */
-#define RD_KAFKA_TOPPAR_F_UNKNOWN  0x2      /* Topic is (not yet) seen on
-					     * a broker. */
-#define RD_KAFKA_TOPPAR_F_OFFSET_STORE 0x4  /* Offset store is active */
-#define RD_KAFKA_TOPPAR_F_OFFSET_STORE_STOPPING 0x8 /* Offset store stopping */
-#define RD_KAFKA_TOPPAR_F_APP_PAUSE  0x10   /* App pause()d consumption */
-#define RD_KAFKA_TOPPAR_F_LIB_PAUSE  0x20   /* librdkafka paused consumption */
-#define RD_KAFKA_TOPPAR_F_REMOVE     0x40   /* partition removed from cluster */
-#define RD_KAFKA_TOPPAR_F_LEADER_ERR 0x80   /* Operation failed:
-                                             * leader might be missing.
-                                             * Typically set from
-                                             * ProduceResponse failure. */
-
-        shptr_rd_kafka_toppar_t *rktp_s_for_desp; /* Shared pointer for
-                                                   * rkt_desp list */
-        shptr_rd_kafka_toppar_t *rktp_s_for_cgrp; /* Shared pointer for
-                                                   * rkcg_toppars list */
-        shptr_rd_kafka_toppar_t *rktp_s_for_rkb;  /* Shared pointer for
-                                                   * rkb_toppars list */
-
-	/*
-	 * Timers
-	 */
-	rd_kafka_timer_t rktp_offset_query_tmr;  /* Offset query timer */
-	rd_kafka_timer_t rktp_offset_commit_tmr; /* Offset commit timer */
-	rd_kafka_timer_t rktp_offset_sync_tmr;   /* Offset file sync timer */
-        rd_kafka_timer_t rktp_consumer_lag_tmr;  /* Consumer lag monitoring
-						  * timer */
-
-        int rktp_wait_consumer_lag_resp;         /* Waiting for consumer lag
-                                                  * response. */
-
-	struct {
-		rd_atomic64_t tx_msgs;
-		rd_atomic64_t tx_bytes;
-                rd_atomic64_t msgs;
-                rd_atomic64_t rx_ver_drops;
-	} rktp_c;
-
-};
-
-
-/**
- * Check if toppar is paused (consumer).
- * Locks: toppar_lock() MUST be held.
- */
-#define RD_KAFKA_TOPPAR_IS_PAUSED(rktp)				\
-	((rktp)->rktp_flags & (RD_KAFKA_TOPPAR_F_APP_PAUSE |	\
-			       RD_KAFKA_TOPPAR_F_LIB_PAUSE))
-
-
-
-
-/* Converts a shptr..toppar_t to a toppar_t */
-#define rd_kafka_toppar_s2i(s_rktp) rd_shared_ptr_obj(s_rktp)
-
-
-/**
- * Returns a shared pointer for the topic.
- */
-#define rd_kafka_toppar_keep(rktp)                                      \
-        rd_shared_ptr_get(rktp, &(rktp)->rktp_refcnt, shptr_rd_kafka_toppar_t)
-
-#define rd_kafka_toppar_keep_src(func,line,rktp)			\
-        rd_shared_ptr_get_src(func, line, rktp,				\
-			      &(rktp)->rktp_refcnt, shptr_rd_kafka_toppar_t)
-
-
-/**
- * Frees a shared pointer previously returned by ..toppar_keep()
- */
-#define rd_kafka_toppar_destroy(s_rktp)                                 \
-        rd_shared_ptr_put(s_rktp,                                       \
-                          &rd_kafka_toppar_s2i(s_rktp)->rktp_refcnt,    \
-                          rd_kafka_toppar_destroy_final(                \
-                                  rd_kafka_toppar_s2i(s_rktp)))
-
-
-
-
-#define rd_kafka_toppar_lock(rktp)     mtx_lock(&(rktp)->rktp_lock)
-#define rd_kafka_toppar_unlock(rktp)   mtx_unlock(&(rktp)->rktp_lock)
-
-static const char *rd_kafka_toppar_name (const rd_kafka_toppar_t *rktp)
-	RD_UNUSED;
-static const char *rd_kafka_toppar_name (const rd_kafka_toppar_t *rktp) {
-	static RD_TLS char ret[256];
-
-	rd_snprintf(ret, sizeof(ret), "%.*s [%"PRId32"]",
-		    RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
-		    rktp->rktp_partition);
-
-	return ret;
-}
-shptr_rd_kafka_toppar_t *rd_kafka_toppar_new0 (rd_kafka_itopic_t *rkt,
-					       int32_t partition,
-					       const char *func, int line);
-#define rd_kafka_toppar_new(rkt,partition) \
-	rd_kafka_toppar_new0(rkt, partition, __FUNCTION__, __LINE__)
-void rd_kafka_toppar_destroy_final (rd_kafka_toppar_t *rktp);
-void rd_kafka_toppar_purge_queues (rd_kafka_toppar_t *rktp);
-void rd_kafka_toppar_set_fetch_state (rd_kafka_toppar_t *rktp,
-                                      int fetch_state);
-void rd_kafka_toppar_insert_msg (rd_kafka_toppar_t *rktp, rd_kafka_msg_t *rkm);
-void rd_kafka_toppar_enq_msg (rd_kafka_toppar_t *rktp, rd_kafka_msg_t *rkm);
-void rd_kafka_toppar_deq_msg (rd_kafka_toppar_t *rktp, rd_kafka_msg_t *rkm);
-void rd_kafka_toppar_insert_msgq (rd_kafka_toppar_t *rktp,
-				  rd_kafka_msgq_t *rkmq);
-void rd_kafka_toppar_concat_msgq (rd_kafka_toppar_t *rktp,
-				  rd_kafka_msgq_t *rkmq);
-void rd_kafka_toppar_enq_error (rd_kafka_toppar_t *rktp,
-                                rd_kafka_resp_err_t err);
-shptr_rd_kafka_toppar_t *rd_kafka_toppar_get0 (const char *func, int line,
-                                               const rd_kafka_itopic_t *rkt,
-                                               int32_t partition,
-                                               int ua_on_miss);
-#define rd_kafka_toppar_get(rkt,partition,ua_on_miss) \
-        rd_kafka_toppar_get0(__FUNCTION__,__LINE__,rkt,partition,ua_on_miss)
-shptr_rd_kafka_toppar_t *rd_kafka_toppar_get2 (rd_kafka_t *rk,
-                                               const char *topic,
-                                               int32_t partition,
-                                               int ua_on_miss,
-                                               int create_on_miss);
-shptr_rd_kafka_toppar_t *
-rd_kafka_toppar_get_avail (const rd_kafka_itopic_t *rkt,
-                           int32_t partition,
-                           int ua_on_miss,
-                           rd_kafka_resp_err_t *errp);
-
-shptr_rd_kafka_toppar_t *rd_kafka_toppar_desired_get (rd_kafka_itopic_t *rkt,
-                                                      int32_t partition);
-void rd_kafka_toppar_desired_add0 (rd_kafka_toppar_t *rktp);
-shptr_rd_kafka_toppar_t *rd_kafka_toppar_desired_add (rd_kafka_itopic_t *rkt,
-                                                      int32_t partition);
-void rd_kafka_toppar_desired_link (rd_kafka_toppar_t *rktp);
-void rd_kafka_toppar_desired_unlink (rd_kafka_toppar_t *rktp);
-void rd_kafka_toppar_desired_del (rd_kafka_toppar_t *rktp);
-
-int rd_kafka_toppar_ua_move (rd_kafka_itopic_t *rkt, rd_kafka_msgq_t *rkmq);
-
-void rd_kafka_toppar_next_offset_handle (rd_kafka_toppar_t *rktp,
-                                         int64_t Offset);
-
-void rd_kafka_toppar_offset_commit (rd_kafka_toppar_t *rktp, int64_t offset,
-				    const char *metadata);
-
-void rd_kafka_toppar_broker_delegate (rd_kafka_toppar_t *rktp,
-				      rd_kafka_broker_t *rkb,
-				      int for_removal);
-
-
-rd_kafka_resp_err_t rd_kafka_toppar_op_fetch_start (rd_kafka_toppar_t *rktp,
-                                                    int64_t offset,
-                                                    rd_kafka_q_t *fwdq,
-                                                    rd_kafka_replyq_t replyq);
-
-rd_kafka_resp_err_t rd_kafka_toppar_op_fetch_stop (rd_kafka_toppar_t *rktp,
-                                                   rd_kafka_replyq_t replyq);
-
-rd_kafka_resp_err_t rd_kafka_toppar_op_seek (rd_kafka_toppar_t *rktp,
-                                             int64_t offset,
-                                             rd_kafka_replyq_t replyq);
-
-rd_kafka_resp_err_t rd_kafka_toppar_op_pause (rd_kafka_toppar_t *rktp,
-					      int pause, int flag);
-
-void rd_kafka_toppar_fetch_stopped (rd_kafka_toppar_t *rktp,
-                                    rd_kafka_resp_err_t err);
-
-/**
- * Updates the current toppar fetch round-robin next pointer.
- */
-static RD_INLINE RD_UNUSED
-void rd_kafka_broker_fetch_toppar_next (rd_kafka_broker_t *rkb,
-                                        rd_kafka_toppar_t *sugg_next) {
-        if (CIRCLEQ_EMPTY(&rkb->rkb_fetch_toppars) ||
-            (void *)sugg_next == CIRCLEQ_ENDC(&rkb->rkb_fetch_toppars))
-                rkb->rkb_fetch_toppar_next = NULL;
-        else if (sugg_next)
-                rkb->rkb_fetch_toppar_next = sugg_next;
-        else
-                rkb->rkb_fetch_toppar_next =
-                        CIRCLEQ_FIRST(&rkb->rkb_fetch_toppars);
-}
-
-
-rd_ts_t rd_kafka_toppar_fetch_decide (rd_kafka_toppar_t *rktp,
-                                      rd_kafka_broker_t *rkb,
-                                      int force_remove);
-
-
-
-rd_ts_t rd_kafka_broker_consumer_toppar_serve (rd_kafka_broker_t *rkb,
-                                               rd_kafka_toppar_t *rktp);
-
-
-void rd_kafka_toppar_offset_fetch (rd_kafka_toppar_t *rktp,
-                                   rd_kafka_replyq_t replyq);
-
-void rd_kafka_toppar_offset_request (rd_kafka_toppar_t *rktp,
-				     int64_t query_offset, int backoff_ms);
-
-
-rd_kafka_assignor_t *
-rd_kafka_assignor_find (rd_kafka_t *rk, const char *protocol);
-
-
-rd_kafka_broker_t *rd_kafka_toppar_leader (rd_kafka_toppar_t *rktp,
-                                           int proper_broker);
-void rd_kafka_toppar_leader_unavailable (rd_kafka_toppar_t *rktp,
-                                         const char *reason,
-                                         rd_kafka_resp_err_t err);
-
-rd_kafka_resp_err_t
-rd_kafka_toppars_pause_resume (rd_kafka_t *rk, int pause, int flag,
-			       rd_kafka_topic_partition_list_t *partitions);
-
-
-rd_kafka_topic_partition_t *rd_kafka_topic_partition_new (const char *topic,
-							  int32_t partition);
-rd_kafka_topic_partition_t *
-rd_kafka_topic_partition_new_from_rktp (rd_kafka_toppar_t *rktp);
-
-rd_kafka_topic_partition_t *
-rd_kafka_topic_partition_list_add0 (rd_kafka_topic_partition_list_t *rktparlist,
-                                    const char *topic, int32_t partition,
-				    shptr_rd_kafka_toppar_t *_private);
-
-rd_kafka_topic_partition_t *
-rd_kafka_topic_partition_list_upsert (
-        rd_kafka_topic_partition_list_t *rktparlist,
-        const char *topic, int32_t partition);
-
-int rd_kafka_topic_partition_match (rd_kafka_t *rk,
-				    const rd_kafka_group_member_t *rkgm,
-				    const rd_kafka_topic_partition_t *rktpar,
-				    const char *topic, int *matched_by_regex);
-
-
-void rd_kafka_topic_partition_list_sort_by_topic (
-        rd_kafka_topic_partition_list_t *rktparlist);
-
-void
-rd_kafka_topic_partition_list_reset_offsets (rd_kafka_topic_partition_list_t *rktparlist,
-					     int64_t offset);
-
-int rd_kafka_topic_partition_list_set_offsets (
-	rd_kafka_t *rk,
-        rd_kafka_topic_partition_list_t *rktparlist,
-        int from_rktp, int64_t def_value, int is_commit);
-
-int rd_kafka_topic_partition_list_count_abs_offsets (
-	const rd_kafka_topic_partition_list_t *rktparlist);
-
-shptr_rd_kafka_toppar_t *
-rd_kafka_topic_partition_get_toppar (rd_kafka_t *rk,
-                                     rd_kafka_topic_partition_t *rktpar);
-
-shptr_rd_kafka_toppar_t *
-rd_kafka_topic_partition_list_get_toppar (
-        rd_kafka_t *rk, rd_kafka_topic_partition_t *rktpar);
-
-void
-rd_kafka_topic_partition_list_update_toppars (rd_kafka_t *rk,
-                                              rd_kafka_topic_partition_list_t
-                                              *rktparlist);
-
-int
-rd_kafka_topic_partition_list_get_leaders (
-        rd_kafka_t *rk,
-        rd_kafka_topic_partition_list_t *rktparlist,
-        rd_list_t *leaders, rd_list_t *query_topics);
-
-rd_kafka_resp_err_t
-rd_kafka_topic_partition_list_query_leaders (
-        rd_kafka_t *rk,
-        rd_kafka_topic_partition_list_t *rktparlist,
-        rd_list_t *leaders, int timeout_ms);
-
-int
-rd_kafka_topic_partition_list_get_topics (
-        rd_kafka_t *rk,
-        rd_kafka_topic_partition_list_t *rktparlist,
-        rd_list_t *rkts);
-
-int
-rd_kafka_topic_partition_list_get_topic_names (
-        const rd_kafka_topic_partition_list_t *rktparlist,
-        rd_list_t *topics, int include_regex);
-
-void
-rd_kafka_topic_partition_list_log (rd_kafka_t *rk, const char *fac,
-				   const rd_kafka_topic_partition_list_t *rktparlist);
-
-#define RD_KAFKA_FMT_F_OFFSET    0x1  /* Print offset */
-#define RD_KAFKA_FMT_F_ONLY_ERR  0x2  /* Only include errored entries */
-#define RD_KAFKA_FMT_F_NO_ERR    0x4  /* Dont print error string */
-const char *
-rd_kafka_topic_partition_list_str (const rd_kafka_topic_partition_list_t *rktparlist,
-                                   char *dest, size_t dest_size,
-                                   int fmt_flags);
-
-void
-rd_kafka_topic_partition_list_update (rd_kafka_topic_partition_list_t *dst,
-                                      const rd_kafka_topic_partition_list_t *src);
-
-int rd_kafka_topic_partition_leader_cmp (const void *_a, const void *_b);
-
-rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_match (
-        const rd_kafka_topic_partition_list_t *rktparlist,
-        int (*match) (const void *elem, const void *opaque),
-        void *opaque);
-
-size_t
-rd_kafka_topic_partition_list_sum (
-        const rd_kafka_topic_partition_list_t *rktparlist,
-        size_t (*cb) (const rd_kafka_topic_partition_t *rktpar, void *opaque),
-        void *opaque);
-
-void rd_kafka_topic_partition_list_set_err (
-        rd_kafka_topic_partition_list_t *rktparlist,
-        rd_kafka_resp_err_t err);
-
-int rd_kafka_topic_partition_list_regex_cnt (
-        const rd_kafka_topic_partition_list_t *rktparlist);
-
-/**
- * @brief Toppar + Op version tuple used for mapping Fetched partitions
- *        back to their fetch versions.
- */
-struct rd_kafka_toppar_ver {
-	shptr_rd_kafka_toppar_t *s_rktp;
-	int32_t version;
-};
-
-
-/**
- * @brief Toppar + Op version comparator.
- */
-static RD_INLINE RD_UNUSED
-int rd_kafka_toppar_ver_cmp (const void *_a, const void *_b) {
-	const struct rd_kafka_toppar_ver *a = _a, *b = _b;
-	const rd_kafka_toppar_t *rktp_a = rd_kafka_toppar_s2i(a->s_rktp);
-	const rd_kafka_toppar_t *rktp_b = rd_kafka_toppar_s2i(b->s_rktp);
-	int r;
-
-	if (rktp_a->rktp_rkt != rktp_b->rktp_rkt &&
-	    (r = rd_kafkap_str_cmp(rktp_a->rktp_rkt->rkt_topic,
-				   rktp_b->rktp_rkt->rkt_topic)))
-		return r;
-
-	return rktp_a->rktp_partition - rktp_b->rktp_partition;
-}
-
-/**
- * @brief Frees up resources for \p tver but not the \p tver itself.
- */
-static RD_INLINE RD_UNUSED
-void rd_kafka_toppar_ver_destroy (struct rd_kafka_toppar_ver *tver) {
-	rd_kafka_toppar_destroy(tver->s_rktp);
-}
-
-
-/**
- * @returns 1 if rko version is outdated, else 0.
- */
-static RD_INLINE RD_UNUSED
-int rd_kafka_op_version_outdated (rd_kafka_op_t *rko, int version) {
-	if (!rko->rko_version)
-		return 0;
-
-	if (version)
-		return rko->rko_version < version;
-
-	if (rko->rko_rktp)
-		return rko->rko_version <
-			rd_atomic32_get(&rd_kafka_toppar_s2i(
-						rko->rko_rktp)->rktp_version);
-	return 0;
-}
-
-void
-rd_kafka_toppar_offset_commit_result (rd_kafka_toppar_t *rktp,
-				      rd_kafka_resp_err_t err,
-				      rd_kafka_topic_partition_list_t *offsets);
-
-void rd_kafka_toppar_broker_leave_for_remove (rd_kafka_toppar_t *rktp);
-
-
-/**
- * @brief Represents a leader and the partitions it is leader for.
- */
-struct rd_kafka_partition_leader {
-        rd_kafka_broker_t *rkb;
-        rd_kafka_topic_partition_list_t *partitions;
-};
-
-static RD_UNUSED void
-rd_kafka_partition_leader_destroy (struct rd_kafka_partition_leader *leader) {
-        rd_kafka_broker_destroy(leader->rkb);
-        rd_kafka_topic_partition_list_destroy(leader->partitions);
-        rd_free(leader);
-}
-
-static RD_UNUSED struct rd_kafka_partition_leader *
-rd_kafka_partition_leader_new (rd_kafka_broker_t *rkb) {
-        struct rd_kafka_partition_leader *leader = rd_malloc(sizeof(*leader));
-        leader->rkb = rkb;
-        rd_kafka_broker_keep(rkb);
-        leader->partitions = rd_kafka_topic_partition_list_new(0);
-        return leader;
-}
-
-static RD_UNUSED
-int rd_kafka_partition_leader_cmp (const void *_a, const void *_b) {
-        const struct rd_kafka_partition_leader *a = _a, *b = _b;
-        return rd_kafka_broker_cmp(a->rkb, b->rkb);
-}

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdkafka_pattern.c
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdkafka_pattern.c b/thirdparty/librdkafka-0.11.1/src/rdkafka_pattern.c
deleted file mode 100644
index fc2d711..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdkafka_pattern.c
+++ /dev/null
@@ -1,224 +0,0 @@
-/*
- * librdkafka - The Apache Kafka C/C++ library
- *
- * Copyright (c) 2015 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "rdkafka_int.h"
-#include "rdkafka_pattern.h"
-
-void rd_kafka_pattern_destroy (rd_kafka_pattern_list_t *plist,
-                               rd_kafka_pattern_t *rkpat) {
-        TAILQ_REMOVE(&plist->rkpl_head, rkpat, rkpat_link);
-	rd_regex_destroy(rkpat->rkpat_re);
-        rd_free(rkpat->rkpat_orig);
-        rd_free(rkpat);
-}
-
-void rd_kafka_pattern_add (rd_kafka_pattern_list_t *plist,
-                           rd_kafka_pattern_t *rkpat) {
-        TAILQ_INSERT_TAIL(&plist->rkpl_head, rkpat, rkpat_link);
-}
-
-rd_kafka_pattern_t *rd_kafka_pattern_new (const char *pattern,
-                                          char *errstr, int errstr_size) {
-        rd_kafka_pattern_t *rkpat;
-
-	rkpat = rd_calloc(1, sizeof(*rkpat));
-
-	/* Verify and precompile pattern */
-	if (!(rkpat->rkpat_re = rd_regex_comp(pattern, errstr, errstr_size))) {
-		rd_free(rkpat);
-		return NULL;
-	}
-
-        rkpat->rkpat_orig = rd_strdup(pattern);
-
-        return rkpat;
-}
-
-
-
-int rd_kafka_pattern_match (rd_kafka_pattern_list_t *plist, const char *str) {
-        rd_kafka_pattern_t *rkpat;
-
-        TAILQ_FOREACH(rkpat, &plist->rkpl_head, rkpat_link) {
-		if (rd_regex_exec(rkpat->rkpat_re, str))
-                        return 1;
-        }
-
-        return 0;
-}
-
-
-/**
- * Append pattern to list.
- */
-int rd_kafka_pattern_list_append (rd_kafka_pattern_list_t *plist,
-                                  const char *pattern,
-                                  char *errstr, int errstr_size) {
-        rd_kafka_pattern_t *rkpat;
-        rkpat = rd_kafka_pattern_new(pattern, errstr, errstr_size);
-        if (!rkpat)
-                return -1;
-
-        rd_kafka_pattern_add(plist, rkpat);
-        return 0;
-}
-
-/**
- * Remove matching patterns.
- * Returns the number of removed patterns.
- */
-int rd_kafka_pattern_list_remove (rd_kafka_pattern_list_t *plist,
-                                  const char *pattern) {
-        rd_kafka_pattern_t *rkpat, *rkpat_tmp;
-        int cnt = 0;
-
-        TAILQ_FOREACH_SAFE(rkpat, &plist->rkpl_head, rkpat_link, rkpat_tmp) {
-                if (!strcmp(rkpat->rkpat_orig, pattern)) {
-                        rd_kafka_pattern_destroy(plist, rkpat);
-                        cnt++;
-                }
-        }
-        return cnt;
-}
-
-/**
- * Parse a patternlist and populate a list with it.
- */
-static int rd_kafka_pattern_list_parse (rd_kafka_pattern_list_t *plist,
-                                        const char *patternlist,
-                                        char *errstr, size_t errstr_size) {
-		char *s;
-		rd_strdupa(&s, patternlist);
-
-        while (s && *s) {
-                char *t = s;
-                char re_errstr[256];
-
-                /* Find separator */
-                while ((t = strchr(t, ','))) {
-                        if (t > s && *(t-1) == ',') {
-                                /* separator was escaped,
-                                   remove escape and scan again. */
-                                memmove(t-1, t, strlen(t)+1);
-                                t++;
-                        } else {
-                                *t = '\0';
-                                t++;
-                                break;
-                        }
-                }
-
-                if (rd_kafka_pattern_list_append(plist, s, re_errstr,
-                                                 sizeof(re_errstr)) == -1) {
-                        rd_snprintf(errstr, errstr_size,
-                                    "Failed to parse pattern \"%s\": "
-                                    "%s", s, re_errstr);
-                        rd_kafka_pattern_list_clear(plist);
-                        return -1;
-                }
-
-                s = t;
-        }
-
-        return 0;
-}
-
-
-/**
- * Clear a pattern list.
- */
-void rd_kafka_pattern_list_clear (rd_kafka_pattern_list_t *plist) {
-        rd_kafka_pattern_t *rkpat;
-
-        while ((rkpat = TAILQ_FIRST(&plist->rkpl_head)))
-                rd_kafka_pattern_destroy(plist, rkpat);
-
-        if (plist->rkpl_orig) {
-                rd_free(plist->rkpl_orig);
-                plist->rkpl_orig = NULL;
-        }
-}
-
-
-/**
- * Free a pattern list previously created with list_new()
- */
-void rd_kafka_pattern_list_destroy (rd_kafka_pattern_list_t *plist) {
-        rd_kafka_pattern_list_clear(plist);
-        rd_free(plist);
-}
-
-/**
- * Initialize a pattern list, optionally populating it with the
- * comma-separated patterns in 'patternlist'.
- */
-int rd_kafka_pattern_list_init (rd_kafka_pattern_list_t *plist,
-                                const char *patternlist,
-                                char *errstr, size_t errstr_size) {
-        TAILQ_INIT(&plist->rkpl_head);
-        if (patternlist) {
-                if (rd_kafka_pattern_list_parse(plist, patternlist,
-                                                errstr, errstr_size) == -1)
-                        return -1;
-                plist->rkpl_orig = rd_strdup(patternlist);
-        } else
-                plist->rkpl_orig = NULL;
-
-        return 0;
-}
-
-
-/**
- * Allocate and initialize a new list.
- */
-rd_kafka_pattern_list_t *rd_kafka_pattern_list_new (const char *patternlist,
-                                                    char *errstr,
-                                                    int errstr_size) {
-        rd_kafka_pattern_list_t *plist;
-
-        plist = rd_calloc(1, sizeof(*plist));
-
-        if (rd_kafka_pattern_list_init(plist, patternlist,
-                                       errstr, errstr_size) == -1) {
-                rd_free(plist);
-                return NULL;
-        }
-
-        return plist;
-}
-
-
-/**
- * Make a copy of a pattern list.
- */
-rd_kafka_pattern_list_t *
-rd_kafka_pattern_list_copy (rd_kafka_pattern_list_t *src) {
-	char errstr[16];
-	return rd_kafka_pattern_list_new(src->rkpl_orig,
-					 errstr, sizeof(errstr));
-}

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdkafka_pattern.h
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdkafka_pattern.h b/thirdparty/librdkafka-0.11.1/src/rdkafka_pattern.h
deleted file mode 100644
index 6e6f976..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdkafka_pattern.h
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * librdkafka - The Apache Kafka C/C++ library
- *
- * Copyright (c) 2015 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-#pragma once
-
-#include "rdregex.h"
-
-typedef struct rd_kafka_pattern_s {
-        TAILQ_ENTRY(rd_kafka_pattern_s)  rkpat_link;
-
-	rd_regex_t  *rkpat_re;   /* Compiled regex */
-        char        *rkpat_orig;  /* Original pattern */
-} rd_kafka_pattern_t;
-
-typedef struct rd_kafka_pattern_list_s {
-        TAILQ_HEAD(,rd_kafka_pattern_s) rkpl_head;
-        char   *rkpl_orig;
-} rd_kafka_pattern_list_t;
-
-void rd_kafka_pattern_destroy (rd_kafka_pattern_list_t *plist,
-                               rd_kafka_pattern_t *rkpat);
-void rd_kafka_pattern_add (rd_kafka_pattern_list_t *plist,
-                           rd_kafka_pattern_t *rkpat);
-rd_kafka_pattern_t *rd_kafka_pattern_new (const char *pattern,
-                                          char *errstr, int errstr_size);
-int rd_kafka_pattern_match (rd_kafka_pattern_list_t *plist, const char *str);
-int rd_kafka_pattern_list_append (rd_kafka_pattern_list_t *plist,
-                                  const char *pattern,
-                                  char *errstr, int errstr_size);
-int rd_kafka_pattern_list_remove (rd_kafka_pattern_list_t *plist,
-                                  const char *pattern);
-void rd_kafka_pattern_list_clear (rd_kafka_pattern_list_t *plist);
-void rd_kafka_pattern_list_destroy (rd_kafka_pattern_list_t *plist);
-int rd_kafka_pattern_list_init (rd_kafka_pattern_list_t *plist,
-                                const char *patternlist,
-                                char *errstr, size_t errstr_size);
-rd_kafka_pattern_list_t *rd_kafka_pattern_list_new (const char *patternlist,
-                                                    char *errstr,
-                                                    int errstr_size);
-rd_kafka_pattern_list_t *
-rd_kafka_pattern_list_copy (rd_kafka_pattern_list_t *src);

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdkafka_plugin.c
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdkafka_plugin.c b/thirdparty/librdkafka-0.11.1/src/rdkafka_plugin.c
deleted file mode 100644
index b899899..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdkafka_plugin.c
+++ /dev/null
@@ -1,209 +0,0 @@
-/*
- * librdkafka - The Apache Kafka C/C++ library
- *
- * Copyright (c) 2017 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "rdkafka_int.h"
-#include "rdkafka_plugin.h"
-#include "rddl.h"
-
-
-typedef struct rd_kafka_plugin_s {
-        char *rkplug_path;         /* Library path */
-        rd_kafka_t *rkplug_rk;     /* Backpointer to the rk handle */
-        void *rkplug_handle;       /* dlopen (or similar) handle */
-        void *rkplug_opaque;       /* Plugin's opaque */
-
-} rd_kafka_plugin_t;
-
-
-/**
- * @brief Plugin path comparator
- */
-static int rd_kafka_plugin_cmp (const void *_a, const void *_b) {
-        const rd_kafka_plugin_t *a = _a, *b = _b;
-
-        return strcmp(a->rkplug_path, b->rkplug_path);
-}
-
-
-/**
- * @brief Add plugin (by library path) and calls its conf_init() constructor
- *
- * @returns an error code on error.
- * @remark duplicate plugins are silently ignored.
- *
- * @remark Libraries are refcounted and thus not unloaded until all
- *         plugins referencing the library have been destroyed.
- *         (dlopen() and LoadLibrary() does this for us)
- */
-static rd_kafka_resp_err_t
-rd_kafka_plugin_new (rd_kafka_conf_t *conf, const char *path,
-                     char *errstr, size_t errstr_size) {
-        rd_kafka_plugin_t *rkplug;
-        const rd_kafka_plugin_t skel = { .rkplug_path = (char *)path };
-        rd_kafka_plugin_f_conf_init_t *conf_init;
-        rd_kafka_resp_err_t err;
-        void *handle;
-        void *plug_opaque = NULL;
-
-        /* Avoid duplicates */
-        if (rd_list_find(&conf->plugins, &skel, rd_kafka_plugin_cmp)) {
-                rd_snprintf(errstr, errstr_size,
-                            "Ignoring duplicate plugin %s", path);
-                return RD_KAFKA_RESP_ERR_NO_ERROR;
-        }
-
-        rd_kafka_dbg0(conf, PLUGIN, "PLUGLOAD",
-                      "Loading plugin \"%s\"", path);
-
-        /* Attempt to load library */
-        if (!(handle = rd_dl_open(path, errstr, errstr_size))) {
-                rd_kafka_dbg0(conf, PLUGIN, "PLUGLOAD",
-                              "Failed to load plugin \"%s\": %s",
-                              path, errstr);
-                return RD_KAFKA_RESP_ERR__FS;
-        }
-
-        /* Find conf_init() function */
-        if (!(conf_init = rd_dl_sym(handle, "conf_init",
-                                    errstr, errstr_size))) {
-                rd_dl_close(handle);
-                return RD_KAFKA_RESP_ERR__INVALID_ARG;
-        }
-
-        /* Call conf_init() */
-        rd_kafka_dbg0(conf, PLUGIN, "PLUGINIT",
-                      "Calling plugin \"%s\" conf_init()", path);
-
-        if ((err = conf_init(conf, &plug_opaque, errstr, errstr_size))) {
-                rd_dl_close(handle);
-                return err;
-        }
-
-        rkplug = rd_calloc(1, sizeof(*rkplug));
-        rkplug->rkplug_path        = rd_strdup(path);
-        rkplug->rkplug_handle      = handle;
-        rkplug->rkplug_opaque = plug_opaque;
-
-        rd_list_add(&conf->plugins, rkplug);
-
-        rd_kafka_dbg0(conf, PLUGIN, "PLUGLOAD",
-                      "Plugin \"%s\" loaded", path);
-
-        return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-
-/**
- * @brief Free the plugin, any conf_destroy() interceptors will have been
- *        called prior to this call.
- * @remark plugin is not removed from any list (caller's responsibility)
- * @remark this relies on the actual library loader to refcount libraries,
- *         especially in the config copy case.
- *         This is true for POSIX dlopen() and Win32 LoadLibrary().
- * @locality application thread
- */
-static void rd_kafka_plugin_destroy (rd_kafka_plugin_t *rkplug) {
-        rd_dl_close(rkplug->rkplug_handle);
-        rd_free(rkplug->rkplug_path);
-        rd_free(rkplug);
-}
-
-
-
-/**
- * @brief Initialize all configured plugins.
- *
- * @remark Any previously loaded plugins will be unloaded.
- *
- * @returns the error code of the first failing plugin.
- * @locality application thread calling rd_kafka_new().
- */
-static rd_kafka_conf_res_t
-rd_kafka_plugins_conf_set0 (rd_kafka_conf_t *conf, const char *paths,
-                            char *errstr, size_t errstr_size) {
-        char *s;
-
-        rd_list_destroy(&conf->plugins);
-        rd_list_init(&conf->plugins, 0, (void *)&rd_kafka_plugin_destroy);
-
-        if (!paths || !*paths)
-                return RD_KAFKA_CONF_OK;
-
-        /* Split paths by ; */
-        rd_strdupa(&s, paths);
-
-        rd_kafka_dbg0(conf, PLUGIN, "PLUGLOAD",
-                      "Loading plugins from conf object %p: \"%s\"",
-                      conf, paths);
-
-        while (s && *s) {
-                char *path = s;
-                char *t;
-                rd_kafka_resp_err_t err;
-
-                if ((t = strchr(s, ';'))) {
-                        *t = '\0';
-                        s = t+1;
-                } else {
-                        s = NULL;
-                }
-
-                if ((err = rd_kafka_plugin_new(conf, path,
-                                               errstr, errstr_size))) {
-                        /* Failed to load plugin */
-                        size_t elen = errstr_size > 0 ? strlen(errstr) : 0;
-
-                        /* See if there is room for appending the
-                         * plugin path to the error message. */
-                        if (elen + strlen("(plugin )") + strlen(path) <
-                            errstr_size)
-                                rd_snprintf(errstr+elen, errstr_size-elen,
-                                            " (plugin %s)", path);
-
-                        rd_list_destroy(&conf->plugins);
-                        return RD_KAFKA_CONF_INVALID;
-                }
-        }
-
-        return RD_KAFKA_CONF_OK;
-}
-
-
-/**
- * @brief Conf setter for "plugin.library.paths"
- */
-rd_kafka_conf_res_t rd_kafka_plugins_conf_set (
-        int scope, void *pconf, const char *name, const char *value,
-        void *dstptr, rd_kafka_conf_set_mode_t set_mode,
-        char *errstr, size_t errstr_size) {
-
-        assert(scope == _RK_GLOBAL);
-        return rd_kafka_plugins_conf_set0((rd_kafka_conf_t *)pconf,
-                                          set_mode == _RK_CONF_PROP_SET_DEL ?
-                                          NULL : value, errstr, errstr_size);
-}

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdkafka_plugin.h
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdkafka_plugin.h b/thirdparty/librdkafka-0.11.1/src/rdkafka_plugin.h
deleted file mode 100644
index b588a7d..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdkafka_plugin.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * librdkafka - The Apache Kafka C/C++ library
- *
- * Copyright (c) 2017 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RDKAFKA_PLUGIN_H
-#define _RDKAFKA_PLUGIN_H
-
-rd_kafka_conf_res_t rd_kafka_plugins_conf_set (
-        int scope, void *conf, const char *name, const char *value,
-        void *dstptr, rd_kafka_conf_set_mode_t set_mode,
-        char *errstr, size_t errstr_size);
-
-#endif /* _RDKAFKA_PLUGIN_H */

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdkafka_proto.h
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdkafka_proto.h b/thirdparty/librdkafka-0.11.1/src/rdkafka_proto.h
deleted file mode 100644
index d778c4d..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdkafka_proto.h
+++ /dev/null
@@ -1,498 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012,2013 Magnus Edenhill
- * All rights reserved.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met: 
- * 
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer. 
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution. 
- * 
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#pragma once
-
-
-#include "rdendian.h"
-#include "rdvarint.h"
-
-
-
-/*
- * Kafka protocol definitions.
- */
-
-#define RD_KAFKA_PORT      9092
-#define RD_KAFKA_PORT_STR "9092"
-
-
-/**
- * Request types
- */
-struct rd_kafkap_reqhdr {
-        int32_t  Size;
-        int16_t  ApiKey;
-#define RD_KAFKAP_None         -1
-#define RD_KAFKAP_Produce       0
-#define RD_KAFKAP_Fetch         1
-#define RD_KAFKAP_Offset        2
-#define RD_KAFKAP_Metadata      3
-#define RD_KAFKAP_LeaderAndIsr  4
-#define RD_KAFKAP_StopReplica   5
-#define RD_KAFKAP_OffsetCommit  8
-#define RD_KAFKAP_OffsetFetch   9
-#define RD_KAFKAP_GroupCoordinator 10
-#define RD_KAFKAP_JoinGroup     11
-#define RD_KAFKAP_Heartbeat     12
-#define RD_KAFKAP_LeaveGroup    13
-#define RD_KAFKAP_SyncGroup     14
-#define RD_KAFKAP_DescribeGroups 15
-#define RD_KAFKAP_ListGroups    16
-#define RD_KAFKAP_SaslHandshake 17
-#define RD_KAFKAP_ApiVersion    18
-#define RD_KAFKAP_CreateTopics  19
-#define RD_KAFKAP_DeleteTopics  20
-#define RD_KAFKAP_DeleteRecords 21
-#define RD_KAFKAP_InitProducerId 22
-#define RD_KAFKAP_OffsetForLeaderEpoch 23
-#define RD_KAFKAP_AddPartitionsToTxn 24
-#define RD_KAFKAP_AddOffsetsToTxn 25
-#define RD_KAFKAP_EndTxn        26
-#define RD_KAFKAP_WriteTxnMarkers 27
-#define RD_KAFKAP_TxnOffsetCommit 28
-#define RD_KAFKAP_DescribeAcls  29
-#define RD_KAFKAP_CreateAcls    30
-#define RD_KAFKAP_DeleteAcls    31
-#define RD_KAFKAP_DescribeConfigs 32
-#define RD_KAFKAP_AlterConfigs  33
-#define RD_KAFKAP__NUM          34
-        int16_t  ApiVersion;
-        int32_t  CorrId;
-        /* ClientId follows */
-};
-
-#define RD_KAFKAP_REQHDR_SIZE (4+2+2+4)
-#define RD_KAFKAP_RESHDR_SIZE (4+4)
-
-/**
- * Response header
- */
-struct rd_kafkap_reshdr {
-	int32_t  Size;
-	int32_t  CorrId;
-};
-
-
-
-static RD_UNUSED
-const char *rd_kafka_ApiKey2str (int16_t ApiKey) {
-        static const char *names[] = {
-                [RD_KAFKAP_Produce] = "Produce",
-                [RD_KAFKAP_Fetch] = "Fetch",
-                [RD_KAFKAP_Offset] = "Offset",
-                [RD_KAFKAP_Metadata] = "Metadata",
-                [RD_KAFKAP_LeaderAndIsr] = "LeaderAndIsr",
-                [RD_KAFKAP_StopReplica] = "StopReplica",
-                [RD_KAFKAP_OffsetCommit] = "OffsetCommit",
-                [RD_KAFKAP_OffsetFetch] = "OffsetFetch",
-                [RD_KAFKAP_GroupCoordinator] = "GroupCoordinator",
-                [RD_KAFKAP_JoinGroup] = "JoinGroup",
-                [RD_KAFKAP_Heartbeat] = "Heartbeat",
-                [RD_KAFKAP_LeaveGroup] = "LeaveGroup",
-                [RD_KAFKAP_SyncGroup] = "SyncGroup",
-                [RD_KAFKAP_DescribeGroups] = "DescribeGroups",
-                [RD_KAFKAP_ListGroups] = "ListGroups",
-                [RD_KAFKAP_SaslHandshake] = "SaslHandshake",
-                [RD_KAFKAP_ApiVersion] = "ApiVersion",
-                [RD_KAFKAP_CreateTopics] = "CreateTopics",
-                [RD_KAFKAP_DeleteTopics] = "DeleteTopics",
-                [RD_KAFKAP_DeleteRecords] = "DeleteRecords",
-                [RD_KAFKAP_InitProducerId] = "InitProducerId",
-                [RD_KAFKAP_OffsetForLeaderEpoch] = "OffsetForLeaderEpoch",
-                [RD_KAFKAP_AddPartitionsToTxn] = "AddPartitionsToTxn",
-                [RD_KAFKAP_AddOffsetsToTxn] = "AddOffsetsToTxn",
-                [RD_KAFKAP_EndTxn] = "EndTxn",
-                [RD_KAFKAP_WriteTxnMarkers] = "WriteTxnMarkers",
-                [RD_KAFKAP_TxnOffsetCommit] = "TxnOffsetCommit",
-                [RD_KAFKAP_DescribeAcls] = "DescribeAcls",
-                [RD_KAFKAP_CreateAcls] = "CreateAcls",
-                [RD_KAFKAP_DeleteAcls] = "DeleteAcls",
-                [RD_KAFKAP_DescribeConfigs] = "DescribeConfigs",
-                [RD_KAFKAP_AlterConfigs] = "AlterConfigs"
-        };
-        static RD_TLS char ret[32];
-
-        if (ApiKey < 0 || ApiKey >= (int)RD_ARRAYSIZE(names)) {
-                rd_snprintf(ret, sizeof(ret), "Unknown-%hd?", ApiKey);
-                return ret;
-        }
-
-        return names[ApiKey];
-}
-
-
-
-
-
-
-
-
-/**
- * @brief ApiKey version support tuple.
- */
-struct rd_kafka_ApiVersion {
-	int16_t ApiKey;
-	int16_t MinVer;
-	int16_t MaxVer;
-};
-
-/**
- * @brief ApiVersion.ApiKey comparator.
- */
-static RD_UNUSED int rd_kafka_ApiVersion_key_cmp (const void *_a, const void *_b) {
-	const struct rd_kafka_ApiVersion *a = _a, *b = _b;
-
-	return a->ApiKey - b->ApiKey;
-}
-
-
-
-#define RD_KAFKAP_READ_UNCOMMITTED  0
-#define RD_KAFKAP_READ_COMMITTED    1
-
-
-/**
- *
- * Kafka protocol string representation prefixed with a convenience header
- *
- * Serialized format:
- *  { uint16, data.. }
- *
- */
-typedef struct rd_kafkap_str_s {
-	/* convenience header (aligned access, host endian) */
-	int         len; /* Kafka string length (-1=NULL, 0=empty, >0=string) */
-	const char *str; /* points into data[] or other memory,
-			  * not NULL-terminated */
-} rd_kafkap_str_t;
-
-
-#define RD_KAFKAP_STR_LEN_NULL -1
-#define RD_KAFKAP_STR_IS_NULL(kstr) ((kstr)->len == RD_KAFKAP_STR_LEN_NULL)
-
-/* Returns the length of the string of a kafka protocol string representation */
-#define RD_KAFKAP_STR_LEN0(len) ((len) == RD_KAFKAP_STR_LEN_NULL ? 0 : (len))
-#define RD_KAFKAP_STR_LEN(kstr) RD_KAFKAP_STR_LEN0((kstr)->len)
-
-/* Returns the actual size of a kafka protocol string representation. */
-#define RD_KAFKAP_STR_SIZE0(len) (2 + RD_KAFKAP_STR_LEN0(len))
-#define RD_KAFKAP_STR_SIZE(kstr) RD_KAFKAP_STR_SIZE0((kstr)->len)
-
-
-/* Serialized Kafka string: only works for _new() kstrs */
-#define RD_KAFKAP_STR_SER(kstr)  ((kstr)+1)
-
-/* Macro suitable for "%.*s" printing. */
-#define RD_KAFKAP_STR_PR(kstr)						\
-	(int)((kstr)->len == RD_KAFKAP_STR_LEN_NULL ? 0 : (kstr)->len), \
-		(kstr)->str
-
-/* strndupa() a Kafka string */
-#define RD_KAFKAP_STR_DUPA(destptr,kstr) \
-	rd_strndupa((destptr), (kstr)->str, RD_KAFKAP_STR_LEN(kstr))
-
-/* strndup() a Kafka string */
-#define RD_KAFKAP_STR_DUP(kstr) rd_strndup((kstr)->str, RD_KAFKAP_STR_LEN(kstr))
-
-/**
- * Frees a Kafka string previously allocated with `rd_kafkap_str_new()`
- */
-static RD_UNUSED void rd_kafkap_str_destroy (rd_kafkap_str_t *kstr) {
-	rd_free(kstr);
-}
-
-
-
-/**
- * Allocate a new Kafka string and make a copy of 'str'.
- * If 'len' is -1 the length will be calculated.
- * Supports Kafka NULL strings.
- * Nul-terminates the string, but the trailing \0 is not part of
- * the serialized string.
- */
-static RD_INLINE RD_UNUSED
-rd_kafkap_str_t *rd_kafkap_str_new (const char *str, int len) {
-	rd_kafkap_str_t *kstr;
-	int16_t klen;
-
-	if (!str)
-		len = RD_KAFKAP_STR_LEN_NULL;
-	else if (len == -1)
-		len = str ? (int)strlen(str) : RD_KAFKAP_STR_LEN_NULL;
-
-	kstr = rd_malloc(sizeof(*kstr) + 2 +
-			 (len == RD_KAFKAP_STR_LEN_NULL ? 0 : len + 1));
-	kstr->len = len;
-
-	/* Serialised format: 16-bit string length */
-	klen = htobe16(len);
-	memcpy(kstr+1, &klen, 2);
-
-	/* Serialised format: non null-terminated string */
-	if (len == RD_KAFKAP_STR_LEN_NULL)
-		kstr->str = NULL;
-	else {
-		kstr->str = ((const char *)(kstr+1))+2;
-		memcpy((void *)kstr->str, str, len);
-		((char *)kstr->str)[len] = '\0';
-	}
-
-	return kstr;
-}
-
-
-/**
- * Makes a copy of `src`. The copy will be fully allocated and should
- * be freed with rd_kafkap_str_destroy()
- */
-static RD_INLINE RD_UNUSED
-rd_kafkap_str_t *rd_kafkap_str_copy (const rd_kafkap_str_t *src) {
-        return rd_kafkap_str_new(src->str, src->len);
-}
-
-static RD_INLINE RD_UNUSED int rd_kafkap_str_cmp (const rd_kafkap_str_t *a,
-						 const rd_kafkap_str_t *b) {
-	int minlen = RD_MIN(a->len, b->len);
-	int r = memcmp(a->str, b->str, minlen);
-	if (r)
-		return r;
-	else
-		return a->len - b->len;
-}
-
-static RD_INLINE RD_UNUSED int rd_kafkap_str_cmp_str (const rd_kafkap_str_t *a,
-						     const char *str) {
-	int len = (int)strlen(str);
-	int minlen = RD_MIN(a->len, len);
-	int r = memcmp(a->str, str, minlen);
-	if (r)
-		return r;
-	else
-		return a->len - len;
-}
-
-static RD_INLINE RD_UNUSED int rd_kafkap_str_cmp_str2 (const char *str,
-						      const rd_kafkap_str_t *b){
-	int len = (int)strlen(str);
-	int minlen = RD_MIN(b->len, len);
-	int r = memcmp(str, b->str, minlen);
-	if (r)
-		return r;
-	else
-		return len - b->len;
-}
-
-
-
-/**
- *
- * Kafka protocol bytes array representation prefixed with a convenience header
- *
- * Serialized format:
- *  { uint32, data.. }
- *
- */
-typedef struct rd_kafkap_bytes_s {
-	/* convenience header (aligned access, host endian) */
-	int32_t     len;   /* Kafka bytes length (-1=NULL, 0=empty, >0=data) */
-	const void *data;  /* points just past the struct, or other memory,
-			    * not NULL-terminated */
-	const char _data[1]; /* Bytes following struct when new()ed */
-} rd_kafkap_bytes_t;
-
-
-#define RD_KAFKAP_BYTES_LEN_NULL -1
-#define RD_KAFKAP_BYTES_IS_NULL(kbytes) \
-	((kbytes)->len == RD_KAFKAP_BYTES_LEN_NULL)
-
-/* Returns the length of the bytes of a kafka protocol bytes representation */
-#define RD_KAFKAP_BYTES_LEN0(len) ((len) == RD_KAFKAP_BYTES_LEN_NULL ? 0:(len))
-#define RD_KAFKAP_BYTES_LEN(kbytes) RD_KAFKAP_BYTES_LEN0((kbytes)->len)
-
-/* Returns the actual size of a kafka protocol bytes representation. */
-#define RD_KAFKAP_BYTES_SIZE0(len) (4 + RD_KAFKAP_BYTES_LEN0(len))
-#define RD_KAFKAP_BYTES_SIZE(kbytes) RD_KAFKAP_BYTES_SIZE0((kbytes)->len)
-
-
-/* Serialized Kafka bytes: only works for _new() kbytes */
-#define RD_KAFKAP_BYTES_SER(kbytes)  ((kbytes)+1)
-
-
-/**
- * Frees a Kafka bytes previously allocated with `rd_kafkap_bytes_new()`
- */
-static RD_UNUSED void rd_kafkap_bytes_destroy (rd_kafkap_bytes_t *kbytes) {
-	rd_free(kbytes);
-}
-
-
-/**
- * @brief Allocate a new Kafka bytes and make a copy of 'bytes'.
- * If \p len > 0 but \p bytes is NULL no copying is performed but
- * the bytes structure will be allocated to fit \p len bytes.
- *
- * Supports:
- *  - Kafka NULL bytes (bytes==NULL,len==0),
- *  - Empty bytes (bytes!=NULL,len==0)
- *  - Copy data (bytes!=NULL,len>0)
- *  - No-copy, just alloc (bytes==NULL,len>0)
- */
-static RD_INLINE RD_UNUSED
-rd_kafkap_bytes_t *rd_kafkap_bytes_new (const char *bytes, int32_t len) {
-	rd_kafkap_bytes_t *kbytes;
-	int32_t klen;
-
-	if (!bytes && !len)
-		len = RD_KAFKAP_BYTES_LEN_NULL;
-
-	kbytes = rd_malloc(sizeof(*kbytes) + 4 +
-			 (len == RD_KAFKAP_BYTES_LEN_NULL ? 0 : len));
-	kbytes->len = len;
-
-	klen = htobe32(len);
-	memcpy(kbytes+1, &klen, 4);
-
-	if (len == RD_KAFKAP_BYTES_LEN_NULL)
-		kbytes->data = NULL;
-	else {
-		kbytes->data = ((const char *)(kbytes+1))+4;
-                if (bytes)
-                        memcpy((void *)kbytes->data, bytes, len);
-	}
-
-	return kbytes;
-}
-
-
-/**
- * Makes a copy of `src`. The copy will be fully allocated and should
- * be freed with rd_kafkap_bytes_destroy()
- */
-static RD_INLINE RD_UNUSED
-rd_kafkap_bytes_t *rd_kafkap_bytes_copy (const rd_kafkap_bytes_t *src) {
-        return rd_kafkap_bytes_new(src->data, src->len);
-}
-
-
-static RD_INLINE RD_UNUSED int rd_kafkap_bytes_cmp (const rd_kafkap_bytes_t *a,
-						   const rd_kafkap_bytes_t *b) {
-	int minlen = RD_MIN(a->len, b->len);
-	int r = memcmp(a->data, b->data, minlen);
-	if (r)
-		return r;
-	else
-		return a->len - b->len;
-}
-
-static RD_INLINE RD_UNUSED
-int rd_kafkap_bytes_cmp_data (const rd_kafkap_bytes_t *a,
-			      const char *data, int len) {
-	int minlen = RD_MIN(a->len, len);
-	int r = memcmp(a->data, data, minlen);
-	if (r)
-		return r;
-	else
-		return a->len - len;
-}
-
-
-
-
-typedef struct rd_kafka_buf_s rd_kafka_buf_t;
-
-
-#define RD_KAFKA_NODENAME_SIZE  128
-
-
-
-
-/**
- * @brief Message overheads (worst-case)
- */
-
-/**
- * MsgVersion v0..v1
- */
-/* Offset + MessageSize */
-#define RD_KAFKAP_MESSAGESET_V0_HDR_SIZE (8+4)
-/* CRC + Magic + Attr + KeyLen + ValueLen */
-#define RD_KAFKAP_MESSAGE_V0_HDR_SIZE    (4+1+1+4+4)
-/* CRC + Magic + Attr + Timestamp + KeyLen + ValueLen */
-#define RD_KAFKAP_MESSAGE_V1_HDR_SIZE    (4+1+1+8+4+4)
-/* Maximum per-message overhead */
-#define RD_KAFKAP_MESSAGE_V0_OVERHEAD                                   \
-        (RD_KAFKAP_MESSAGESET_V0_HDR_SIZE + RD_KAFKAP_MESSAGE_V0_HDR_SIZE)
-#define RD_KAFKAP_MESSAGE_V1_OVERHEAD                                   \
-        (RD_KAFKAP_MESSAGESET_V0_HDR_SIZE + RD_KAFKAP_MESSAGE_V1_HDR_SIZE)
-
-/**
- * MsgVersion v2
- */
-#define RD_KAFKAP_MESSAGE_V2_OVERHEAD                                  \
-        (                                                              \
-        /* Length (varint) */                                          \
-        RD_UVARINT_ENC_SIZEOF(int32_t) +                               \
-        /* Attributes */                                               \
-        1 +                                                            \
-        /* TimestampDelta (varint) */                                  \
-        RD_UVARINT_ENC_SIZEOF(int64_t) +                               \
-        /* OffsetDelta (varint) */                                     \
-        RD_UVARINT_ENC_SIZEOF(int32_t) +                               \
-        /* KeyLen (varint) */                                          \
-        RD_UVARINT_ENC_SIZEOF(int32_t) +                               \
-        /* ValueLen (varint) */                                        \
-        RD_UVARINT_ENC_SIZEOF(int32_t) +                               \
-        /* HeaderCnt (varint): */                                      \
-        RD_UVARINT_ENC_SIZEOF(int32_t)                                 \
-        )
-
-
-
-/**
- * @brief MessageSets are not explicitly versioned but depend on the
- *        Produce/Fetch API version and the encompassed Message versions.
- *        We use the Message version (MsgVersion, aka MagicByte) to describe
- *        the MessageSet version, that is, MsgVersion <= 1 uses the old
- *        MessageSet version (v0?) while MsgVersion 2 uses MessageSet version v2
- */
-
-/* Old MessageSet header: none */
-#define RD_KAFKAP_MSGSET_V0_SIZE                0
-
-/* MessageSet v2 header */
-#define RD_KAFKAP_MSGSET_V2_SIZE                (8+4+4+1+4+2+4+8+8+8+2+4+4)
-
-/* Byte offsets for MessageSet fields */
-#define RD_KAFKAP_MSGSET_V2_OF_Length           (8)
-#define RD_KAFKAP_MSGSET_V2_OF_CRC              (8+4+4+1)
-#define RD_KAFKAP_MSGSET_V2_OF_Attributes       (8+4+4+1+4)
-#define RD_KAFKAP_MSGSET_V2_OF_LastOffsetDelta  (8+4+4+1+4+2)
-#define RD_KAFKAP_MSGSET_V2_OF_BaseTimestamp    (8+4+4+1+4+2+4)
-#define RD_KAFKAP_MSGSET_V2_OF_MaxTimestamp     (8+4+4+1+4+2+4+8)
-#define RD_KAFKAP_MSGSET_V2_OF_RecordCount      (8+4+4+1+4+2+4+8+8+8+2+4)
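
The rd_kafkap_str_t and rd_kafkap_bytes_t helpers above wrap the on-wire Kafka framing of { int16 length, data.. } and { int32 length, data.. }, where a length of -1 encodes NULL and the payload is not NUL-terminated (hence the RD_KAFKAP_STR_PR() macro for "%.*s"-style printing). A rough standalone illustration of the string framing, written in plain C and not taken from the library (it uses POSIX htons() in place of librdkafka's htobe16()):

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>   /* htons() for the big-endian length prefix */

/* Serialize 'str' into 'buf' using the Kafka string framing:
 * a 2-byte big-endian length followed by the raw bytes (no NUL),
 * where length -1 denotes a NULL string.
 * Returns the number of bytes written; 'buf' must hold 2+strlen(str). */
static size_t kafka_str_serialize (char *buf, const char *str) {
        int16_t len = str ? (int16_t)strlen(str) : -1;
        uint16_t be = htons((uint16_t)len);
        memcpy(buf, &be, 2);
        if (len > 0)
                memcpy(buf + 2, str, (size_t)len);
        return 2 + (len > 0 ? (size_t)len : 0);
}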


http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdkafka_conf.h
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdkafka_conf.h b/thirdparty/librdkafka-0.11.1/src/rdkafka_conf.h
deleted file mode 100644
index 0da8015..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdkafka_conf.h
+++ /dev/null
@@ -1,338 +0,0 @@
-#pragma once
-
-#include "rdlist.h"
-
-
-/**
- * Forward declarations
- */
-struct rd_kafka_transport_s;
-
-
-/**
- * MessageSet compression codecs
- */
-typedef enum {
-	RD_KAFKA_COMPRESSION_NONE,
-	RD_KAFKA_COMPRESSION_GZIP = RD_KAFKA_MSG_ATTR_GZIP,
-	RD_KAFKA_COMPRESSION_SNAPPY = RD_KAFKA_MSG_ATTR_SNAPPY,
-        RD_KAFKA_COMPRESSION_LZ4 = RD_KAFKA_MSG_ATTR_LZ4,
-	RD_KAFKA_COMPRESSION_INHERIT /* Inherit setting from global conf */
-} rd_kafka_compression_t;
-
-
-typedef enum {
-	RD_KAFKA_PROTO_PLAINTEXT,
-	RD_KAFKA_PROTO_SSL,
-	RD_KAFKA_PROTO_SASL_PLAINTEXT,
-	RD_KAFKA_PROTO_SASL_SSL,
-	RD_KAFKA_PROTO_NUM,
-} rd_kafka_secproto_t;
-
-
-typedef enum {
-	RD_KAFKA_CONFIGURED,
-	RD_KAFKA_LEARNED,
-	RD_KAFKA_INTERNAL,
-} rd_kafka_confsource_t;
-
-typedef	enum {
-	_RK_GLOBAL = 0x1,
-	_RK_PRODUCER = 0x2,
-	_RK_CONSUMER = 0x4,
-	_RK_TOPIC = 0x8,
-        _RK_CGRP = 0x10
-} rd_kafka_conf_scope_t;
-
-typedef enum {
-	_RK_CONF_PROP_SET_REPLACE,  /* Replace current value (default) */
-	_RK_CONF_PROP_SET_ADD,      /* Add value (S2F) */
-	_RK_CONF_PROP_SET_DEL      /* Remove value (S2F) */
-} rd_kafka_conf_set_mode_t;
-
-
-
-typedef enum {
-        RD_KAFKA_OFFSET_METHOD_NONE,
-        RD_KAFKA_OFFSET_METHOD_FILE,
-        RD_KAFKA_OFFSET_METHOD_BROKER
-} rd_kafka_offset_method_t;
-
-
-
-
-/**
- * Optional configuration struct passed to rd_kafka_new*().
- *
- * The struct is populated through string properties
- * by calling rd_kafka_conf_set().
- *
- */
-struct rd_kafka_conf_s {
-	/*
-	 * Generic configuration
-	 */
-	int     enabled_events;
-	int     max_msg_size;
-	int     msg_copy_max_size;
-        int     recv_max_msg_size;
-	int     max_inflight;
-	int     metadata_request_timeout_ms;
-	int     metadata_refresh_interval_ms;
-	int     metadata_refresh_fast_cnt;
-	int     metadata_refresh_fast_interval_ms;
-        int     metadata_refresh_sparse;
-        int     metadata_max_age_ms;
-	int     debug;
-	int     broker_addr_ttl;
-        int     broker_addr_family;
-	int     socket_timeout_ms;
-	int     socket_blocking_max_ms;
-	int     socket_sndbuf_size;
-	int     socket_rcvbuf_size;
-        int     socket_keepalive;
-	int     socket_nagle_disable;
-        int     socket_max_fails;
-	char   *client_id_str;
-	char   *brokerlist;
-	int     stats_interval_ms;
-	int     term_sig;
-        int     reconnect_jitter_ms;
-	int     api_version_request;
-	int     api_version_request_timeout_ms;
-	int     api_version_fallback_ms;
-	char   *broker_version_fallback;
-	rd_kafka_secproto_t security_protocol;
-
-#if WITH_SSL
-	struct {
-		SSL_CTX *ctx;
-		char *cipher_suites;
-		char *key_location;
-		char *key_password;
-		char *cert_location;
-		char *ca_location;
-		char *crl_location;
-	} ssl;
-#endif
-
-        struct {
-                const struct rd_kafka_sasl_provider *provider;
-                char *principal;
-                char *mechanisms;
-                char *service_name;
-                char *kinit_cmd;
-                char *keytab;
-                int   relogin_min_time;
-                char *username;
-                char *password;
-#if WITH_SASL_SCRAM
-                /* SCRAM EVP-wrapped hash function
-                 * (return value from EVP_shaX()) */
-                const void/*EVP_MD*/ *scram_evp;
-                /* SCRAM direct hash function (e.g., SHA256()) */
-                unsigned char *(*scram_H) (const unsigned char *d, size_t n,
-                                           unsigned char *md);
-                /* Hash size */
-                size_t         scram_H_size;
-#endif
-        } sasl;
-
-#if WITH_PLUGINS
-        char *plugin_paths;
-        rd_list_t plugins;
-#endif
-
-        /* Interceptors */
-        struct {
-                /* rd_kafka_interceptor_method_t lists */
-                rd_list_t on_conf_set;        /* on_conf_set interceptors
-                                               * (not copied on conf_dup()) */
-                rd_list_t on_conf_dup;        /* .. (not copied) */
-                rd_list_t on_conf_destroy;    /* .. (not copied) */
-                rd_list_t on_new;             /* .. (copied) */
-                rd_list_t on_destroy;         /* .. (copied) */
-                rd_list_t on_send;            /* .. (copied) */
-                rd_list_t on_acknowledgement; /* .. (copied) */
-                rd_list_t on_consume;         /* .. (copied) */
-                rd_list_t on_commit;          /* .. (copied) */
-
-                /* rd_strtup_t list */
-                rd_list_t config;             /* Configuration name=val's
-                                               * handled by interceptors. */
-        } interceptors;
-
-        /* Client group configuration */
-        int    coord_query_intvl_ms;
-
-	int    builtin_features;
-	/*
-	 * Consumer configuration
-	 */
-        int    check_crcs;
-	int    queued_min_msgs;
-        int    queued_max_msg_kbytes;
-        int64_t queued_max_msg_bytes;
-	int    fetch_wait_max_ms;
-        int    fetch_msg_max_bytes;
-	int    fetch_min_bytes;
-	int    fetch_error_backoff_ms;
-        char  *group_id_str;
-
-        rd_kafka_pattern_list_t *topic_blacklist;
-        struct rd_kafka_topic_conf_s *topic_conf; /* Default topic config
-                                                   * for automatically
-                                                   * subscribed topics. */
-        int enable_auto_commit;
-	int enable_auto_offset_store;
-        int auto_commit_interval_ms;
-        int group_session_timeout_ms;
-        int group_heartbeat_intvl_ms;
-        rd_kafkap_str_t *group_protocol_type;
-        char *partition_assignment_strategy;
-        rd_list_t partition_assignors;
-	int enabled_assignor_cnt;
-        struct rd_kafka_assignor_s *assignor;
-
-        void (*rebalance_cb) (rd_kafka_t *rk,
-                              rd_kafka_resp_err_t err,
-			      rd_kafka_topic_partition_list_t *partitions,
-                              void *opaque);
-
-        void (*offset_commit_cb) (rd_kafka_t *rk,
-                                  rd_kafka_resp_err_t err,
-                                  rd_kafka_topic_partition_list_t *offsets,
-                                  void *opaque);
-
-        rd_kafka_offset_method_t offset_store_method;
-	int enable_partition_eof;
-
-	/*
-	 * Producer configuration
-	 */
-	int    queue_buffering_max_msgs;
-	int    queue_buffering_max_kbytes;
-	int    buffering_max_ms;
-	int    max_retries;
-	int    retry_backoff_ms;
-	int    batch_num_messages;
-	rd_kafka_compression_t compression_codec;
-	int    dr_err_only;
-
-	/* Message delivery report callback.
-	 * Called once for each produced message, either on
-	 * successful and acknowledged delivery to the broker in which
-	 * case 'err' is 0, or if the message could not be delivered
-	 * in which case 'err' is non-zero (use rd_kafka_err2str()
-	 * to obtain a human-readable error reason).
-	 *
-	 * If the message was produced with neither RD_KAFKA_MSG_F_FREE
-	 * or RD_KAFKA_MSG_F_COPY set then 'payload' is the original
-	 * pointer provided to rd_kafka_produce().
-	 * rdkafka will not perform any further actions on 'payload'
-	 * at this point and the application may rd_free the payload data
-	 * at this point.
-	 *
-	 * 'opaque' is 'conf.opaque', while 'msg_opaque' is
-	 * the opaque pointer provided in the rd_kafka_produce() call.
-	 */
-	void (*dr_cb) (rd_kafka_t *rk,
-		       void *payload, size_t len,
-		       rd_kafka_resp_err_t err,
-		       void *opaque, void *msg_opaque);
-
-        void (*dr_msg_cb) (rd_kafka_t *rk, const rd_kafka_message_t *rkmessage,
-                           void *opaque);
-
-        /* Consume callback */
-        void (*consume_cb) (rd_kafka_message_t *rkmessage, void *opaque);
-
-        /* Log callback */
-        void (*log_cb) (const rd_kafka_t *rk, int level,
-                        const char *fac, const char *buf);
-        int    log_level;
-        int    log_queue;
-        int    log_thread_name;
-        int    log_connection_close;
-
-        /* Error callback */
-	void (*error_cb) (rd_kafka_t *rk, int err,
-			  const char *reason, void *opaque);
-
-	/* Throttle callback */
-	void (*throttle_cb) (rd_kafka_t *rk, const char *broker_name,
-			     int32_t broker_id, int throttle_time_ms,
-			     void *opaque);
-
-	/* Stats callback */
-	int (*stats_cb) (rd_kafka_t *rk,
-			 char *json,
-			 size_t json_len,
-			 void *opaque);
-
-        /* Socket creation callback */
-        int (*socket_cb) (int domain, int type, int protocol, void *opaque);
-
-        /* Connect callback */
-        int (*connect_cb) (int sockfd,
-                           const struct sockaddr *addr,
-                           int addrlen,
-                           const char *id,
-                           void *opaque);
-
-        /* Close socket callback */
-        int (*closesocket_cb) (int sockfd, void *opaque);
-
-		/* File open callback */
-        int (*open_cb) (const char *pathname, int flags, mode_t mode,
-                        void *opaque);
-
-	/* Opaque passed to callbacks. */
-	void  *opaque;
-
-        /* For use with value-less properties. */
-        int     dummy;
-};
-
-int rd_kafka_socket_cb_linux (int domain, int type, int protocol, void *opaque);
-int rd_kafka_socket_cb_generic (int domain, int type, int protocol,
-                                void *opaque);
-#ifndef _MSC_VER
-int rd_kafka_open_cb_linux (const char *pathname, int flags, mode_t mode,
-                            void *opaque);
-#endif
-int rd_kafka_open_cb_generic (const char *pathname, int flags, mode_t mode,
-                              void *opaque);
-
-
-
-struct rd_kafka_topic_conf_s {
-	int     required_acks;
-	int32_t request_timeout_ms;
-	int     message_timeout_ms;
-
-	int32_t (*partitioner) (const rd_kafka_topic_t *rkt,
-				const void *keydata, size_t keylen,
-				int32_t partition_cnt,
-				void *rkt_opaque,
-				void *msg_opaque);
-
-	rd_kafka_compression_t compression_codec;
-        int     produce_offset_report;
-
-        int     consume_callback_max_msgs;
-	int     auto_commit;
-	int     auto_commit_interval_ms;
-	int     auto_offset_reset;
-	char   *offset_store_path;
-	int     offset_store_sync_interval_ms;
-
-        rd_kafka_offset_method_t offset_store_method;
-
-	/* Application provided opaque pointer (this is rkt_opaque) */
-	void   *opaque;
-};
-
-
-
-void rd_kafka_anyconf_destroy (int scope, void *conf);
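
The callback members of rd_kafka_conf_s above (dr_msg_cb, error_cb, log_cb, ...) are normally populated through the public setters declared in rdkafka.h rather than by writing to this internal struct. A minimal producer-side sketch using that public API (the broker address is a placeholder and error handling is trimmed):

#include <librdkafka/rdkafka.h>
#include <stdio.h>

/* Delivery report callback; ends up in conf->dr_msg_cb above. */
static void dr_msg_cb (rd_kafka_t *rk, const rd_kafka_message_t *rkmessage,
                       void *opaque) {
        (void)rk; (void)opaque;
        if (rkmessage->err)
                fprintf(stderr, "delivery failed: %s\n",
                        rd_kafka_err2str(rkmessage->err));
}

int main (void) {
        char errstr[512];
        rd_kafka_conf_t *conf = rd_kafka_conf_new();

        rd_kafka_conf_set(conf, "bootstrap.servers", "localhost:9092",
                          errstr, sizeof(errstr));
        rd_kafka_conf_set_dr_msg_cb(conf, dr_msg_cb);

        rd_kafka_t *rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf,
                                      errstr, sizeof(errstr));
        if (!rk) {
                fprintf(stderr, "rd_kafka_new failed: %s\n", errstr);
                return 1;
        }
        rd_kafka_destroy(rk);
        return 0;
}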

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdkafka_event.c
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdkafka_event.c b/thirdparty/librdkafka-0.11.1/src/rdkafka_event.c
deleted file mode 100644
index 5fe783d..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdkafka_event.c
+++ /dev/null
@@ -1,232 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2016 Magnus Edenhill
- * All rights reserved.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met: 
- * 
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer. 
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution. 
- * 
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "rdkafka_int.h"
-#include "rdkafka_event.h"
-#include "rd.h"
-
-rd_kafka_event_type_t rd_kafka_event_type (const rd_kafka_event_t *rkev) {
-	return rkev ? rkev->rko_evtype : RD_KAFKA_EVENT_NONE;
-}
-
-const char *rd_kafka_event_name (const rd_kafka_event_t *rkev) {
-	switch (rkev ? rkev->rko_evtype : RD_KAFKA_EVENT_NONE)
-	{
-	case RD_KAFKA_EVENT_NONE:
-		return "(NONE)";
-	case RD_KAFKA_EVENT_DR:
-		return "DeliveryReport";
-	case RD_KAFKA_EVENT_FETCH:
-		return "Fetch";
-	case RD_KAFKA_EVENT_LOG:
-		return "Log";
-	case RD_KAFKA_EVENT_ERROR:
-		return "Error";
-	case RD_KAFKA_EVENT_REBALANCE:
-		return "Rebalance";
-	case RD_KAFKA_EVENT_OFFSET_COMMIT:
-		return "OffsetCommit";
-	case RD_KAFKA_EVENT_STATS:
-		return "Stats";
-	default:
-		return "?unknown?";
-	}
-}
-
-
-
-
-void rd_kafka_event_destroy (rd_kafka_event_t *rkev) {
-	if (unlikely(!rkev))
-		return;
-	rd_kafka_op_destroy(rkev);
-}
-
-
-/**
- * @returns the next message from the event's message queue.
- * @remark messages will be freed automatically when event is destroyed,
- *         application MUST NOT call rd_kafka_message_destroy()
- */
-const rd_kafka_message_t *
-rd_kafka_event_message_next (rd_kafka_event_t *rkev) {
-	rd_kafka_op_t *rko = rkev;
-	rd_kafka_msg_t *rkm;
-	rd_kafka_msgq_t *rkmq, *rkmq2;
-	rd_kafka_message_t *rkmessage;
-
-	switch (rkev->rko_type)
-	{
-	case RD_KAFKA_OP_DR:
-		rkmq = &rko->rko_u.dr.msgq;
-		rkmq2 = &rko->rko_u.dr.msgq2;
-		break;
-
-	case RD_KAFKA_OP_FETCH:
-		/* Just one message */
-		if (rko->rko_u.fetch.evidx++ > 0)
-			return NULL;
-
-		rkmessage = rd_kafka_message_get(rko);
-		if (unlikely(!rkmessage))
-			return NULL;
-
-		/* Store offset */
-		rd_kafka_op_offset_store(NULL, rko, rkmessage);
-
-		return rkmessage;
-
-
-	default:
-		return NULL;
-	}
-
-	if (unlikely(!(rkm = TAILQ_FIRST(&rkmq->rkmq_msgs))))
-		return NULL;
-
-	rd_kafka_msgq_deq(rkmq, rkm, 1);
-
-	/* Put rkm on secondary message queue which will be purged later. */
-	rd_kafka_msgq_enq(rkmq2, rkm);
-
-	return rd_kafka_message_get_from_rkm(rko, rkm);
-}
-
-
-size_t rd_kafka_event_message_array (rd_kafka_event_t *rkev,
-				     const rd_kafka_message_t **rkmessages, size_t size) {
-	size_t cnt = 0;
-	const rd_kafka_message_t *rkmessage;
-
-	while ((rkmessage = rd_kafka_event_message_next(rkev)))
-		rkmessages[cnt++] = rkmessage;
-
-	return cnt;
-}
-
-
-size_t rd_kafka_event_message_count (rd_kafka_event_t *rkev) {
-	switch (rkev->rko_evtype)
-	{
-	case RD_KAFKA_EVENT_DR:
-		return rd_atomic32_get(&rkev->rko_u.dr.msgq.rkmq_msg_cnt);
-	case RD_KAFKA_EVENT_FETCH:
-		return 1;
-	default:
-		return 0;
-	}
-}
-
-
-rd_kafka_resp_err_t rd_kafka_event_error (rd_kafka_event_t *rkev) {
-	return rkev->rko_err;
-}
-
-const char *rd_kafka_event_error_string (rd_kafka_event_t *rkev) {
-	switch (rkev->rko_type)
-	{
-	case RD_KAFKA_OP_ERR:
-	case RD_KAFKA_OP_CONSUMER_ERR:
-		if (rkev->rko_u.err.errstr)
-			return rkev->rko_u.err.errstr;
-		/* FALLTHRU */
-	default:
-		return rd_kafka_err2str(rkev->rko_err);
-	}
-}
-
-
-void *rd_kafka_event_opaque (rd_kafka_event_t *rkev) {
-	switch (rkev->rko_type & ~RD_KAFKA_OP_FLAGMASK)
-	{
-	case RD_KAFKA_OP_OFFSET_COMMIT:
-		return rkev->rko_u.offset_commit.opaque;
-	default:
-		return NULL;
-	}
-}
-
-
-int rd_kafka_event_log (rd_kafka_event_t *rkev, const char **fac,
-			const char **str, int *level) {
-	if (unlikely(rkev->rko_evtype != RD_KAFKA_EVENT_LOG))
-		return -1;
-
-	if (likely(fac != NULL))
-                *fac = rkev->rko_u.log.fac;
-	if (likely(str != NULL))
-		*str = rkev->rko_u.log.str;
-	if (likely(level != NULL))
-		*level = rkev->rko_u.log.level;
-
-	return 0;
-}
-
-const char *rd_kafka_event_stats (rd_kafka_event_t *rkev) {
-	return rkev->rko_u.stats.json;
-}
-
-rd_kafka_topic_partition_list_t *
-rd_kafka_event_topic_partition_list (rd_kafka_event_t *rkev) {
-	switch (rkev->rko_evtype)
-	{
-	case RD_KAFKA_EVENT_REBALANCE:
-		return rkev->rko_u.rebalance.partitions;
-	case RD_KAFKA_EVENT_OFFSET_COMMIT:
-		return rkev->rko_u.offset_commit.partitions;
-	default:
-		return NULL;
-	}
-}
-
-
-rd_kafka_topic_partition_t *
-rd_kafka_event_topic_partition (rd_kafka_event_t *rkev) {
-	rd_kafka_topic_partition_t *rktpar;
-
-	if (unlikely(!rkev->rko_rktp))
-		return NULL;
-
-	rktpar = rd_kafka_topic_partition_new_from_rktp(
-		rd_kafka_toppar_s2i(rkev->rko_rktp));
-
-	switch (rkev->rko_type)
-	{
-	case RD_KAFKA_OP_ERR:
-	case RD_KAFKA_OP_CONSUMER_ERR:
-		rktpar->offset = rkev->rko_u.err.offset;
-		break;
-	default:
-		break;
-	}
-
-	rktpar->err = rkev->rko_err;
-
-	return rktpar;
-
-}
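
The accessors above back librdkafka's public event API: an application pulls rd_kafka_event_t objects off a queue, inspects them with rd_kafka_event_type() and friends, and releases them with rd_kafka_event_destroy(). A rough sketch of such a loop using the public queue API (assumes an already-created handle 'rk'; it returns once a poll times out):

#include <librdkafka/rdkafka.h>
#include <stdio.h>

static void poll_events (rd_kafka_t *rk) {
        rd_kafka_queue_t *queue = rd_kafka_queue_get_main(rk);
        rd_kafka_event_t *ev;

        /* rd_kafka_queue_poll() returns NULL on timeout. */
        while ((ev = rd_kafka_queue_poll(queue, 100 /*ms*/))) {
                switch (rd_kafka_event_type(ev)) {
                case RD_KAFKA_EVENT_ERROR:
                        fprintf(stderr, "%s: %s\n",
                                rd_kafka_event_name(ev),
                                rd_kafka_event_error_string(ev));
                        break;
                default:
                        break;
                }
                /* Frees any messages still attached to the event. */
                rd_kafka_event_destroy(ev);
        }
        rd_kafka_queue_destroy(queue);
}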

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdkafka_event.h
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdkafka_event.h b/thirdparty/librdkafka-0.11.1/src/rdkafka_event.h
deleted file mode 100644
index 0e8f8a1..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdkafka_event.h
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2016 Magnus Edenhill
- * All rights reserved.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met: 
- * 
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer. 
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution. 
- * 
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-/**
- * @brief Converts op type to event type.
- * @returns the event type, or 0 if the op cannot be mapped to an event.
- */
-static RD_UNUSED RD_INLINE
-rd_kafka_event_type_t rd_kafka_op2event (rd_kafka_op_type_t optype) {
-	static const rd_kafka_event_type_t map[RD_KAFKA_OP__END] = {
-		[RD_KAFKA_OP_DR] = RD_KAFKA_EVENT_DR,
-		[RD_KAFKA_OP_FETCH] = RD_KAFKA_EVENT_FETCH,
-		[RD_KAFKA_OP_ERR] = RD_KAFKA_EVENT_ERROR,
-		[RD_KAFKA_OP_CONSUMER_ERR] = RD_KAFKA_EVENT_ERROR,
-		[RD_KAFKA_OP_REBALANCE] = RD_KAFKA_EVENT_REBALANCE,
-		[RD_KAFKA_OP_OFFSET_COMMIT] = RD_KAFKA_EVENT_OFFSET_COMMIT,
-                [RD_KAFKA_OP_LOG] = RD_KAFKA_EVENT_LOG,
-		[RD_KAFKA_OP_STATS] = RD_KAFKA_EVENT_STATS
-	};
-
-	return map[(int)optype & ~RD_KAFKA_OP_FLAGMASK];
-}
-
-
-/**
- * @brief Attempt to set up an event based on rko.
- * @returns 1 if op is event:able and set up, else 0.
- */
-static RD_UNUSED RD_INLINE
-int rd_kafka_event_setup (rd_kafka_t *rk, rd_kafka_op_t *rko) {
-	rko->rko_evtype = rd_kafka_op2event(rko->rko_type);
-	switch (rko->rko_evtype)
-	{
-	case RD_KAFKA_EVENT_NONE:
-		return 0;
-
-	case RD_KAFKA_EVENT_DR:
-		rko->rko_rk = rk;
-		rd_dassert(!rko->rko_u.dr.do_purge2);
-		rd_kafka_msgq_init(&rko->rko_u.dr.msgq2);
-		rko->rko_u.dr.do_purge2 = 1;
-		return 1;
-
-	case RD_KAFKA_EVENT_REBALANCE:
-	case RD_KAFKA_EVENT_ERROR:
-        case RD_KAFKA_EVENT_LOG:
-        case RD_KAFKA_EVENT_OFFSET_COMMIT:
-        case RD_KAFKA_EVENT_STATS:
-		return 1;
-
-	default:
-		return 0;
-		
-	}
-}

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdkafka_feature.c
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdkafka_feature.c b/thirdparty/librdkafka-0.11.1/src/rdkafka_feature.c
deleted file mode 100644
index 8a9ab24..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdkafka_feature.c
+++ /dev/null
@@ -1,444 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2016, Magnus Edenhill
- * All rights reserved.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met: 
- * 
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer. 
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution. 
- * 
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-#include "rdkafka_int.h"
-#include "rdkafka_feature.h"
-
-#include <stdlib.h>
-
-static const char *rd_kafka_feature_names[] = {
-	"MsgVer1",
-	"ApiVersion",
-	"BrokerBalancedConsumer",
-	"ThrottleTime",
-	"Sasl",
-	"SaslHandshake",
-	"BrokerGroupCoordinator",
-	"LZ4",
-        "OffsetTime",
-        "MsgVer2",
-	NULL
-};
-
-
-static const struct rd_kafka_feature_map {
-	/* RD_KAFKA_FEATURE_... */
-	int feature;
-
-	/* Depends on the following ApiVersions overlapping with
-	 * what the broker supports: */
-	struct rd_kafka_ApiVersion depends[RD_KAFKAP__NUM];
-
-} rd_kafka_feature_map[] = {
-	/**
-	 * @brief List of features and the ApiVersions they depend on.
-	 *
-	 * The dependency list consists of the ApiKey followed by this
-	 * client's supported minimum and maximum API versions.
-	 * As long as this list and its versions overlaps with the
-	 * broker supported API versions the feature will be enabled.
-	 */
-	{
-
-		/* @brief >=0.10.0: Message.MagicByte version 1:
-		 * Relative offsets (KIP-31) and message timestamps (KIP-32). */
-		.feature = RD_KAFKA_FEATURE_MSGVER1,
-		.depends = {
-			{ RD_KAFKAP_Produce, 2, 2 },
-			{ RD_KAFKAP_Fetch, 2, 2 },
-			{ -1 },
-		},
-	},
-        {
-                /* @brief >=0.11.0: Message.MagicByte version 2 */
-                .feature = RD_KAFKA_FEATURE_MSGVER2,
-                .depends = {
-                        { RD_KAFKAP_Produce, 3, 3 },
-                        { RD_KAFKAP_Fetch, 4, 4 },
-                        { -1 },
-                },
-        },
-	{
-		
-		/* @brief >=0.10.0: ApiVersionQuery support.
-		 * @remark This is a bit of a chicken-and-egg problem but needs to be
-		 *         set by feature_check() to avoid the feature being cleared
-		 *         even when broker supports it. */
-		.feature = RD_KAFKA_FEATURE_APIVERSION,
-		.depends = {
-			{ RD_KAFKAP_ApiVersion, 0, 0 },
-			{ -1 },
-		},
-	},
-	{
-		/* @brief >=0.8.2.0: Broker-based Group coordinator */
-		.feature = RD_KAFKA_FEATURE_BROKER_GROUP_COORD,
-		.depends = {
-			{ RD_KAFKAP_GroupCoordinator, 0, 0 },
-			{ -1 },
-		},
-	},
-	{
-		/* @brief >=0.9.0: Broker-based balanced consumer groups. */
-		.feature = RD_KAFKA_FEATURE_BROKER_BALANCED_CONSUMER,
-		.depends = {
-			{ RD_KAFKAP_GroupCoordinator, 0, 0 },
-			{ RD_KAFKAP_OffsetCommit, 1, 2 },
-			{ RD_KAFKAP_OffsetFetch, 1, 1 },
-			{ RD_KAFKAP_JoinGroup, 0, 0 },
-			{ RD_KAFKAP_SyncGroup, 0, 0 },
-			{ RD_KAFKAP_Heartbeat, 0, 0 },
-			{ RD_KAFKAP_LeaveGroup, 0, 0 },
-			{ -1 },
-		},
-	},
-	{
-		/* @brief >=0.9.0: ThrottleTime */
-		.feature = RD_KAFKA_FEATURE_THROTTLETIME,
-		.depends = {
-			{ RD_KAFKAP_Produce, 1, 2 },
-			{ RD_KAFKAP_Fetch, 1, 2 },
-			{ -1 },
-		},
-
-        },
-        {
-                /* @brief >=0.9.0: SASL (GSSAPI) authentication.
-                 * Since SASL is not using the Kafka protocol
-                 * we must use something else to map us to the
-                 * proper broker version support:
-                 * JoinGroup was released along with SASL in 0.9.0. */
-                .feature = RD_KAFKA_FEATURE_SASL_GSSAPI,
-                .depends = {
-                        { RD_KAFKAP_JoinGroup, 0, 0 },
-                        { -1 },
-                },
-        },
-        {
-                /* @brief >=0.10.0: SASL mechanism handshake (KIP-43)
-                 *                  to automatically support other mechanisms
-                 *                  than GSSAPI, such as PLAIN. */
-                .feature = RD_KAFKA_FEATURE_SASL_HANDSHAKE,
-                .depends = {
-                        { RD_KAFKAP_SaslHandshake, 0, 0 },
-                        { -1 },
-                },
-        },
-        {
-                /* @brief >=0.8.2: LZ4 compression.
-                 * Since LZ4 initially did not rely on a specific API
-                 * type or version (it does in >=0.10.0)
-                 * we must use something else to map us to the
-                 * proper broker version support:
-                 * GroupCoordinator was released in 0.8.2 */
-                .feature = RD_KAFKA_FEATURE_LZ4,
-                .depends = {
-                        { RD_KAFKAP_GroupCoordinator, 0, 0 },
-                        { -1 },
-                },
-        },
-        {
-                /* @brief >=0.10.1.0: Offset v1 (KIP-79)
-                 * Time-based offset requests */
-                .feature = RD_KAFKA_FEATURE_OFFSET_TIME,
-                .depends = {
-                        { RD_KAFKAP_Offset, 1, 1 },
-                        { -1 },
-                }
-        },
-        { .feature = 0 }, /* sentinel */
-};
-
-
-
-/**
- * @brief In the absence of KIP-35 support in earlier broker versions we provide hardcoded
- *        lists that correspond to older broker versions.
- */
-
-/* >= 0.10.0.0: dummy for all future versions that support ApiVersionRequest */
-static struct rd_kafka_ApiVersion rd_kafka_ApiVersion_Queryable[] = {
-	{ RD_KAFKAP_ApiVersion, 0, 0 }
-};
-
-
-/* =~ 0.9.0 */
-static struct rd_kafka_ApiVersion rd_kafka_ApiVersion_0_9_0[] = {
-	{ RD_KAFKAP_Produce, 0, 1 },
-	{ RD_KAFKAP_Fetch, 0, 1 },
-	{ RD_KAFKAP_Offset, 0, 0 },
-	{ RD_KAFKAP_Metadata, 0, 0 },
-	{ RD_KAFKAP_OffsetCommit, 0, 2 },
-	{ RD_KAFKAP_OffsetFetch, 0, 1 },
-	{ RD_KAFKAP_GroupCoordinator, 0, 0 },
-	{ RD_KAFKAP_JoinGroup, 0, 0 },
-	{ RD_KAFKAP_Heartbeat, 0, 0 },
-	{ RD_KAFKAP_LeaveGroup, 0, 0 },
-	{ RD_KAFKAP_SyncGroup, 0, 0 },
-	{ RD_KAFKAP_DescribeGroups, 0, 0 },
-	{ RD_KAFKAP_ListGroups, 0, 0 }
-};
-
-/* =~ 0.8.2 */
-static struct rd_kafka_ApiVersion rd_kafka_ApiVersion_0_8_2[] = {
-	{ RD_KAFKAP_Produce, 0, 0 },
-	{ RD_KAFKAP_Fetch, 0, 0 },
-	{ RD_KAFKAP_Offset, 0, 0 },
-	{ RD_KAFKAP_Metadata, 0, 0 },
-	{ RD_KAFKAP_OffsetCommit, 0, 1 },
-	{ RD_KAFKAP_OffsetFetch, 0, 1 },
-	{ RD_KAFKAP_GroupCoordinator, 0, 0 }
-};
-
-/* =~ 0.8.1 */
-static struct rd_kafka_ApiVersion rd_kafka_ApiVersion_0_8_1[] = {
-	{ RD_KAFKAP_Produce, 0, 0 },
-	{ RD_KAFKAP_Fetch, 0, 0 },
-	{ RD_KAFKAP_Offset, 0, 0 },
-	{ RD_KAFKAP_Metadata, 0, 0 },
-	{ RD_KAFKAP_OffsetCommit, 0, 1 },
-	{ RD_KAFKAP_OffsetFetch, 0, 0 }
-};
-
-/* =~ 0.8.0 */
-static struct rd_kafka_ApiVersion rd_kafka_ApiVersion_0_8_0[] = {
-	{ RD_KAFKAP_Produce, 0, 0 },
-	{ RD_KAFKAP_Fetch, 0, 0 },
-	{ RD_KAFKAP_Offset, 0, 0 },
-	{ RD_KAFKAP_Metadata, 0, 0 }
-};
-
-
-/**
- * @brief Returns the ApiVersion list for legacy broker versions that do not
- *        support the ApiVersionQuery request. E.g., brokers <0.10.0.
- *
- * @param broker_version Broker version to match (longest prefix matching).
- * @param fallback If no match is found, fall back to this broker version's API list (the function still returns 0).
- *
- * @returns 1 if \p broker_version was recognized: \p *apisp will point to
- *          the ApiVersion list and *api_cntp will be set to its element count.
- *          0 if \p broker_version was not recognized: \p *apisp remains unchanged.
- *
- */
-int rd_kafka_get_legacy_ApiVersions (const char *broker_version,
-				     struct rd_kafka_ApiVersion **apisp,
-				     size_t *api_cntp, const char *fallback) {
-	static const struct {
-		const char *pfx;
-		struct rd_kafka_ApiVersion *apis;
-		size_t api_cnt;
-	} vermap[] = {
-#define _VERMAP(PFX,APIS) { PFX, APIS, RD_ARRAYSIZE(APIS) }
-		_VERMAP("0.9.0", rd_kafka_ApiVersion_0_9_0),
-		_VERMAP("0.8.2", rd_kafka_ApiVersion_0_8_2),
-		_VERMAP("0.8.1", rd_kafka_ApiVersion_0_8_1),
-		_VERMAP("0.8.0", rd_kafka_ApiVersion_0_8_0),
-		{ "0.7.", NULL }, /* Unsupported */
-		{ "0.6.", NULL }, /* Unsupported */
-		_VERMAP("", rd_kafka_ApiVersion_Queryable),
-		{ NULL }
-	};
-	int i;
-	int fallback_i = -1;
-        int ret = 0;
-
-        *apisp = NULL;
-        *api_cntp = 0;
-
-	for (i = 0 ; vermap[i].pfx ; i++) {
-		if (!strncmp(vermap[i].pfx, broker_version, strlen(vermap[i].pfx))) {
-			if (!vermap[i].apis)
-				return 0;
-			*apisp = vermap[i].apis;
-			*api_cntp = vermap[i].api_cnt;
-                        ret = 1;
-                        break;
-		} else if (fallback && !strcmp(vermap[i].pfx, fallback))
-			fallback_i = i;
-	}
-
-	if (!*apisp && fallback) {
-		rd_kafka_assert(NULL, fallback_i != -1);
-		*apisp    = vermap[fallback_i].apis;
-		*api_cntp = vermap[fallback_i].api_cnt;
-	}
-
-        return ret;
-}
-
-
-/**
- * @returns 1 if the provided broker version (probably)
- *          supports api.version.request.
- */
-int rd_kafka_ApiVersion_is_queryable (const char *broker_version) {
-	struct rd_kafka_ApiVersion *apis;
-	size_t api_cnt;
-
-
-	if (!rd_kafka_get_legacy_ApiVersions(broker_version,
-					     &apis, &api_cnt, 0))
-		return 0;
-
-	return apis == rd_kafka_ApiVersion_Queryable;
-}
-
-
-
-
-	
-/**
- * @brief Check if match's versions overlaps with \p apis.
- *
- * @returns 1 if true, else 0.
- * @remark \p apis must be sorted using rd_kafka_ApiVersion_key_cmp()
- */
-static RD_INLINE int
-rd_kafka_ApiVersion_check (const struct rd_kafka_ApiVersion *apis, size_t api_cnt,
-			   const struct rd_kafka_ApiVersion *match) {
-	const struct rd_kafka_ApiVersion *api;
-
-	api = bsearch(match, apis, api_cnt, sizeof(*apis),
-		      rd_kafka_ApiVersion_key_cmp);
-	if (unlikely(!api))
-		return 0;
-
-	return match->MinVer <= api->MaxVer && api->MinVer <= match->MaxVer;
-}
-
-
-/**
- * @brief Compare broker's supported API versions to our feature request map
- *        and enable/disable features accordingly.
- *
- * @param broker_apis Broker's supported APIs. If NULL the
- *        \p broker.version.fallback configuration property will specify a
- *        default legacy version to use.
- * @param broker_api_cnt Number of elements in \p broker_apis
- *
- * @returns the supported features (bitmask) to enable.
- */
-int rd_kafka_features_check (rd_kafka_broker_t *rkb,
-			     struct rd_kafka_ApiVersion *broker_apis,
-			     size_t broker_api_cnt) {
-	int features = 0;
-	int i;
-
-	/* Scan through features. */
-	for (i = 0 ; rd_kafka_feature_map[i].feature != 0 ; i++) {
-		const struct rd_kafka_ApiVersion *match;
-		int fails = 0;
-
-		/* For each feature check that all its API dependencies
-		 * can be fulfilled. */
-
-		for (match = &rd_kafka_feature_map[i].depends[0] ;
-		     match->ApiKey != -1 ; match++) {
-			int r;
-			
-			r = rd_kafka_ApiVersion_check(broker_apis, broker_api_cnt,
-						      match);
-
-			rd_rkb_dbg(rkb, FEATURE, "APIVERSION",
-				   " Feature %s: %s (%hd..%hd) "
-				   "%ssupported by broker",
-				   rd_kafka_features2str(rd_kafka_feature_map[i].
-							feature),
-				   rd_kafka_ApiKey2str(match->ApiKey),
-				   match->MinVer, match->MaxVer,
-				   r ? "" : "NOT ");
-
-			fails += !r;
-		}
-
-		rd_rkb_dbg(rkb, FEATURE, "APIVERSION",
-			   "%s feature %s",
-			   fails ? "Disabling" : "Enabling",
-			   rd_kafka_features2str(rd_kafka_feature_map[i].feature));
-
-
-		if (!fails)
-			features |= rd_kafka_feature_map[i].feature;
-	}
-
-	return features;
-}
-
-
-
-/**
- * @brief Make an allocated and sorted copy of \p src.
- */
-void
-rd_kafka_ApiVersions_copy (const struct rd_kafka_ApiVersion *src,
-                           size_t src_cnt,
-                           struct rd_kafka_ApiVersion **dstp,
-                           size_t *dst_cntp) {
-        *dstp = rd_memdup(src, sizeof(*src) * src_cnt);
-        *dst_cntp = src_cnt;
-        qsort(*dstp, *dst_cntp, sizeof(**dstp), rd_kafka_ApiVersion_key_cmp);
-}
-
-
-
-
-
-
-/**
- * @returns a human-readable feature flag string.
- */
-const char *rd_kafka_features2str (int features) {
-	static RD_TLS char ret[4][128];
-	size_t of = 0;
-	static RD_TLS int reti = 0;
-	int i;
-
-	reti = (reti + 1) % 4;
-
-	*ret[reti] = '\0';
-	for (i = 0 ; rd_kafka_feature_names[i] ; i++) {
-		int r;
-		if (!(features & (1 << i)))
-			continue;
-
-		r = rd_snprintf(ret[reti]+of, sizeof(ret[reti])-of, "%s%s",
-				of == 0 ? "" : ",",
-				rd_kafka_feature_names[i]);
-		if ((size_t)r > sizeof(ret[reti])-of) {
-			/* Out of space */
-			memcpy(&ret[reti][sizeof(ret[reti])-3], "..", 3);
-			break;
-		}
-
-		of += r;
-	}
-
-	return ret[reti];
-}
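
The core of rd_kafka_ApiVersion_check() above is a closed-interval overlap test between the version range this client needs and the range the broker advertises. A standalone illustration, using the MsgVer1 dependency on Fetch v2..v2 from the feature map above (the broker's range is made up for the example):

#include <stdio.h>

struct api_version { short api_key, min_ver, max_ver; };

/* Two closed version ranges overlap iff each one starts no later than
 * the other ends -- the same test rd_kafka_ApiVersion_check() uses. */
static int ranges_overlap (const struct api_version *want,
                           const struct api_version *have) {
        return want->min_ver <= have->max_ver &&
               have->min_ver <= want->max_ver;
}

int main (void) {
        struct api_version want = { 1 /*Fetch*/, 2, 2 };  /* MsgVer1 needs Fetch v2 */
        struct api_version have = { 1 /*Fetch*/, 0, 3 };  /* broker supports v0..v3 */

        printf("feature %s\n",
               ranges_overlap(&want, &have) ? "enabled" : "disabled");
        return 0;
}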

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdkafka_feature.h
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdkafka_feature.h b/thirdparty/librdkafka-0.11.1/src/rdkafka_feature.h
deleted file mode 100644
index 1e40664..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdkafka_feature.h
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2016, Magnus Edenhill
- * All rights reserved.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met: 
- * 
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer. 
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution. 
- * 
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-#pragma once
-
-
-/**
- * @brief Kafka protocol features
- */
-
-/* Message version 1 (MagicByte=1):
- *  + relative offsets (KIP-31)
- *  + timestamps (KIP-32) */
-#define RD_KAFKA_FEATURE_MSGVER1    0x1
-
-/* ApiVersionQuery support (KIP-35) */
-#define RD_KAFKA_FEATURE_APIVERSION 0x2
-
- /* >= 0.9: Broker-based Balanced Consumer */
-#define RD_KAFKA_FEATURE_BROKER_BALANCED_CONSUMER 0x4
-
-/* >= 0.9: Produce/Fetch ThrottleTime reporting */
-#define RD_KAFKA_FEATURE_THROTTLETIME 0x8
-
-/* >= 0.9: SASL GSSAPI support */
-#define RD_KAFKA_FEATURE_SASL_GSSAPI    0x10
-
-/* >= 0.10: SaslMechanismRequest (KIP-43) */
-#define RD_KAFKA_FEATURE_SASL_HANDSHAKE 0x20
-
-/* >= 0.8.2.0: Broker-based Group coordinator */
-#define RD_KAFKA_FEATURE_BROKER_GROUP_COORD 0x40
-
-/* >= 0.8.2.0: LZ4 compression (with bad and proper HC checksums) */
-#define RD_KAFKA_FEATURE_LZ4 0x80
-
-/* >= 0.10.1.0: Time-based Offset fetch (KIP-79) */
-#define RD_KAFKA_FEATURE_OFFSET_TIME 0x100
-
-/* >= 0.11.0.0: Message version 2 (MagicByte=2):
- *  + EOS message format KIP-98 */
-#define RD_KAFKA_FEATURE_MSGVER2     0x200
-
-
-int rd_kafka_get_legacy_ApiVersions (const char *broker_version,
-				     struct rd_kafka_ApiVersion **apisp,
-				     size_t *api_cntp, const char *fallback);
-int rd_kafka_ApiVersion_is_queryable (const char *broker_version);
-void rd_kafka_ApiVersions_copy (const struct rd_kafka_ApiVersion *src, size_t src_cnt,
-				struct rd_kafka_ApiVersion **dstp, size_t *dst_cntp);
-int rd_kafka_features_check (rd_kafka_broker_t *rkb,
-			     struct rd_kafka_ApiVersion *broker_apis,
-			     size_t broker_api_cnt);
-
-const char *rd_kafka_features2str (int features);
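
Bit i of the feature flags declared above corresponds to entry i of rd_kafka_feature_names[] in rdkafka_feature.c, which is how rd_kafka_features2str() builds its comma-separated string. A standalone sketch of that mapping (the name table is copied from the source above; the bitmask in main() is an arbitrary example):

#include <stdio.h>

static const char *feature_names[] = {
        "MsgVer1", "ApiVersion", "BrokerBalancedConsumer", "ThrottleTime",
        "Sasl", "SaslHandshake", "BrokerGroupCoordinator", "LZ4",
        "OffsetTime", "MsgVer2", NULL
};

/* Print the names of all features whose bit is set in 'features'. */
static void print_features (int features) {
        const char *sep = "";
        int i;

        for (i = 0; feature_names[i]; i++) {
                if (features & (1 << i)) {
                        printf("%s%s", sep, feature_names[i]);
                        sep = ",";
                }
        }
        printf("\n");
}

int main (void) {
        /* 0x1 | 0x2 | 0x80 == MSGVER1 | APIVERSION | LZ4 */
        print_features(0x1 | 0x2 | 0x80);
        return 0;
}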

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdkafka_int.h
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdkafka_int.h b/thirdparty/librdkafka-0.11.1/src/rdkafka_int.h
deleted file mode 100644
index 792c1a5..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdkafka_int.h
+++ /dev/null
@@ -1,435 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2013, Magnus Edenhill
- * All rights reserved.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met: 
- * 
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer. 
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution. 
- * 
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#pragma once
-
-
-#ifndef _MSC_VER
-#define _GNU_SOURCE  /* for strndup() */
-#include <syslog.h>
-#else
-typedef int mode_t;
-#endif
-#include <fcntl.h>
-
-
-#include "rdsysqueue.h"
-
-#include "rdkafka.h"
-#include "rd.h"
-#include "rdlog.h"
-#include "rdtime.h"
-#include "rdaddr.h"
-#include "rdinterval.h"
-#include "rdavg.h"
-#include "rdlist.h"
-
-#if WITH_SSL
-#include <openssl/ssl.h>
-#endif
-
-
-
-
-typedef struct rd_kafka_itopic_s rd_kafka_itopic_t;
-typedef struct rd_ikafka_s rd_ikafka_t;
-
-
-#define rd_kafka_assert(rk, cond) do {                                  \
-                if (unlikely(!(cond)))                                  \
-                        rd_kafka_crash(__FILE__,__LINE__, __FUNCTION__, \
-                                       (rk), "assert: " # cond);        \
-        } while (0)
-
-
-void
-RD_NORETURN
-rd_kafka_crash (const char *file, int line, const char *function,
-                rd_kafka_t *rk, const char *reason);
-
-
-/* Forward declarations */
-struct rd_kafka_s;
-struct rd_kafka_itopic_s;
-struct rd_kafka_msg_s;
-struct rd_kafka_broker_s;
-
-typedef RD_SHARED_PTR_TYPE(, struct rd_kafka_toppar_s) shptr_rd_kafka_toppar_t;
-typedef RD_SHARED_PTR_TYPE(, struct rd_kafka_itopic_s) shptr_rd_kafka_itopic_t;
-
-
-
-#include "rdkafka_op.h"
-#include "rdkafka_queue.h"
-#include "rdkafka_msg.h"
-#include "rdkafka_proto.h"
-#include "rdkafka_buf.h"
-#include "rdkafka_pattern.h"
-#include "rdkafka_conf.h"
-#include "rdkafka_transport.h"
-#include "rdkafka_timer.h"
-#include "rdkafka_assignor.h"
-#include "rdkafka_metadata.h"
-
-
-/**
- * Protocol level sanity
- */
-#define RD_KAFKAP_BROKERS_MAX     1000
-#define RD_KAFKAP_TOPICS_MAX      1000000
-#define RD_KAFKAP_PARTITIONS_MAX  10000
-
-
-#define RD_KAFKA_OFFSET_IS_LOGICAL(OFF)  ((OFF) < 0)
-
-
-
-
-
-
-
-/**
- * Kafka handle, internal representation of the application's rd_kafka_t.
- */
-
-typedef RD_SHARED_PTR_TYPE(shptr_rd_ikafka_s, rd_ikafka_t) shptr_rd_ikafka_t;
-
-struct rd_kafka_s {
-	rd_kafka_q_t *rk_rep;   /* kafka -> application reply queue */
-	rd_kafka_q_t *rk_ops;   /* any -> rdkafka main thread ops */
-
-	TAILQ_HEAD(, rd_kafka_broker_s) rk_brokers;
-        rd_list_t                  rk_broker_by_id; /* Fast id lookups. */
-	rd_atomic32_t              rk_broker_cnt;
-	rd_atomic32_t              rk_broker_down_cnt;
-        mtx_t                      rk_internal_rkb_lock;
-	rd_kafka_broker_t         *rk_internal_rkb;
-
-	/* Broadcasting of broker state changes to wake up
-	 * functions waiting for a state change. */
-	cnd_t                      rk_broker_state_change_cnd;
-	mtx_t                      rk_broker_state_change_lock;
-	int                        rk_broker_state_change_version;
-
-
-	TAILQ_HEAD(, rd_kafka_itopic_s)  rk_topics;
-	int              rk_topic_cnt;
-
-        struct rd_kafka_cgrp_s *rk_cgrp;
-
-        rd_kafka_conf_t  rk_conf;
-        rd_kafka_q_t    *rk_logq;          /* Log queue if `log.queue` set */
-        char             rk_name[128];
-	rd_kafkap_str_t *rk_client_id;
-        rd_kafkap_str_t *rk_group_id;    /* Consumer group id */
-
-	int              rk_flags;
-	rd_atomic32_t    rk_terminate;
-	rwlock_t         rk_lock;
-	rd_kafka_type_t  rk_type;
-	struct timeval   rk_tv_state_change;
-
-	rd_atomic32_t    rk_last_throttle;  /* Last throttle_time_ms value
-					     * from broker. */
-
-        /* Locks: rd_kafka_*lock() */
-        rd_ts_t          rk_ts_metadata;    /* Timestamp of most recent
-                                             * metadata. */
-
-	struct rd_kafka_metadata *rk_full_metadata; /* Last full metadata. */
-	rd_ts_t          rk_ts_full_metadata;       /* Timestamp of most recent full metadata. */
-        struct rd_kafka_metadata_cache rk_metadata_cache; /* Metadata cache */
-
-        char            *rk_clusterid;      /* ClusterId from metadata */
-
-        /* Simple consumer count:
-         *  >0: Running in legacy / Simple Consumer mode,
-         *   0: No consumers running
-         *  <0: Running in High level consumer mode */
-        rd_atomic32_t    rk_simple_cnt;
-
-        /**
-         * Exactly Once Semantics
-         */
-        struct {
-                rd_kafkap_str_t *TransactionalId;
-                int64_t          PID;
-                int16_t          ProducerEpoch;
-        } rk_eos;
-
-	const rd_kafkap_bytes_t *rk_null_bytes;
-
-	struct {
-		mtx_t lock;       /* Protects access to this struct */
-		cnd_t cnd;        /* For waking up blocking injectors */
-		unsigned int cnt; /* Current message count */
-		size_t size;      /* Current message size sum */
-	        unsigned int max_cnt; /* Max limit */
-		size_t max_size; /* Max limit */
-	} rk_curr_msgs;
-
-        rd_kafka_timers_t rk_timers;
-	thrd_t rk_thread;
-
-        int rk_initialized;
-};
-
-#define rd_kafka_wrlock(rk)    rwlock_wrlock(&(rk)->rk_lock)
-#define rd_kafka_rdlock(rk)    rwlock_rdlock(&(rk)->rk_lock)
-#define rd_kafka_rdunlock(rk)    rwlock_rdunlock(&(rk)->rk_lock)
-#define rd_kafka_wrunlock(rk)    rwlock_wrunlock(&(rk)->rk_lock)
-
-/**
- * @brief Add \p cnt messages of total size \p size bytes to the
- *        internal bookkeeping of current message counts.
- *        If the total message count or size after the addition would exceed
- *        the configured limits \c queue.buffering.max.messages and
- *        \c queue.buffering.max.kbytes then, depending on \p block, the
- *        function either blocks until enough space is available
- *        (if \p block is 1) or immediately returns
- *        RD_KAFKA_RESP_ERR__QUEUE_FULL (if \p block is 0).
- */
-static RD_INLINE RD_UNUSED rd_kafka_resp_err_t
-rd_kafka_curr_msgs_add (rd_kafka_t *rk, unsigned int cnt, size_t size,
-			int block) {
-
-	if (rk->rk_type != RD_KAFKA_PRODUCER)
-		return RD_KAFKA_RESP_ERR_NO_ERROR;
-
-	mtx_lock(&rk->rk_curr_msgs.lock);
-	while (unlikely(rk->rk_curr_msgs.cnt + cnt >
-			rk->rk_curr_msgs.max_cnt ||
-			(unsigned long long)(rk->rk_curr_msgs.size + size) >
-			(unsigned long long)rk->rk_curr_msgs.max_size)) {
-		if (!block) {
-			mtx_unlock(&rk->rk_curr_msgs.lock);
-			return RD_KAFKA_RESP_ERR__QUEUE_FULL;
-		}
-
-		cnd_wait(&rk->rk_curr_msgs.cnd, &rk->rk_curr_msgs.lock);
-	}
-
-	rk->rk_curr_msgs.cnt  += cnt;
-	rk->rk_curr_msgs.size += size;
-	mtx_unlock(&rk->rk_curr_msgs.lock);
-
-	return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
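/* Illustrative sketch, not from the original source: a producer code path
 * would reserve room for a message before enqueuing it (and release it again
 * via rd_kafka_curr_msgs_sub() on delivery or failure). The `block` argument
 * typically derives from the public RD_KAFKA_MSG_F_BLOCK produce flag. */
static RD_UNUSED rd_kafka_resp_err_t
example_reserve_one_msg (rd_kafka_t *rk, size_t payload_len, int block) {
        /* Returns RD_KAFKA_RESP_ERR__QUEUE_FULL if non-blocking and the
         * queue.buffering.max.messages/kbytes limits would be exceeded. */
        return rd_kafka_curr_msgs_add(rk, 1, payload_len, block);
}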
-
-
-/**
- * @brief Subtract \p cnt messages of total size \p size from the
- *        current bookkeeping and broadcast a wakeup on the condvar
- *        for any waiting & blocking threads.
- */
-static RD_INLINE RD_UNUSED void
-rd_kafka_curr_msgs_sub (rd_kafka_t *rk, unsigned int cnt, size_t size) {
-        int broadcast = 0;
-
-	if (rk->rk_type != RD_KAFKA_PRODUCER)
-		return;
-
-	mtx_lock(&rk->rk_curr_msgs.lock);
-	rd_kafka_assert(NULL,
-			rk->rk_curr_msgs.cnt >= cnt &&
-			rk->rk_curr_msgs.size >= size);
-
-        /* If the subtraction would pass one of the thresholds
-         * broadcast a wake-up to any waiting listeners. */
-        if ((rk->rk_curr_msgs.cnt >= rk->rk_curr_msgs.max_cnt &&
-             rk->rk_curr_msgs.cnt - cnt < rk->rk_curr_msgs.max_cnt) ||
-            (rk->rk_curr_msgs.size >= rk->rk_curr_msgs.max_size &&
-             rk->rk_curr_msgs.size - size < rk->rk_curr_msgs.max_size))
-                broadcast = 1;
-
-	rk->rk_curr_msgs.cnt  -= cnt;
-	rk->rk_curr_msgs.size -= size;
-
-        if (unlikely(broadcast))
-                cnd_broadcast(&rk->rk_curr_msgs.cnd);
-
-	mtx_unlock(&rk->rk_curr_msgs.lock);
-}
-
-static RD_INLINE RD_UNUSED void
-rd_kafka_curr_msgs_get (rd_kafka_t *rk, unsigned int *cntp, size_t *sizep) {
-	if (rk->rk_type != RD_KAFKA_PRODUCER) {
-		*cntp = 0;
-		*sizep = 0;
-		return;
-	}
-
-	mtx_lock(&rk->rk_curr_msgs.lock);
-	*cntp = rk->rk_curr_msgs.cnt;
-	*sizep = rk->rk_curr_msgs.size;
-	mtx_unlock(&rk->rk_curr_msgs.lock);
-}
-
-static RD_INLINE RD_UNUSED int
-rd_kafka_curr_msgs_cnt (rd_kafka_t *rk) {
-	int cnt;
-	if (rk->rk_type != RD_KAFKA_PRODUCER)
-		return 0;
-
-	mtx_lock(&rk->rk_curr_msgs.lock);
-	cnt = rk->rk_curr_msgs.cnt;
-	mtx_unlock(&rk->rk_curr_msgs.lock);
-
-	return cnt;
-}
-
-
-void rd_kafka_destroy_final (rd_kafka_t *rk);
-
-
-/**
- * Returns true if 'rk' handle is terminating.
- */
-#define rd_kafka_terminating(rk) (rd_atomic32_get(&(rk)->rk_terminate))
-
-#define rd_kafka_is_simple_consumer(rk) \
-        (rd_atomic32_get(&(rk)->rk_simple_cnt) > 0)
-int rd_kafka_simple_consumer_add (rd_kafka_t *rk);
-
-
-#include "rdkafka_topic.h"
-#include "rdkafka_partition.h"
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-/**
- * Debug contexts
- */
-#define RD_KAFKA_DBG_GENERIC        0x1
-#define RD_KAFKA_DBG_BROKER         0x2
-#define RD_KAFKA_DBG_TOPIC          0x4
-#define RD_KAFKA_DBG_METADATA       0x8
-#define RD_KAFKA_DBG_FEATURE        0x10
-#define RD_KAFKA_DBG_QUEUE          0x20
-#define RD_KAFKA_DBG_MSG            0x40
-#define RD_KAFKA_DBG_PROTOCOL       0x80
-#define RD_KAFKA_DBG_CGRP           0x100
-#define RD_KAFKA_DBG_SECURITY       0x200
-#define RD_KAFKA_DBG_FETCH          0x400
-#define RD_KAFKA_DBG_INTERCEPTOR    0x800
-#define RD_KAFKA_DBG_PLUGIN         0x1000
-#define RD_KAFKA_DBG_ALL            0xffff
-
-
-void rd_kafka_log0(const rd_kafka_conf_t *conf,
-                   const rd_kafka_t *rk, const char *extra, int level,
-                   const char *fac, const char *fmt, ...) RD_FORMAT(printf,
-                                                                    6, 7);
-
-#define rd_kafka_log(rk,level,fac,...) \
-        rd_kafka_log0(&rk->rk_conf, rk, NULL, level, fac, __VA_ARGS__)
-#define rd_kafka_dbg(rk,ctx,fac,...) do {                               \
-                if (unlikely((rk)->rk_conf.debug & (RD_KAFKA_DBG_ ## ctx))) \
-                        rd_kafka_log0(&rk->rk_conf,rk,NULL,             \
-                                      LOG_DEBUG,fac,__VA_ARGS__);       \
-        } while (0)
-
-/* dbg() not requiring an rk, just the conf object, for early logging */
-#define rd_kafka_dbg0(conf,ctx,fac,...) do {                            \
-                if (unlikely((conf)->debug & (RD_KAFKA_DBG_ ## ctx)))   \
-                        rd_kafka_log0(conf,NULL,NULL,                   \
-                                      LOG_DEBUG,fac,__VA_ARGS__);       \
-        } while (0)
-
-/* NOTE: The local copy of _logname is needed due to rkb_logname_lock
- *       lock-ordering when logging another broker's name in the message. */
-#define rd_rkb_log(rkb,level,fac,...) do {				\
-		char _logname[RD_KAFKA_NODENAME_SIZE];			\
-                mtx_lock(&(rkb)->rkb_logname_lock);                     \
-		strncpy(_logname, rkb->rkb_logname, sizeof(_logname)-1); \
-		_logname[RD_KAFKA_NODENAME_SIZE-1] = '\0';		\
-                mtx_unlock(&(rkb)->rkb_logname_lock);                   \
-		rd_kafka_log0(&(rkb)->rkb_rk->rk_conf, \
-                              (rkb)->rkb_rk, _logname,                  \
-                              level, fac, __VA_ARGS__);                 \
-        } while (0)
-
-#define rd_rkb_dbg(rkb,ctx,fac,...) do {				\
-		if (unlikely((rkb)->rkb_rk->rk_conf.debug &		\
-			     (RD_KAFKA_DBG_ ## ctx))) {			\
-			rd_rkb_log(rkb, LOG_DEBUG, fac, __VA_ARGS__);	\
-                }                                                       \
-	} while (0)
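/* Illustrative sketch, not from the original source: modules gate their debug
 * output on one of the RD_KAFKA_DBG_* contexts above; the message is only
 * formatted and logged when the matching token is enabled through the
 * `debug` configuration property. */
static RD_UNUSED void example_dbg_usage (rd_kafka_t *rk) {
        rd_kafka_dbg(rk, METADATA, "EXAMPLE",
                     "refreshing metadata for %d topic(s)", rk->rk_topic_cnt);
}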
-
-
-
-extern rd_kafka_resp_err_t RD_TLS rd_kafka_last_error_code;
-
-static RD_UNUSED RD_INLINE
-rd_kafka_resp_err_t rd_kafka_set_last_error (rd_kafka_resp_err_t err,
-					     int errnox) {
-        if (errnox) {
-#ifdef _MSC_VER
-                /* This is the correct way to set errno on Windows,
-                 * but it is still pointless due to different errnos in
-                 * different runtimes:
-                 * https://social.msdn.microsoft.com/Forums/vstudio/en-US/b4500c0d-1b69-40c7-9ef5-08da1025b5bf/setting-errno-from-within-a-dll?forum=vclanguage/
-                 * errno is thus highly deprecated, and buggy, on Windows
-                 * when using librdkafka as a dynamically loaded DLL. */
-                _set_errno(errnox);
-#else
-                errno = errnox;
-#endif
-        }
-	rd_kafka_last_error_code = err;
-	return err;
-}
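/* Illustrative sketch, not from the original source: an API entry point
 * reports failure through both the thread-local last-error code and errno
 * (subject to the Windows caveat above); applications should prefer
 * rd_kafka_last_error(). EINVAL is assumed to come from <errno.h>. */
static RD_UNUSED rd_kafka_resp_err_t example_fail_invalid_arg (void) {
        return rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__INVALID_ARG, EINVAL);
}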
-
-
-extern rd_atomic32_t rd_kafka_thread_cnt_curr;
-
-extern char RD_TLS rd_kafka_thread_name[64];
-
-
-
-
-
-int rd_kafka_path_is_dir (const char *path);
-
-rd_kafka_op_res_t
-rd_kafka_poll_cb (rd_kafka_t *rk, rd_kafka_q_t *rkq, rd_kafka_op_t *rko,
-                  rd_kafka_q_cb_type_t cb_type, void *opaque);
-
-rd_kafka_resp_err_t rd_kafka_subscribe_rkt (rd_kafka_itopic_t *rkt);
-
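
The per-producer message accounting above is what an application ultimately
sees as RD_KAFKA_RESP_ERR__QUEUE_FULL. A minimal application-side sketch,
assuming only the public rd_kafka_produce()/rd_kafka_poll()/
rd_kafka_last_error() API (an illustration, not part of this diff):

    static void produce_with_backpressure (rd_kafka_t *rk,
                                            rd_kafka_topic_t *rkt,
                                            void *payload, size_t len) {
            while (rd_kafka_produce(rkt, RD_KAFKA_PARTITION_UA,
                                    RD_KAFKA_MSG_F_COPY,
                                    payload, len, NULL, 0, NULL) == -1) {
                    if (rd_kafka_last_error() != RD_KAFKA_RESP_ERR__QUEUE_FULL)
                            break;          /* unrelated failure: give up */
                    rd_kafka_poll(rk, 100); /* serve delivery reports to drain */
            }
    }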

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdkafka_interceptor.c
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdkafka_interceptor.c b/thirdparty/librdkafka-0.11.1/src/rdkafka_interceptor.c
deleted file mode 100644
index 91f7761..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdkafka_interceptor.c
+++ /dev/null
@@ -1,624 +0,0 @@
-/*
- * librdkafka - The Apache Kafka C/C++ library
- *
- * Copyright (c) 2017 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "rdkafka_int.h"
-#include "rdkafka_interceptor.h"
-#include "rdstring.h"
-
-/**
- * @brief Interceptor method/function reference
- */
-typedef struct rd_kafka_interceptor_method_s {
-        union {
-                rd_kafka_interceptor_f_on_conf_set_t *on_conf_set;
-                rd_kafka_interceptor_f_on_conf_dup_t *on_conf_dup;
-                rd_kafka_interceptor_f_on_conf_destroy_t *on_conf_destroy;
-                rd_kafka_interceptor_f_on_new_t     *on_new;
-                rd_kafka_interceptor_f_on_destroy_t *on_destroy;
-                rd_kafka_interceptor_f_on_send_t    *on_send;
-                rd_kafka_interceptor_f_on_acknowledgement_t *on_acknowledgement;
-                rd_kafka_interceptor_f_on_consume_t *on_consume;
-                rd_kafka_interceptor_f_on_commit_t  *on_commit;
-                void *generic; /* For easy assignment */
-
-        } u;
-        char *ic_name;
-        void *ic_opaque;
-} rd_kafka_interceptor_method_t;
-
-/**
- * @brief Destroy interceptor method reference
- */
-static void
-rd_kafka_interceptor_method_destroy (void *ptr) {
-        rd_kafka_interceptor_method_t *method = ptr;
-        rd_free(method->ic_name);
-        rd_free(method);
-}
-
-
-
-
-
-/**
- * @brief Handle an interceptor on_... method call failure.
- */
-static RD_INLINE void
-rd_kafka_interceptor_failed (rd_kafka_t *rk,
-                             const rd_kafka_interceptor_method_t *method,
-                             const char *method_name, rd_kafka_resp_err_t err,
-                             const rd_kafka_message_t *rkmessage,
-                             const char *errstr) {
-
-        /* FIXME: Suppress log messages, eventually */
-        if (rkmessage)
-                rd_kafka_log(rk, LOG_WARNING, "ICFAIL",
-                             "Interceptor %s failed %s for "
-                             "message on %s [%"PRId32"] @ %"PRId64
-                             ": %s%s%s",
-                             method->ic_name, method_name,
-                             rd_kafka_topic_a2i(rkmessage->rkt)->rkt_topic->str,
-                             rkmessage->partition,
-                             rkmessage->offset,
-                             rd_kafka_err2str(err),
-                             errstr ? ": " : "",
-                             errstr ? errstr : "");
-        else
-                rd_kafka_log(rk, LOG_WARNING, "ICFAIL",
-                             "Interceptor %s failed %s: %s%s%s",
-                             method->ic_name, method_name,
-                             rd_kafka_err2str(err),
-                             errstr ? ": " : "",
-                             errstr ? errstr : "");
-
-}
-
-
-
-/**
- * @brief Create interceptor method reference.
- *        Duplicates are rejected
- */
-static rd_kafka_interceptor_method_t *
-rd_kafka_interceptor_method_new (const char *ic_name,
-                                 void *func, void *ic_opaque) {
-        rd_kafka_interceptor_method_t *method;
-
-        method             = rd_calloc(1, sizeof(*method));
-        method->ic_name    = rd_strdup(ic_name);
-        method->ic_opaque  = ic_opaque;
-        method->u.generic  = func;
-
-        return method;
-}
-
-
-/**
- * @brief Method comparator to be used for finding, not sorting.
- */
-static int rd_kafka_interceptor_method_cmp (const void *_a, const void *_b) {
-        const rd_kafka_interceptor_method_t *a = _a, *b = _b;
-
-        if (a->u.generic != b->u.generic)
-                return -1;
-
-        return strcmp(a->ic_name, b->ic_name);
-}
-
-/**
- * @brief Add interceptor method reference
- */
-static rd_kafka_resp_err_t
-rd_kafka_interceptor_method_add (rd_list_t *list, const char *ic_name,
-                                 void *func, void *ic_opaque) {
-        rd_kafka_interceptor_method_t *method;
-        const rd_kafka_interceptor_method_t skel = {
-                .ic_name = (char *)ic_name,
-                .u = { .generic = func }
-        };
-
-        /* Reject same method from same interceptor.
-         * This is needed to avoid duplicate interceptors when configuration
-         * objects are duplicated.
-         * An exception is made for lists without _F_UNIQUE, which is
-         * currently only on_conf_destroy(), to allow interceptor cleanup. */
-        if ((list->rl_flags & RD_LIST_F_UNIQUE) &&
-            rd_list_find(list, &skel, rd_kafka_interceptor_method_cmp))
-                return RD_KAFKA_RESP_ERR__CONFLICT;
-
-        method = rd_kafka_interceptor_method_new(ic_name, func, ic_opaque);
-        rd_list_add(list, method);
-
-        return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-/**
- * @brief Destroy all interceptors
- * @locality application thread calling rd_kafka_conf_destroy() or 
- *           rd_kafka_destroy()
- */
-void rd_kafka_interceptors_destroy (rd_kafka_conf_t *conf) {
-        rd_list_destroy(&conf->interceptors.on_conf_set);
-        rd_list_destroy(&conf->interceptors.on_conf_dup);
-        rd_list_destroy(&conf->interceptors.on_conf_destroy);
-        rd_list_destroy(&conf->interceptors.on_new);
-        rd_list_destroy(&conf->interceptors.on_destroy);
-        rd_list_destroy(&conf->interceptors.on_send);
-        rd_list_destroy(&conf->interceptors.on_acknowledgement);
-        rd_list_destroy(&conf->interceptors.on_consume);
-        rd_list_destroy(&conf->interceptors.on_commit);
-
-        /* Interceptor config */
-        rd_list_destroy(&conf->interceptors.config);
-}
-
-
-/**
- * @brief Initialize interceptor sub-system for config object.
- * @locality application thread
- */
-static void
-rd_kafka_interceptors_init (rd_kafka_conf_t *conf) {
-        rd_list_init(&conf->interceptors.on_conf_set, 0,
-                     rd_kafka_interceptor_method_destroy)
-                ->rl_flags |= RD_LIST_F_UNIQUE;
-        rd_list_init(&conf->interceptors.on_conf_dup, 0,
-                     rd_kafka_interceptor_method_destroy)
-                ->rl_flags |= RD_LIST_F_UNIQUE;
-        /* conf_destroy() allows duplicate entries. */
-        rd_list_init(&conf->interceptors.on_conf_destroy, 0,
-                     rd_kafka_interceptor_method_destroy);
-        rd_list_init(&conf->interceptors.on_new, 0,
-                     rd_kafka_interceptor_method_destroy)
-                ->rl_flags |= RD_LIST_F_UNIQUE;
-        rd_list_init(&conf->interceptors.on_destroy, 0,
-                     rd_kafka_interceptor_method_destroy)
-                ->rl_flags |= RD_LIST_F_UNIQUE;
-        rd_list_init(&conf->interceptors.on_send, 0,
-                     rd_kafka_interceptor_method_destroy)
-                ->rl_flags |= RD_LIST_F_UNIQUE;
-        rd_list_init(&conf->interceptors.on_acknowledgement, 0,
-                     rd_kafka_interceptor_method_destroy)
-                ->rl_flags |= RD_LIST_F_UNIQUE;
-        rd_list_init(&conf->interceptors.on_consume, 0,
-                     rd_kafka_interceptor_method_destroy)
-                ->rl_flags |= RD_LIST_F_UNIQUE;
-        rd_list_init(&conf->interceptors.on_commit, 0,
-                     rd_kafka_interceptor_method_destroy)
-                ->rl_flags |= RD_LIST_F_UNIQUE;
-
-        /* Interceptor config */
-        rd_list_init(&conf->interceptors.config, 0,
-                     (void (*)(void *))rd_strtup_destroy);
-}
-
-
-
-
-/**
- * @name Configuration backend
- */
-
-
-/**
- * @brief Constructor called when configuration object is created.
- */
-void rd_kafka_conf_interceptor_ctor (int scope, void *pconf) {
-        rd_kafka_conf_t *conf = pconf;
-        assert(scope == _RK_GLOBAL);
-        rd_kafka_interceptors_init(conf);
-}
-
-/**
- * @brief Destructor called when configuration object is destroyed.
- */
-void rd_kafka_conf_interceptor_dtor (int scope, void *pconf) {
-        rd_kafka_conf_t *conf = pconf;
-        assert(scope == _RK_GLOBAL);
-        rd_kafka_interceptors_destroy(conf);
-}
-
-/**
- * @brief Copy-constructor called when configuration object \p psrc is
- *        duplicated to \p pdst.
- * @remark Interceptors are NOT copied, but interceptor config is.
- *
- */
-void rd_kafka_conf_interceptor_copy (int scope, void *pdst, const void *psrc,
-                                     void *dstptr, const void *srcptr,
-                                     size_t filter_cnt, const char **filter) {
-        rd_kafka_conf_t *dconf = pdst;
-        const rd_kafka_conf_t *sconf = psrc;
-        int i;
-        const rd_strtup_t *confval;
-
-        assert(scope == _RK_GLOBAL);
-
-        /* Apply interceptor configuration values.
-         * on_conf_dup() has already been called for dconf so
-         * on_conf_set() interceptors are already in place and we can
-         * apply the configuration through the standard conf_set() API. */
-        RD_LIST_FOREACH(confval, &sconf->interceptors.config, i) {
-                size_t fi;
-                size_t nlen = strlen(confval->name);
-
-                /* Apply filter */
-                for (fi = 0 ; fi < filter_cnt ; fi++) {
-                        size_t flen = strlen(filter[fi]);
-                        if (nlen >= flen && !strncmp(filter[fi], confval->name,
-                                                     flen))
-                                break;
-                }
-
-                if (fi < filter_cnt)
-                        continue; /* Filter matched: ignore property. */
-
-                /* Ignore errors for now */
-                rd_kafka_conf_set(dconf, confval->name, confval->value,
-                                  NULL, 0);
-        }
-}
-
-
-
-
-/**
- * @brief Call interceptor on_conf_set methods.
- * @locality application thread calling rd_kafka_conf_set() and
- *           rd_kafka_conf_dup()
- */
-rd_kafka_conf_res_t
-rd_kafka_interceptors_on_conf_set (rd_kafka_conf_t *conf,
-                                   const char *name, const char *val,
-                                   char *errstr, size_t errstr_size) {
-        rd_kafka_interceptor_method_t *method;
-        int i;
-
-        RD_LIST_FOREACH(method, &conf->interceptors.on_conf_set, i) {
-                rd_kafka_conf_res_t res;
-
-                res = method->u.on_conf_set(conf,
-                                            name, val, errstr, errstr_size,
-                                            method->ic_opaque);
-                if (res == RD_KAFKA_CONF_UNKNOWN)
-                        continue;
-
-                /* Add successfully handled properties to list of
-                 * interceptor config properties so conf_t objects
-                 * can be copied. */
-                if (res == RD_KAFKA_CONF_OK)
-                        rd_list_add(&conf->interceptors.config,
-                                    rd_strtup_new(name, val));
-                return res;
-        }
-
-        return RD_KAFKA_CONF_UNKNOWN;
-}
-
-/**
- * @brief Call interceptor on_conf_dup methods.
- * @locality application thread calling rd_kafka_conf_dup()
- */
-void
-rd_kafka_interceptors_on_conf_dup (rd_kafka_conf_t *new_conf,
-                                   const rd_kafka_conf_t *old_conf,
-                                   size_t filter_cnt, const char **filter) {
-        rd_kafka_interceptor_method_t *method;
-        int i;
-
-        RD_LIST_FOREACH(method, &old_conf->interceptors.on_conf_dup, i) {
-                /* FIXME: Ignore error for now */
-                method->u.on_conf_dup(new_conf, old_conf,
-                                      filter_cnt, filter, method->ic_opaque);
-        }
-}
-
-
-/**
- * @brief Call interceptor on_conf_destroy methods.
- * @locality application thread calling rd_kafka_conf_destroy(), rd_kafka_new(),
- *           rd_kafka_destroy()
- */
-void
-rd_kafka_interceptors_on_conf_destroy (rd_kafka_conf_t *conf) {
-        rd_kafka_interceptor_method_t *method;
-        int i;
-
-        RD_LIST_FOREACH(method, &conf->interceptors.on_conf_destroy, i) {
-                /* FIXME: Ignore error for now */
-                method->u.on_conf_destroy(method->ic_opaque);
-        }
-}
-
-
-/**
- * @brief Call interceptor on_new methods.
- * @locality application thread calling rd_kafka_new()
- */
-void
-rd_kafka_interceptors_on_new (rd_kafka_t *rk, const rd_kafka_conf_t *conf) {
-        rd_kafka_interceptor_method_t *method;
-        int i;
-        char errstr[512];
-
-        RD_LIST_FOREACH(method, &rk->rk_conf.interceptors.on_new, i) {
-                rd_kafka_resp_err_t err;
-
-                err = method->u.on_new(rk, conf, method->ic_opaque,
-                                       errstr, sizeof(errstr));
-                if (unlikely(err))
-                        rd_kafka_interceptor_failed(rk, method, "on_new", err,
-                                                    NULL, errstr);
-        }
-}
-
-
-
-/**
- * @brief Call interceptor on_destroy methods.
- * @locality application thread calling rd_kafka_new() or rd_kafka_destroy()
- */
-void
-rd_kafka_interceptors_on_destroy (rd_kafka_t *rk) {
-        rd_kafka_interceptor_method_t *method;
-        int i;
-
-        RD_LIST_FOREACH(method, &rk->rk_conf.interceptors.on_destroy, i) {
-                rd_kafka_resp_err_t err;
-
-                err = method->u.on_destroy(rk, method->ic_opaque);
-                if (unlikely(err))
-                        rd_kafka_interceptor_failed(rk, method, "on_destroy",
-                                                    err, NULL, NULL);
-        }
-}
-
-
-
-/**
- * @brief Call interceptor on_send methods.
- * @locality application thread calling produce()
- */
-void
-rd_kafka_interceptors_on_send (rd_kafka_t *rk, rd_kafka_message_t *rkmessage) {
-        rd_kafka_interceptor_method_t *method;
-        int i;
-
-        RD_LIST_FOREACH(method, &rk->rk_conf.interceptors.on_send, i) {
-                rd_kafka_resp_err_t err;
-
-                err = method->u.on_send(rk, rkmessage, method->ic_opaque);
-                if (unlikely(err))
-                        rd_kafka_interceptor_failed(rk, method, "on_send", err,
-                                                    rkmessage, NULL);
-        }
-}
-
-
-
-/**
- * @brief Call interceptor on_acknowledgement methods.
- * @locality application thread calling poll(), or the broker thread if
- *           a dr (delivery report) callback has been set.
- */
-void
-rd_kafka_interceptors_on_acknowledgement (rd_kafka_t *rk,
-                                          rd_kafka_message_t *rkmessage) {
-        rd_kafka_interceptor_method_t *method;
-        int i;
-
-        RD_LIST_FOREACH(method,
-                        &rk->rk_conf.interceptors.on_acknowledgement, i) {
-                rd_kafka_resp_err_t err;
-
-                err = method->u.on_acknowledgement(rk, rkmessage,
-                                                   method->ic_opaque);
-                if (unlikely(err))
-                        rd_kafka_interceptor_failed(rk, method,
-                                                    "on_acknowledgement", err,
-                                                    rkmessage, NULL);
-        }
-}
-
-
-/**
- * @brief Call on_acknowledgement methods for all messages in queue.
- * @locality broker thread
- */
-void
-rd_kafka_interceptors_on_acknowledgement_queue (rd_kafka_t *rk,
-                                                rd_kafka_msgq_t *rkmq) {
-        rd_kafka_msg_t *rkm;
-
-        RD_KAFKA_MSGQ_FOREACH(rkm, rkmq) {
-                rd_kafka_interceptors_on_acknowledgement(rk,
-                                                         &rkm->rkm_rkmessage);
-        }
-}
-
-
-/**
- * @brief Call interceptor on_consume methods.
- * @locality application thread calling poll(), consume() or similar prior to
- *           passing the message to the application.
- */
-void
-rd_kafka_interceptors_on_consume (rd_kafka_t *rk,
-                                  rd_kafka_message_t *rkmessage) {
-        rd_kafka_interceptor_method_t *method;
-        int i;
-
-        RD_LIST_FOREACH(method, &rk->rk_conf.interceptors.on_consume, i) {
-                rd_kafka_resp_err_t err;
-
-                err = method->u.on_consume(rk, rkmessage,
-                                           method->ic_opaque);
-                if (unlikely(err))
-                        rd_kafka_interceptor_failed(rk, method,
-                                                    "on_consume", err,
-                                                    rkmessage, NULL);
-        }
-}
-
-
-/**
- * @brief Call interceptor on_commit methods.
- * @locality application thread calling poll(), consume() or similar,
- *           or rdkafka main thread if no commit_cb or handler registered.
- */
-void
-rd_kafka_interceptors_on_commit (rd_kafka_t *rk,
-                                 const rd_kafka_topic_partition_list_t *offsets,
-                                 rd_kafka_resp_err_t err) {
-        rd_kafka_interceptor_method_t *method;
-        int i;
-
-        RD_LIST_FOREACH(method, &rk->rk_conf.interceptors.on_commit, i) {
-                rd_kafka_resp_err_t ic_err;
-
-                ic_err = method->u.on_commit(rk, offsets, err,
-                                             method->ic_opaque);
-                if (unlikely(ic_err))
-                        rd_kafka_interceptor_failed(rk, method,
-                                                    "on_commit", ic_err, NULL,
-                                                    NULL);
-        }
-}
-
-
-
-
-/**
- * @name Public API (backend)
- * @{
- */
-
-
-rd_kafka_resp_err_t
-rd_kafka_conf_interceptor_add_on_conf_set (
-        rd_kafka_conf_t *conf, const char *ic_name,
-        rd_kafka_interceptor_f_on_conf_set_t *on_conf_set,
-        void *ic_opaque) {
-        return rd_kafka_interceptor_method_add(&conf->interceptors.on_conf_set,
-                                               ic_name, (void *)on_conf_set,
-                                               ic_opaque);
-}
-
-rd_kafka_resp_err_t
-rd_kafka_conf_interceptor_add_on_conf_dup (
-        rd_kafka_conf_t *conf, const char *ic_name,
-        rd_kafka_interceptor_f_on_conf_dup_t *on_conf_dup,
-        void *ic_opaque) {
-        return rd_kafka_interceptor_method_add(&conf->interceptors.on_conf_dup,
-                                               ic_name, (void *)on_conf_dup,
-                                               ic_opaque);
-}
-
-rd_kafka_resp_err_t
-rd_kafka_conf_interceptor_add_on_conf_destroy (
-        rd_kafka_conf_t *conf, const char *ic_name,
-        rd_kafka_interceptor_f_on_conf_destroy_t *on_conf_destroy,
-        void *ic_opaque) {
-        return rd_kafka_interceptor_method_add(&conf->interceptors.on_conf_destroy,
-                                               ic_name, (void *)on_conf_destroy,
-                                               ic_opaque);
-}
-
-
-
-rd_kafka_resp_err_t
-rd_kafka_conf_interceptor_add_on_new (
-        rd_kafka_conf_t *conf, const char *ic_name,
-        rd_kafka_interceptor_f_on_new_t *on_new,
-        void *ic_opaque) {
-        return rd_kafka_interceptor_method_add(&conf->interceptors.on_new,
-                                               ic_name, (void *)on_new,
-                                               ic_opaque);
-}
-
-
-rd_kafka_resp_err_t
-rd_kafka_interceptor_add_on_destroy (
-        rd_kafka_t *rk, const char *ic_name,
-        rd_kafka_interceptor_f_on_destroy_t *on_destroy,
-        void *ic_opaque) {
-        assert(!rk->rk_initialized);
-        return rd_kafka_interceptor_method_add(&rk->rk_conf.interceptors.on_destroy,
-                                               ic_name, (void *)on_destroy,
-                                               ic_opaque);
-}
-
-rd_kafka_resp_err_t
-rd_kafka_interceptor_add_on_send (
-        rd_kafka_t *rk, const char *ic_name,
-        rd_kafka_interceptor_f_on_send_t *on_send,
-        void *ic_opaque) {
-        assert(!rk->rk_initialized);
-        return rd_kafka_interceptor_method_add(&rk->rk_conf.interceptors.on_send,
-                                               ic_name, (void *)on_send,
-                                               ic_opaque);
-}
-
-rd_kafka_resp_err_t
-rd_kafka_interceptor_add_on_acknowledgement (
-        rd_kafka_t *rk, const char *ic_name,
-        rd_kafka_interceptor_f_on_acknowledgement_t *on_acknowledgement,
-        void *ic_opaque) {
-        assert(!rk->rk_initialized);
-        return rd_kafka_interceptor_method_add(&rk->rk_conf.interceptors.
-                                               on_acknowledgement,
-                                               ic_name,
-                                               (void *)on_acknowledgement,
-                                               ic_opaque);
-}
-
-
-rd_kafka_resp_err_t
-rd_kafka_interceptor_add_on_consume (
-        rd_kafka_t *rk, const char *ic_name,
-        rd_kafka_interceptor_f_on_consume_t *on_consume,
-        void *ic_opaque) {
-        assert(!rk->rk_initialized);
-        return rd_kafka_interceptor_method_add(&rk->rk_conf.interceptors.
-                                               on_consume,
-                                               ic_name, (void *)on_consume,
-                                               ic_opaque);
-}
-
-
-rd_kafka_resp_err_t
-rd_kafka_interceptor_add_on_commit (
-        rd_kafka_t *rk, const char *ic_name,
-        rd_kafka_interceptor_f_on_commit_t *on_commit,
-        void *ic_opaque) {
-        assert(!rk->rk_initialized);
-        return rd_kafka_interceptor_method_add(&rk->rk_conf.interceptors.
-                                               on_commit,
-                                               ic_name, (void *)on_commit,
-                                               ic_opaque);
-}
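
For context, these add_on_* entry points are typically chained: an on_new()
interceptor is registered on the conf object, and the per-instance
interceptors are attached from inside on_new() (note the
assert(!rk->rk_initialized) guards above). A minimal sketch using only the
functions shown in this file; my_ic, my_on_new and my_on_send are placeholder
names:

    static rd_kafka_resp_err_t my_on_send (rd_kafka_t *rk,
                                           rd_kafka_message_t *rkmessage,
                                           void *ic_opaque) {
            /* Inspect or tag the message before it is enqueued for delivery. */
            return RD_KAFKA_RESP_ERR_NO_ERROR;
    }

    static rd_kafka_resp_err_t my_on_new (rd_kafka_t *rk,
                                          const rd_kafka_conf_t *conf,
                                          void *ic_opaque,
                                          char *errstr, size_t errstr_size) {
            return rd_kafka_interceptor_add_on_send(rk, "my_ic", my_on_send,
                                                    ic_opaque);
    }

    /* At configuration time (e.g. from a plugin's conf initializer): */
    rd_kafka_conf_interceptor_add_on_new(conf, "my_ic", my_on_new, NULL);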

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdkafka_interceptor.h
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdkafka_interceptor.h b/thirdparty/librdkafka-0.11.1/src/rdkafka_interceptor.h
deleted file mode 100644
index 6be4e86..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdkafka_interceptor.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * librdkafka - The Apache Kafka C/C++ library
- *
- * Copyright (c) 2017 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RDKAFKA_INTERCEPTOR_H
-#define _RDKAFKA_INTERCEPTOR_H
-
-rd_kafka_conf_res_t
-rd_kafka_interceptors_on_conf_set (rd_kafka_conf_t *conf,
-                                   const char *name, const char *val,
-                                   char *errstr, size_t errstr_size);
-void
-rd_kafka_interceptors_on_conf_dup (rd_kafka_conf_t *new_conf,
-                                   const rd_kafka_conf_t *old_conf,
-                                   size_t filter_cnt, const char **filter);
-void
-rd_kafka_interceptors_on_conf_destroy (rd_kafka_conf_t *conf);
-void
-rd_kafka_interceptors_on_new (rd_kafka_t *rk, const rd_kafka_conf_t *conf);
-void
-rd_kafka_interceptors_on_destroy (rd_kafka_t *rk);
-void
-rd_kafka_interceptors_on_send (rd_kafka_t *rk, rd_kafka_message_t *rkmessage);
-void
-rd_kafka_interceptors_on_acknowledgement (rd_kafka_t *rk,
-                                          rd_kafka_message_t *rkmessage);
-void
-rd_kafka_interceptors_on_acknowledgement_queue (rd_kafka_t *rk,
-                                                rd_kafka_msgq_t *rkmq);
-
-void rd_kafka_interceptors_on_consume (rd_kafka_t *rk,
-                                       rd_kafka_message_t *rkmessage);
-void
-rd_kafka_interceptors_on_commit (rd_kafka_t *rk,
-                                 const rd_kafka_topic_partition_list_t *offsets,
-                                 rd_kafka_resp_err_t err);
-
-
-void rd_kafka_conf_interceptor_ctor (int scope, void *pconf);
-void rd_kafka_conf_interceptor_dtor (int scope, void *pconf);
-void rd_kafka_conf_interceptor_copy (int scope, void *pdst, const void *psrc,
-                                     void *dstptr, const void *srcptr,
-                                     size_t filter_cnt, const char **filter);
-
-void rd_kafka_interceptors_destroy (rd_kafka_conf_t *conf);
-
-#endif /* _RDKAFKA_INTERCEPTOR_H */


[35/51] [partial] nifi-minifi-cpp git commit: MINIFICPP-512 - upgrade to librdkafka 0.11.4

Posted by ph...@apache.org.
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdkafka.c
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdkafka.c b/thirdparty/librdkafka-0.11.1/src/rdkafka.c
deleted file mode 100644
index 6867a6c..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdkafka.c
+++ /dev/null
@@ -1,3392 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2013, Magnus Edenhill
- * All rights reserved.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met: 
- * 
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer. 
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution. 
- * 
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-#define _GNU_SOURCE
-#include <errno.h>
-#include <string.h>
-#include <stdarg.h>
-#include <signal.h>
-#include <stdlib.h>
-#include <sys/stat.h>
-
-#include "rdkafka_int.h"
-#include "rdkafka_msg.h"
-#include "rdkafka_broker.h"
-#include "rdkafka_topic.h"
-#include "rdkafka_partition.h"
-#include "rdkafka_offset.h"
-#include "rdkafka_transport.h"
-#include "rdkafka_cgrp.h"
-#include "rdkafka_assignor.h"
-#include "rdkafka_request.h"
-#include "rdkafka_event.h"
-#include "rdkafka_sasl.h"
-#include "rdkafka_interceptor.h"
-
-#include "rdtime.h"
-#include "crc32c.h"
-#include "rdunittest.h"
-
-#ifdef _MSC_VER
-#include <sys/types.h>
-#include <sys/timeb.h>
-#endif
-
-
-
-static once_flag rd_kafka_global_init_once = ONCE_FLAG_INIT;
-
-/**
- * @brief Global counter+lock for all active librdkafka instances
- */
-mtx_t rd_kafka_global_lock;
-int rd_kafka_global_cnt;
-
-
-/**
- * Last API error code, per thread.
- * Shared among all rd_kafka_t instances.
- */
-rd_kafka_resp_err_t RD_TLS rd_kafka_last_error_code;
-
-
-/**
- * Current number of threads created by rdkafka.
- * This is used in regression tests.
- */
-rd_atomic32_t rd_kafka_thread_cnt_curr;
-int rd_kafka_thread_cnt (void) {
-#if ENABLE_SHAREDPTR_DEBUG
-        rd_shared_ptrs_dump();
-#endif
-
-	return rd_atomic32_get(&rd_kafka_thread_cnt_curr);
-}
-
-/**
- * Current thread's name (TLS)
- */
-char RD_TLS rd_kafka_thread_name[64] = "app";
-
-
-
-static void rd_kafka_global_init (void) {
-#if ENABLE_SHAREDPTR_DEBUG
-        LIST_INIT(&rd_shared_ptr_debug_list);
-        mtx_init(&rd_shared_ptr_debug_mtx, mtx_plain);
-        atexit(rd_shared_ptrs_dump);
-#endif
-	mtx_init(&rd_kafka_global_lock, mtx_plain);
-#if ENABLE_DEVEL
-	rd_atomic32_init(&rd_kafka_op_cnt, 0);
-#endif
-        crc32c_global_init();
-}
-
-/**
- * @returns the current number of active librdkafka instances
- */
-static int rd_kafka_global_cnt_get (void) {
-	int r;
-	mtx_lock(&rd_kafka_global_lock);
-	r = rd_kafka_global_cnt;
-	mtx_unlock(&rd_kafka_global_lock);
-	return r;
-}
-
-
-/**
- * @brief Increase counter for active librdkafka instances.
- * If this is the first instance the global constructors will be called, if any.
- */
-static void rd_kafka_global_cnt_incr (void) {
-	mtx_lock(&rd_kafka_global_lock);
-	rd_kafka_global_cnt++;
-	if (rd_kafka_global_cnt == 1) {
-		rd_kafka_transport_init();
-#if WITH_SSL
-		rd_kafka_transport_ssl_init();
-#endif
-                rd_kafka_sasl_global_init();
-	}
-	mtx_unlock(&rd_kafka_global_lock);
-}
-
-/**
- * @brief Decrease counter for active librdkafka instances.
- * If this counter reaches 0 the global destructors will be called, if any.
- */
-static void rd_kafka_global_cnt_decr (void) {
-	mtx_lock(&rd_kafka_global_lock);
-	rd_kafka_assert(NULL, rd_kafka_global_cnt > 0);
-	rd_kafka_global_cnt--;
-	if (rd_kafka_global_cnt == 0) {
-                rd_kafka_sasl_global_term();
-#if WITH_SSL
-		rd_kafka_transport_ssl_term();
-#endif
-	}
-	mtx_unlock(&rd_kafka_global_lock);
-}
-
-
-/**
- * Wait for all rd_kafka_t objects to be destroyed.
- * Returns 0 if all kafka objects are now destroyed, or -1 if the
- * timeout was reached.
- */
-int rd_kafka_wait_destroyed (int timeout_ms) {
-	rd_ts_t timeout = rd_clock() + (timeout_ms * 1000);
-
-	while (rd_kafka_thread_cnt() > 0 ||
-	       rd_kafka_global_cnt_get() > 0) {
-		if (rd_clock() >= timeout) {
-			rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__TIMED_OUT,
-						ETIMEDOUT);
-#if ENABLE_SHAREDPTR_DEBUG
-                        rd_shared_ptrs_dump();
-#endif
-			return -1;
-		}
-		rd_usleep(25000, NULL); /* 25ms */
-	}
-
-	return 0;
-}
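/* Illustrative sketch, not from the original source: a typical application
 * shutdown sequence, assuming the public API from rdkafka.h. */
static RD_UNUSED int example_shutdown (rd_kafka_t *rk) {
        rd_kafka_destroy(rk);   /* destroy the handle */
        /* Wait up to 5s for all rdkafka threads (from any instance) to exit;
         * returns -1 on timeout. */
        return rd_kafka_wait_destroyed(5000);
}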
-
-static void rd_kafka_log_buf (const rd_kafka_conf_t *conf,
-                              const rd_kafka_t *rk, int level, const char *fac,
-                              const char *buf) {
-        if (level > conf->log_level)
-                return;
-        else if (rk && conf->log_queue) {
-                rd_kafka_op_t *rko;
-
-                if (!rk->rk_logq)
-                        return; /* Terminating */
-
-                rko = rd_kafka_op_new(RD_KAFKA_OP_LOG);
-                rd_kafka_op_set_prio(rko, RD_KAFKA_PRIO_MEDIUM);
-                rko->rko_u.log.level = level;
-                strncpy(rko->rko_u.log.fac, fac,
-                        sizeof(rko->rko_u.log.fac) - 1);
-                rko->rko_u.log.str = rd_strdup(buf);
-                rd_kafka_q_enq(rk->rk_logq, rko);
-
-        } else if (conf->log_cb) {
-                conf->log_cb(rk, level, fac, buf);
-        }
-}
-
-/**
- * @brief Logger
- *
- * @remark conf must be set, but rk may be NULL
- */
-void rd_kafka_log0 (const rd_kafka_conf_t *conf,
-                    const rd_kafka_t *rk,
-                    const char *extra, int level,
-                    const char *fac, const char *fmt, ...) {
-	char buf[2048];
-	va_list ap;
-	unsigned int elen = 0;
-        unsigned int of = 0;
-
-	if (level > conf->log_level)
-		return;
-
-	if (conf->log_thread_name) {
-		elen = rd_snprintf(buf, sizeof(buf), "[thrd:%s]: ",
-				   rd_kafka_thread_name);
-		if (unlikely(elen >= sizeof(buf)))
-			elen = sizeof(buf);
-		of = elen;
-	}
-
-	if (extra) {
-		elen = rd_snprintf(buf+of, sizeof(buf)-of, "%s: ", extra);
-		if (unlikely(elen >= sizeof(buf)-of))
-			elen = sizeof(buf)-of;
-                of += elen;
-	}
-
-	va_start(ap, fmt);
-	rd_vsnprintf(buf+of, sizeof(buf)-of, fmt, ap);
-	va_end(ap);
-
-        rd_kafka_log_buf(conf, rk, level, fac, buf);
-}
-
-
-
-void rd_kafka_log_print(const rd_kafka_t *rk, int level,
-	const char *fac, const char *buf) {
-	int secs, msecs;
-	struct timeval tv;
-	rd_gettimeofday(&tv, NULL);
-	secs = (int)tv.tv_sec;
-	msecs = (int)(tv.tv_usec / 1000);
-	fprintf(stderr, "%%%i|%u.%03u|%s|%s| %s\n",
-		level, secs, msecs,
-		fac, rk ? rk->rk_name : "", buf);
-}
-
-#ifndef _MSC_VER
-void rd_kafka_log_syslog (const rd_kafka_t *rk, int level,
-			  const char *fac, const char *buf) {
-	static int initialized = 0;
-
-	if (!initialized)
-		openlog("rdkafka", LOG_PID|LOG_CONS, LOG_USER);
-
-	syslog(level, "%s: %s: %s", fac, rk ? rk->rk_name : "", buf);
-}
-#endif
-
-void rd_kafka_set_logger (rd_kafka_t *rk,
-			  void (*func) (const rd_kafka_t *rk, int level,
-					const char *fac, const char *buf)) {
-	rk->rk_conf.log_cb = func;
-}
-
-void rd_kafka_set_log_level (rd_kafka_t *rk, int level) {
-	rk->rk_conf.log_level = level;
-}
-
-
-
-
-
-
-static const char *rd_kafka_type2str (rd_kafka_type_t type) {
-	static const char *types[] = {
-		[RD_KAFKA_PRODUCER] = "producer",
-		[RD_KAFKA_CONSUMER] = "consumer",
-	};
-	return types[type];
-}
-
-#define _ERR_DESC(ENUM,DESC) \
-	[ENUM - RD_KAFKA_RESP_ERR__BEGIN] = { ENUM, # ENUM + 18/*pfx*/, DESC }
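/* Note: "# ENUM + 18" stringifies the enum symbol and skips the 18-character
 * "RD_KAFKA_RESP_ERR_" prefix, so rd_kafka_err2name() reports e.g.
 * RD_KAFKA_RESP_ERR__BAD_MSG as "_BAD_MSG". */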
-
-static const struct rd_kafka_err_desc rd_kafka_err_descs[] = {
-	_ERR_DESC(RD_KAFKA_RESP_ERR__BEGIN, NULL),
-	_ERR_DESC(RD_KAFKA_RESP_ERR__BAD_MSG,
-		  "Local: Bad message format"),
-	_ERR_DESC(RD_KAFKA_RESP_ERR__BAD_COMPRESSION,
-		  "Local: Invalid compressed data"),
-	_ERR_DESC(RD_KAFKA_RESP_ERR__DESTROY,
-		  "Local: Broker handle destroyed"),
-	_ERR_DESC(RD_KAFKA_RESP_ERR__FAIL,
-		  "Local: Communication failure with broker"), //FIXME: too specific
-	_ERR_DESC(RD_KAFKA_RESP_ERR__TRANSPORT,
-		  "Local: Broker transport failure"),
-	_ERR_DESC(RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE,
-		  "Local: Critical system resource failure"),
-	_ERR_DESC(RD_KAFKA_RESP_ERR__RESOLVE,
-		  "Local: Host resolution failure"),
-	_ERR_DESC(RD_KAFKA_RESP_ERR__MSG_TIMED_OUT,
-		  "Local: Message timed out"),
-	_ERR_DESC(RD_KAFKA_RESP_ERR__PARTITION_EOF,
-		  "Broker: No more messages"),
-	_ERR_DESC(RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION,
-		  "Local: Unknown partition"),
-	_ERR_DESC(RD_KAFKA_RESP_ERR__FS,
-		  "Local: File or filesystem error"),
-	_ERR_DESC(RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC,
-		  "Local: Unknown topic"),
-	_ERR_DESC(RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN,
-		  "Local: All broker connections are down"),
-	_ERR_DESC(RD_KAFKA_RESP_ERR__INVALID_ARG,
-		  "Local: Invalid argument or configuration"),
-	_ERR_DESC(RD_KAFKA_RESP_ERR__TIMED_OUT,
-		  "Local: Timed out"),
-	_ERR_DESC(RD_KAFKA_RESP_ERR__QUEUE_FULL,
-		  "Local: Queue full"),
-        _ERR_DESC(RD_KAFKA_RESP_ERR__ISR_INSUFF,
-		  "Local: ISR count insufficient"),
-        _ERR_DESC(RD_KAFKA_RESP_ERR__NODE_UPDATE,
-		  "Local: Broker node update"),
-	_ERR_DESC(RD_KAFKA_RESP_ERR__SSL,
-		  "Local: SSL error"),
-        _ERR_DESC(RD_KAFKA_RESP_ERR__WAIT_COORD,
-		  "Local: Waiting for coordinator"),
-        _ERR_DESC(RD_KAFKA_RESP_ERR__UNKNOWN_GROUP,
-		  "Local: Unknown group"),
-        _ERR_DESC(RD_KAFKA_RESP_ERR__IN_PROGRESS,
-		  "Local: Operation in progress"),
-        _ERR_DESC(RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS,
-		  "Local: Previous operation in progress"),
-        _ERR_DESC(RD_KAFKA_RESP_ERR__EXISTING_SUBSCRIPTION,
-		  "Local: Existing subscription"),
-        _ERR_DESC(RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS,
-		  "Local: Assign partitions"),
-        _ERR_DESC(RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS,
-		  "Local: Revoke partitions"),
-        _ERR_DESC(RD_KAFKA_RESP_ERR__CONFLICT,
-		  "Local: Conflicting use"),
-        _ERR_DESC(RD_KAFKA_RESP_ERR__STATE,
-		  "Local: Erroneous state"),
-        _ERR_DESC(RD_KAFKA_RESP_ERR__UNKNOWN_PROTOCOL,
-		  "Local: Unknown protocol"),
-        _ERR_DESC(RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED,
-		  "Local: Not implemented"),
-	_ERR_DESC(RD_KAFKA_RESP_ERR__AUTHENTICATION,
-		  "Local: Authentication failure"),
-	_ERR_DESC(RD_KAFKA_RESP_ERR__NO_OFFSET,
-		  "Local: No offset stored"),
-	_ERR_DESC(RD_KAFKA_RESP_ERR__OUTDATED,
-		  "Local: Outdated"),
-	_ERR_DESC(RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE,
-		  "Local: Timed out in queue"),
-        _ERR_DESC(RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE,
-                  "Local: Required feature not supported by broker"),
-        _ERR_DESC(RD_KAFKA_RESP_ERR__WAIT_CACHE,
-                  "Local: Awaiting cache update"),
-        _ERR_DESC(RD_KAFKA_RESP_ERR__INTR,
-                  "Local: Operation interrupted"),
-        _ERR_DESC(RD_KAFKA_RESP_ERR__KEY_SERIALIZATION,
-                  "Local: Key serialization error"),
-        _ERR_DESC(RD_KAFKA_RESP_ERR__VALUE_SERIALIZATION,
-                  "Local: Value serialization error"),
-        _ERR_DESC(RD_KAFKA_RESP_ERR__KEY_DESERIALIZATION,
-                  "Local: Key deserialization error"),
-        _ERR_DESC(RD_KAFKA_RESP_ERR__VALUE_DESERIALIZATION,
-                  "Local: Value deserialization error"),
-
-	_ERR_DESC(RD_KAFKA_RESP_ERR_UNKNOWN,
-		  "Unknown broker error"),
-	_ERR_DESC(RD_KAFKA_RESP_ERR_NO_ERROR,
-		  "Success"),
-	_ERR_DESC(RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE,
-		  "Broker: Offset out of range"),
-	_ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_MSG,
-		  "Broker: Invalid message"),
-	_ERR_DESC(RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART,
-		  "Broker: Unknown topic or partition"),
-	_ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE,
-		  "Broker: Invalid message size"),
-	_ERR_DESC(RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE,
-		  "Broker: Leader not available"),
-	_ERR_DESC(RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION,
-		  "Broker: Not leader for partition"),
-	_ERR_DESC(RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT,
-		  "Broker: Request timed out"),
-	_ERR_DESC(RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE,
-		  "Broker: Broker not available"),
-	_ERR_DESC(RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE,
-		  "Broker: Replica not available"),
-	_ERR_DESC(RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE,
-		  "Broker: Message size too large"),
-	_ERR_DESC(RD_KAFKA_RESP_ERR_STALE_CTRL_EPOCH,
-		  "Broker: StaleControllerEpochCode"),
-	_ERR_DESC(RD_KAFKA_RESP_ERR_OFFSET_METADATA_TOO_LARGE,
-		  "Broker: Offset metadata string too large"),
-	_ERR_DESC(RD_KAFKA_RESP_ERR_NETWORK_EXCEPTION,
-		  "Broker: Broker disconnected before response received"),
-        _ERR_DESC(RD_KAFKA_RESP_ERR_GROUP_LOAD_IN_PROGRESS,
-		  "Broker: Group coordinator load in progress"),
-        _ERR_DESC(RD_KAFKA_RESP_ERR_GROUP_COORDINATOR_NOT_AVAILABLE,
-		  "Broker: Group coordinator not available"),
-        _ERR_DESC(RD_KAFKA_RESP_ERR_NOT_COORDINATOR_FOR_GROUP,
-		  "Broker: Not coordinator for group"),
-        _ERR_DESC(RD_KAFKA_RESP_ERR_TOPIC_EXCEPTION,
-		  "Broker: Invalid topic"),
-        _ERR_DESC(RD_KAFKA_RESP_ERR_RECORD_LIST_TOO_LARGE,
-		  "Broker: Message batch larger than configured server "
-		  "segment size"),
-        _ERR_DESC(RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS,
-		  "Broker: Not enough in-sync replicas"),
-        _ERR_DESC(RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND,
-		  "Broker: Message(s) written to insufficient number of "
-		  "in-sync replicas"),
-        _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_REQUIRED_ACKS,
-		  "Broker: Invalid required acks value"),
-        _ERR_DESC(RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION,
-		  "Broker: Specified group generation id is not valid"),
-        _ERR_DESC(RD_KAFKA_RESP_ERR_INCONSISTENT_GROUP_PROTOCOL,
-		  "Broker: Inconsistent group protocol"),
-	_ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_GROUP_ID,
-		  "Broker: Invalid group.id"),
-        _ERR_DESC(RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID,
-		  "Broker: Unknown member"),
-        _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_SESSION_TIMEOUT,
-		  "Broker: Invalid session timeout"),
-	_ERR_DESC(RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS,
-		  "Broker: Group rebalance in progress"),
-        _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_COMMIT_OFFSET_SIZE,
-		  "Broker: Commit offset data size is not valid"),
-        _ERR_DESC(RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED,
-		  "Broker: Topic authorization failed"),
-	_ERR_DESC(RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED,
-		  "Broker: Group authorization failed"),
-	_ERR_DESC(RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED,
-		  "Broker: Cluster authorization failed"),
-	_ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_TIMESTAMP,
-		  "Broker: Invalid timestamp"),
-	_ERR_DESC(RD_KAFKA_RESP_ERR_UNSUPPORTED_SASL_MECHANISM,
-		  "Broker: Unsupported SASL mechanism"),
-	_ERR_DESC(RD_KAFKA_RESP_ERR_ILLEGAL_SASL_STATE,
-		  "Broker: Request not valid in current SASL state"),
-	_ERR_DESC(RD_KAFKA_RESP_ERR_UNSUPPORTED_VERSION,
-		  "Broker: API version not supported"),
-	_ERR_DESC(RD_KAFKA_RESP_ERR_TOPIC_ALREADY_EXISTS,
-		  "Broker: Topic already exists"),
-	_ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_PARTITIONS,
-		  "Broker: Invalid number of partitions"),
-	_ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_REPLICATION_FACTOR,
-		  "Broker: Invalid replication factor"),
-	_ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_REPLICA_ASSIGNMENT,
-		  "Broker: Invalid replica assignment"),
-	_ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_CONFIG,
-		  "Broker: Configuration is invalid"),
-	_ERR_DESC(RD_KAFKA_RESP_ERR_NOT_CONTROLLER,
-		  "Broker: Not controller for cluster"),
-	_ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_REQUEST,
-		  "Broker: Invalid request"),
-	_ERR_DESC(RD_KAFKA_RESP_ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT,
-		  "Broker: Message format on broker does not support request"),
-        _ERR_DESC(RD_KAFKA_RESP_ERR_POLICY_VIOLATION,
-                  "Broker: Isolation policy violation"),
-        _ERR_DESC(RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER,
-                  "Broker: Broker received an out of order sequence number"),
-        _ERR_DESC(RD_KAFKA_RESP_ERR_DUPLICATE_SEQUENCE_NUMBER,
-                  "Broker: Broker received a duplicate sequence number"),
-        _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH,
-                  "Broker: Producer attempted an operation with an old epoch"),
-        _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_TXN_STATE,
-                  "Broker: Producer attempted a transactional operation in "
-                  "an invalid state"),
-        _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING,
-                  "Broker: Producer attempted to use a producer id which is "
-                  "not currently assigned to its transactional id"),
-        _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_TRANSACTION_TIMEOUT,
-                  "Broker: Transaction timeout is larger than the maximum "
-                  "value allowed by the broker's max.transaction.timeout.ms"),
-        _ERR_DESC(RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS,
-                  "Broker: Producer attempted to update a transaction while "
-                  "another concurrent operation on the same transaction was "
-                  "ongoing"),
-        _ERR_DESC(RD_KAFKA_RESP_ERR_TRANSACTION_COORDINATOR_FENCED,
-                  "Broker: Indicates that the transaction coordinator sending "
-                  "a WriteTxnMarker is no longer the current coordinator for "
-                  "a given producer"),
-        _ERR_DESC(RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED,
-                  "Broker: Transactional Id authorization failed"),
-        _ERR_DESC(RD_KAFKA_RESP_ERR_SECURITY_DISABLED,
-                  "Broker: Security features are disabled"),
-        _ERR_DESC(RD_KAFKA_RESP_ERR_OPERATION_NOT_ATTEMPTED,
-                  "Broker: Operation not attempted"),
-
-	_ERR_DESC(RD_KAFKA_RESP_ERR__END, NULL)
-};
-
-
-void rd_kafka_get_err_descs (const struct rd_kafka_err_desc **errdescs,
-			     size_t *cntp) {
-	*errdescs = rd_kafka_err_descs;
-	*cntp = RD_ARRAYSIZE(rd_kafka_err_descs);
-}
-
-
-const char *rd_kafka_err2str (rd_kafka_resp_err_t err) {
-	static RD_TLS char ret[32];
-	int idx = err - RD_KAFKA_RESP_ERR__BEGIN;
-
-	if (unlikely(err <= RD_KAFKA_RESP_ERR__BEGIN ||
-		     err >= RD_KAFKA_RESP_ERR_END_ALL ||
-		     !rd_kafka_err_descs[idx].desc)) {
-		rd_snprintf(ret, sizeof(ret), "Err-%i?", err);
-		return ret;
-	}
-
-	return rd_kafka_err_descs[idx].desc;
-}
-
-
-const char *rd_kafka_err2name (rd_kafka_resp_err_t err) {
-	static RD_TLS char ret[32];
-	int idx = err - RD_KAFKA_RESP_ERR__BEGIN;
-
-	if (unlikely(err <= RD_KAFKA_RESP_ERR__BEGIN ||
-		     err >= RD_KAFKA_RESP_ERR_END_ALL ||
-		     !rd_kafka_err_descs[idx].desc)) {
-		rd_snprintf(ret, sizeof(ret), "ERR_%i?", err);
-		return ret;
-	}
-
-	return rd_kafka_err_descs[idx].name;
-}
-
-
-rd_kafka_resp_err_t rd_kafka_last_error (void) {
-	return rd_kafka_last_error_code;
-}
-
-
-rd_kafka_resp_err_t rd_kafka_errno2err (int errnox) {
-	switch (errnox)
-	{
-	case EINVAL:
-		return RD_KAFKA_RESP_ERR__INVALID_ARG;
-
-        case EBUSY:
-                return RD_KAFKA_RESP_ERR__CONFLICT;
-
-	case ENOENT:
-		return RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC;
-
-	case ESRCH:
-		return RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION;
-
-	case ETIMEDOUT:
-		return RD_KAFKA_RESP_ERR__TIMED_OUT;
-
-	case EMSGSIZE:
-		return RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE;
-
-	case ENOBUFS:
-		return RD_KAFKA_RESP_ERR__QUEUE_FULL;
-
-	default:
-		return RD_KAFKA_RESP_ERR__FAIL;
-	}
-}
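
The err2str()/err2name()/last_error() functions above form librdkafka's public error-reporting surface; a minimal usage sketch (illustrative only, assuming librdkafka's public header is on the include path):

#include <librdkafka/rdkafka.h>
#include <stdio.h>

/* Sketch: print the last per-thread error as both its symbolic
 * name and its human readable description. */
static void print_last_error (void) {
        rd_kafka_resp_err_t err = rd_kafka_last_error();
        fprintf(stderr, "%s: %s\n",
                rd_kafka_err2name(err), rd_kafka_err2str(err));
}
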
-
-
-
-/**
- * @brief Final destructor for rd_kafka_t, must only be called with refcnt 0.
- *
- * @locality application thread
- */
-void rd_kafka_destroy_final (rd_kafka_t *rk) {
-
-        rd_kafka_assert(rk, rd_atomic32_get(&rk->rk_terminate) != 0);
-
-        /* Synchronize state */
-        rd_kafka_wrlock(rk);
-        rd_kafka_wrunlock(rk);
-
-        rd_kafka_assignors_term(rk);
-
-        rd_kafka_metadata_cache_destroy(rk);
-
-        rd_kafka_timers_destroy(&rk->rk_timers);
-
-        rd_kafka_dbg(rk, GENERIC, "TERMINATE", "Destroying op queues");
-
-        /* Destroy cgrp */
-        if (rk->rk_cgrp) {
-                rd_kafka_dbg(rk, GENERIC, "TERMINATE",
-                             "Destroying cgrp");
-                /* Reset queue forwarding (rep -> cgrp) */
-                rd_kafka_q_fwd_set(rk->rk_rep, NULL);
-                rd_kafka_cgrp_destroy_final(rk->rk_cgrp);
-        }
-
-	/* Purge op-queues */
-	rd_kafka_q_destroy(rk->rk_rep);
-	rd_kafka_q_destroy(rk->rk_ops);
-
-#if WITH_SSL
-	if (rk->rk_conf.ssl.ctx) {
-                rd_kafka_dbg(rk, GENERIC, "TERMINATE", "Destroying SSL CTX");
-                rd_kafka_transport_ssl_ctx_term(rk);
-        }
-#endif
-
-        /* It is not safe to log after this point. */
-        rd_kafka_dbg(rk, GENERIC, "TERMINATE",
-                     "Termination done: freeing resources");
-
-        if (rk->rk_logq) {
-                rd_kafka_q_destroy(rk->rk_logq);
-                rk->rk_logq = NULL;
-        }
-
-        if (rk->rk_type == RD_KAFKA_PRODUCER) {
-		cnd_destroy(&rk->rk_curr_msgs.cnd);
-		mtx_destroy(&rk->rk_curr_msgs.lock);
-	}
-
-	cnd_destroy(&rk->rk_broker_state_change_cnd);
-	mtx_destroy(&rk->rk_broker_state_change_lock);
-
-	if (rk->rk_full_metadata)
-		rd_kafka_metadata_destroy(rk->rk_full_metadata);
-        rd_kafkap_str_destroy(rk->rk_client_id);
-        rd_kafkap_str_destroy(rk->rk_group_id);
-        rd_kafkap_str_destroy(rk->rk_eos.TransactionalId);
-	rd_kafka_anyconf_destroy(_RK_GLOBAL, &rk->rk_conf);
-        rd_list_destroy(&rk->rk_broker_by_id);
-
-	rd_kafkap_bytes_destroy((rd_kafkap_bytes_t *)rk->rk_null_bytes);
-	rwlock_destroy(&rk->rk_lock);
-
-	rd_free(rk);
-	rd_kafka_global_cnt_decr();
-}
-
-
-static void rd_kafka_destroy_app (rd_kafka_t *rk, int blocking) {
-        thrd_t thrd;
-#ifndef _MSC_VER
-	int term_sig = rk->rk_conf.term_sig;
-#endif
-        rd_kafka_dbg(rk, ALL, "DESTROY", "Terminating instance");
-
-        /* The legacy/simple consumer lacks an API to close down the consumer. */
-        if (rk->rk_cgrp) {
-                rd_kafka_dbg(rk, GENERIC, "TERMINATE",
-                             "Closing consumer group");
-                rd_kafka_consumer_close(rk);
-        }
-
-        rd_kafka_dbg(rk, GENERIC, "TERMINATE", "Interrupting timers");
-        rd_kafka_wrlock(rk);
-        thrd = rk->rk_thread;
-	rd_atomic32_add(&rk->rk_terminate, 1);
-        rd_kafka_timers_interrupt(&rk->rk_timers);
-        rd_kafka_wrunlock(rk);
-
-        rd_kafka_dbg(rk, GENERIC, "TERMINATE",
-                     "Sending TERMINATE to main background thread");
-        /* Send op to trigger queue/io wake-up.
-         * The op itself is (likely) ignored by the receiver. */
-        rd_kafka_q_enq(rk->rk_ops, rd_kafka_op_new(RD_KAFKA_OP_TERMINATE));
-
-	rd_kafka_brokers_broadcast_state_change(rk);
-
-#ifndef _MSC_VER
-        /* Interrupt main kafka thread to speed up termination. */
-	if (term_sig) {
-                rd_kafka_dbg(rk, GENERIC, "TERMINATE",
-                             "Sending thread kill signal %d", term_sig);
-                pthread_kill(thrd, term_sig);
-        }
-#endif
-
-        if (!blocking)
-                return; /* FIXME: thread resource leak */
-
-        rd_kafka_dbg(rk, GENERIC, "TERMINATE",
-                     "Joining main background thread");
-
-        if (thrd_join(thrd, NULL) != thrd_success)
-                rd_kafka_assert(NULL, !*"failed to join main thread");
-
-        rd_kafka_destroy_final(rk);
-}
-
-
-/* NOTE: Must only be called by application.
- *       librdkafka itself must use rd_kafka_destroy0(). */
-void rd_kafka_destroy (rd_kafka_t *rk) {
-        rd_kafka_destroy_app(rk, 1);
-}
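
rd_kafka_destroy() blocks until the main thread has been joined (the blocking=1 path above); a sketch of a typical producer shutdown, assuming `rk` is a producer handle that may still have messages in flight:

#include <librdkafka/rdkafka.h>

/* Sketch: orderly producer shutdown. rd_kafka_flush() waits for
 * outstanding deliveries before rd_kafka_destroy() tears the
 * handle down and joins the main background thread. */
static void shutdown_producer (rd_kafka_t *rk) {
        rd_kafka_flush(rk, 10*1000);  /* wait up to 10s for in-flight msgs */
        rd_kafka_destroy(rk);         /* blocks until termination is done */
}
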
-
-
-/**
- * Main destructor for rd_kafka_t
- *
- * Locality: rdkafka main thread or application thread during rd_kafka_new()
- */
-static void rd_kafka_destroy_internal (rd_kafka_t *rk) {
-	rd_kafka_itopic_t *rkt, *rkt_tmp;
-	rd_kafka_broker_t *rkb, *rkb_tmp;
-        rd_list_t wait_thrds;
-        thrd_t *thrd;
-        int i;
-
-        rd_kafka_dbg(rk, ALL, "DESTROY", "Destroy internal");
-
-        /* Call on_destroy() interceptors */
-        rd_kafka_interceptors_on_destroy(rk);
-
-	/* Brokers pick up on rk_terminate automatically. */
-
-        /* List of (broker) threads to join to synchronize termination */
-        rd_list_init(&wait_thrds, rd_atomic32_get(&rk->rk_broker_cnt), NULL);
-
-	rd_kafka_wrlock(rk);
-
-        rd_kafka_dbg(rk, ALL, "DESTROY", "Removing all topics");
-	/* Decommission all topics */
-	TAILQ_FOREACH_SAFE(rkt, &rk->rk_topics, rkt_link, rkt_tmp) {
-		rd_kafka_wrunlock(rk);
-		rd_kafka_topic_partitions_remove(rkt);
-		rd_kafka_wrlock(rk);
-	}
-
-        /* Decommission brokers.
-         * Broker thread holds a refcount and detects when broker refcounts
-         * reaches 1 and then decommissions itself. */
-        TAILQ_FOREACH_SAFE(rkb, &rk->rk_brokers, rkb_link, rkb_tmp) {
-                /* Add broker's thread to wait_thrds list for later joining */
-                thrd = malloc(sizeof(*thrd));
-                *thrd = rkb->rkb_thread;
-                rd_list_add(&wait_thrds, thrd);
-                rd_kafka_wrunlock(rk);
-
-                /* Send op to trigger queue/io wake-up.
-                 * The op itself is (likely) ignored by the broker thread. */
-                rd_kafka_q_enq(rkb->rkb_ops,
-                               rd_kafka_op_new(RD_KAFKA_OP_TERMINATE));
-
-#ifndef _MSC_VER
-                /* Interrupt IO threads to speed up termination. */
-                if (rk->rk_conf.term_sig)
-			pthread_kill(rkb->rkb_thread, rk->rk_conf.term_sig);
-#endif
-
-                rd_kafka_broker_destroy(rkb);
-
-                rd_kafka_wrlock(rk);
-        }
-
-        if (rk->rk_clusterid) {
-                rd_free(rk->rk_clusterid);
-                rk->rk_clusterid = NULL;
-        }
-
-        rd_kafka_wrunlock(rk);
-
-        rd_kafka_dbg(rk, GENERIC, "TERMINATE",
-                     "Purging reply queue");
-
-	/* Purge op-queue */
-        rd_kafka_q_disable(rk->rk_rep);
-	rd_kafka_q_purge(rk->rk_rep);
-
-	/* Lose our special reference to the internal broker. */
-        mtx_lock(&rk->rk_internal_rkb_lock);
-	if ((rkb = rk->rk_internal_rkb)) {
-                rd_kafka_dbg(rk, GENERIC, "TERMINATE",
-                             "Decommissioning internal broker");
-
-                /* Send op to trigger queue wake-up. */
-                rd_kafka_q_enq(rkb->rkb_ops,
-                               rd_kafka_op_new(RD_KAFKA_OP_TERMINATE));
-
-                rk->rk_internal_rkb = NULL;
-                thrd = malloc(sizeof(*thrd));
-                *thrd = rkb->rkb_thread;
-                rd_list_add(&wait_thrds, thrd);
-        }
-        mtx_unlock(&rk->rk_internal_rkb_lock);
-	if (rkb)
-		rd_kafka_broker_destroy(rkb);
-
-
-        rd_kafka_dbg(rk, GENERIC, "TERMINATE",
-                     "Join %d broker thread(s)", rd_list_cnt(&wait_thrds));
-
-        /* Join broker threads */
-        RD_LIST_FOREACH(thrd, &wait_thrds, i) {
-                if (thrd_join(*thrd, NULL) != thrd_success)
-                        ;
-                free(thrd);
-        }
-
-        rd_list_destroy(&wait_thrds);
-}
-
-
-/* Stats buffer printf */
-#define _st_printf(...) do {					\
-		ssize_t r;					\
-		ssize_t rem = size-of;				\
-		r = rd_snprintf(buf+of, rem, __VA_ARGS__);	\
-		if (r >= rem) {					\
-			size *= 2;				\
-			rem = size-of;				\
-			buf = rd_realloc(buf, size);		\
-			r = rd_snprintf(buf+of, rem, __VA_ARGS__);	\
-		}						\
-		of += r;					\
-	} while (0)
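
The macro retries a failed rd_snprintf() after doubling the buffer; a standalone sketch of the same grow-and-retry pattern using plain snprintf()/realloc() (illustrative only, not the code used here):

#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>

/* Sketch: print into the remaining space, and if the output was
 * truncated, double the buffer and print again at the same offset.
 * Like _st_printf(), it grows only once per append. */
static void append_str (char **bufp, size_t *sizep, size_t *ofp,
                        const char *s) {
        ssize_t r, rem = (ssize_t)(*sizep - *ofp);

        r = snprintf(*bufp + *ofp, rem, "%s", s);
        if (r >= rem) {                   /* truncated: grow and retry */
                *sizep *= 2;
                *bufp = realloc(*bufp, *sizep);
                rem = (ssize_t)(*sizep - *ofp);
                r = snprintf(*bufp + *ofp, rem, "%s", s);
        }
        *ofp += r;
}
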
-
-/**
- * Emit stats for toppar
- */
-static RD_INLINE void rd_kafka_stats_emit_toppar (char **bufp, size_t *sizep,
-					       size_t *ofp,
-					       rd_kafka_toppar_t *rktp,
-					       int first) {
-	char *buf = *bufp;
-	size_t size = *sizep;
-	size_t of = *ofp;
-        int64_t consumer_lag = -1;
-        struct offset_stats offs;
-        int32_t leader_nodeid = -1;
-
-        rd_kafka_toppar_lock(rktp);
-
-        if (rktp->rktp_leader) {
-                rd_kafka_broker_lock(rktp->rktp_leader);
-                leader_nodeid = rktp->rktp_leader->rkb_nodeid;
-                rd_kafka_broker_unlock(rktp->rktp_leader);
-        }
-
-        /* Grab a copy of the latest finalized offset stats */
-        offs = rktp->rktp_offsets_fin;
-
-        if (rktp->rktp_hi_offset != RD_KAFKA_OFFSET_INVALID &&
-            rktp->rktp_app_offset >= 0) {
-                if (unlikely(rktp->rktp_app_offset > rktp->rktp_hi_offset))
-                        consumer_lag = 0;
-                else
-                        consumer_lag = rktp->rktp_hi_offset -
-                                rktp->rktp_app_offset;
-        }
-
-	_st_printf("%s\"%"PRId32"\": { "
-		   "\"partition\":%"PRId32", "
-		   "\"leader\":%"PRId32", "
-		   "\"desired\":%s, "
-		   "\"unknown\":%s, "
-		   "\"msgq_cnt\":%i, "
-		   "\"msgq_bytes\":%"PRIu64", "
-		   "\"xmit_msgq_cnt\":%i, "
-		   "\"xmit_msgq_bytes\":%"PRIu64", "
-		   "\"fetchq_cnt\":%i, "
-		   "\"fetchq_size\":%"PRIu64", "
-		   "\"fetch_state\":\"%s\", "
-		   "\"query_offset\":%"PRId64", "
-		   "\"next_offset\":%"PRId64", "
-		   "\"app_offset\":%"PRId64", "
-		   "\"stored_offset\":%"PRId64", "
-		   "\"commited_offset\":%"PRId64", " /*FIXME: issue #80 */
-		   "\"committed_offset\":%"PRId64", "
-		   "\"eof_offset\":%"PRId64", "
-		   "\"lo_offset\":%"PRId64", "
-		   "\"hi_offset\":%"PRId64", "
-                   "\"consumer_lag\":%"PRId64", "
-		   "\"txmsgs\":%"PRIu64", "
-		   "\"txbytes\":%"PRIu64", "
-                   "\"msgs\": %"PRIu64", "
-                   "\"rx_ver_drops\": %"PRIu64" "
-		   "} ",
-		   first ? "" : ", ",
-		   rktp->rktp_partition,
-		   rktp->rktp_partition,
-                   leader_nodeid,
-		   (rktp->rktp_flags&RD_KAFKA_TOPPAR_F_DESIRED)?"true":"false",
-		   (rktp->rktp_flags&RD_KAFKA_TOPPAR_F_UNKNOWN)?"true":"false",
-		   rd_atomic32_get(&rktp->rktp_msgq.rkmq_msg_cnt),
-		   rd_atomic64_get(&rktp->rktp_msgq.rkmq_msg_bytes),
-		   rd_atomic32_get(&rktp->rktp_xmit_msgq.rkmq_msg_cnt),
-		   rd_atomic64_get(&rktp->rktp_xmit_msgq.rkmq_msg_bytes),
-		   rd_kafka_q_len(rktp->rktp_fetchq),
-		   rd_kafka_q_size(rktp->rktp_fetchq),
-		   rd_kafka_fetch_states[rktp->rktp_fetch_state],
-		   rktp->rktp_query_offset,
-                   offs.fetch_offset,
-		   rktp->rktp_app_offset,
-		   rktp->rktp_stored_offset,
-		   rktp->rktp_committed_offset, /* FIXME: issue #80 */
-		   rktp->rktp_committed_offset,
-                   offs.eof_offset,
-		   rktp->rktp_lo_offset,
-		   rktp->rktp_hi_offset,
-                   consumer_lag,
-                   rd_atomic64_get(&rktp->rktp_c.tx_msgs),
-		   rd_atomic64_get(&rktp->rktp_c.tx_bytes),
-		   rd_atomic64_get(&rktp->rktp_c.msgs),
-                   rd_atomic64_get(&rktp->rktp_c.rx_ver_drops));
-
-        rd_kafka_toppar_unlock(rktp);
-
-	*bufp = buf;
-	*sizep = size;
-	*ofp = of;
-}
-
-/**
- * Emit all statistics
- */
-static void rd_kafka_stats_emit_all (rd_kafka_t *rk) {
-	char  *buf;
-	size_t size = 1024*10;
-	size_t of = 0;
-	rd_kafka_broker_t *rkb;
-	rd_kafka_itopic_t *rkt;
-	shptr_rd_kafka_toppar_t *s_rktp;
-	rd_ts_t now;
-	rd_kafka_op_t *rko;
-	unsigned int tot_cnt;
-	size_t tot_size;
-
-	buf = rd_malloc(size);
-
-
-	rd_kafka_curr_msgs_get(rk, &tot_cnt, &tot_size);
-	rd_kafka_rdlock(rk);
-
-	now = rd_clock();
-	_st_printf("{ "
-                   "\"name\": \"%s\", "
-                   "\"type\": \"%s\", "
-		   "\"ts\":%"PRId64", "
-		   "\"time\":%lli, "
-		   "\"replyq\":%i, "
-                   "\"msg_cnt\":%u, "
-		   "\"msg_size\":%"PRIusz", "
-                   "\"msg_max\":%u, "
-		   "\"msg_size_max\":%"PRIusz", "
-                   "\"simple_cnt\":%i, "
-                   "\"metadata_cache_cnt\":%i, "
-		   "\"brokers\":{ "/*open brokers*/,
-                   rk->rk_name,
-                   rd_kafka_type2str(rk->rk_type),
-		   now,
-		   (signed long long)time(NULL),
-		   rd_kafka_q_len(rk->rk_rep),
-		   tot_cnt, tot_size,
-		   rk->rk_curr_msgs.max_cnt, rk->rk_curr_msgs.max_size,
-                   rd_atomic32_get(&rk->rk_simple_cnt),
-                   rk->rk_metadata_cache.rkmc_cnt);
-
-
-	TAILQ_FOREACH(rkb, &rk->rk_brokers, rkb_link) {
-		rd_avg_t rtt, throttle, int_latency;
-		rd_kafka_toppar_t *rktp;
-
-		rd_kafka_broker_lock(rkb);
-		rd_avg_rollover(&int_latency, &rkb->rkb_avg_int_latency);
-		rd_avg_rollover(&rtt, &rkb->rkb_avg_rtt);
-		rd_avg_rollover(&throttle, &rkb->rkb_avg_throttle);
-		_st_printf("%s\"%s\": { "/*open broker*/
-			   "\"name\":\"%s\", "
-			   "\"nodeid\":%"PRId32", "
-			   "\"state\":\"%s\", "
-                           "\"stateage\":%"PRId64", "
-			   "\"outbuf_cnt\":%i, "
-			   "\"outbuf_msg_cnt\":%i, "
-			   "\"waitresp_cnt\":%i, "
-			   "\"waitresp_msg_cnt\":%i, "
-			   "\"tx\":%"PRIu64", "
-			   "\"txbytes\":%"PRIu64", "
-			   "\"txerrs\":%"PRIu64", "
-			   "\"txretries\":%"PRIu64", "
-			   "\"req_timeouts\":%"PRIu64", "
-			   "\"rx\":%"PRIu64", "
-			   "\"rxbytes\":%"PRIu64", "
-			   "\"rxerrs\":%"PRIu64", "
-                           "\"rxcorriderrs\":%"PRIu64", "
-                           "\"rxpartial\":%"PRIu64", "
-                           "\"zbuf_grow\":%"PRIu64", "
-                           "\"buf_grow\":%"PRIu64", "
-                           "\"wakeups\":%"PRIu64", "
-			   "\"int_latency\": {"
-			   " \"min\":%"PRId64","
-			   " \"max\":%"PRId64","
-			   " \"avg\":%"PRId64","
-			   " \"sum\":%"PRId64","
-			   " \"cnt\":%i "
-			   "}, "
-			   "\"rtt\": {"
-			   " \"min\":%"PRId64","
-			   " \"max\":%"PRId64","
-			   " \"avg\":%"PRId64","
-			   " \"sum\":%"PRId64","
-			   " \"cnt\":%i "
-			   "}, "
-			   "\"throttle\": {"
-			   " \"min\":%"PRId64","
-			   " \"max\":%"PRId64","
-			   " \"avg\":%"PRId64","
-			   " \"sum\":%"PRId64","
-			   " \"cnt\":%i "
-			   "}, "
-			   "\"toppars\":{ "/*open toppars*/,
-			   rkb == TAILQ_FIRST(&rk->rk_brokers) ? "" : ", ",
-			   rkb->rkb_name,
-			   rkb->rkb_name,
-			   rkb->rkb_nodeid,
-			   rd_kafka_broker_state_names[rkb->rkb_state],
-                           rkb->rkb_ts_state ? now - rkb->rkb_ts_state : 0,
-			   rd_atomic32_get(&rkb->rkb_outbufs.rkbq_cnt),
-			   rd_atomic32_get(&rkb->rkb_outbufs.rkbq_msg_cnt),
-			   rd_atomic32_get(&rkb->rkb_waitresps.rkbq_cnt),
-			   rd_atomic32_get(&rkb->rkb_waitresps.rkbq_msg_cnt),
-			   rd_atomic64_get(&rkb->rkb_c.tx),
-			   rd_atomic64_get(&rkb->rkb_c.tx_bytes),
-			   rd_atomic64_get(&rkb->rkb_c.tx_err),
-			   rd_atomic64_get(&rkb->rkb_c.tx_retries),
-			   rd_atomic64_get(&rkb->rkb_c.req_timeouts),
-			   rd_atomic64_get(&rkb->rkb_c.rx),
-			   rd_atomic64_get(&rkb->rkb_c.rx_bytes),
-			   rd_atomic64_get(&rkb->rkb_c.rx_err),
-			   rd_atomic64_get(&rkb->rkb_c.rx_corrid_err),
-			   rd_atomic64_get(&rkb->rkb_c.rx_partial),
-                           rd_atomic64_get(&rkb->rkb_c.zbuf_grow),
-                           rd_atomic64_get(&rkb->rkb_c.buf_grow),
-                           rd_atomic64_get(&rkb->rkb_c.wakeups),
-			   int_latency.ra_v.minv,
-			   int_latency.ra_v.maxv,
-			   int_latency.ra_v.avg,
-			   int_latency.ra_v.sum,
-			   int_latency.ra_v.cnt,
-			   rtt.ra_v.minv,
-			   rtt.ra_v.maxv,
-			   rtt.ra_v.avg,
-			   rtt.ra_v.sum,
-			   rtt.ra_v.cnt,
-			   throttle.ra_v.minv,
-			   throttle.ra_v.maxv,
-			   throttle.ra_v.avg,
-			   throttle.ra_v.sum,
-			   throttle.ra_v.cnt);
-
-		TAILQ_FOREACH(rktp, &rkb->rkb_toppars, rktp_rkblink) {
-			_st_printf("%s\"%.*s-%"PRId32"\": { "
-				   "\"topic\":\"%.*s\", "
-				   "\"partition\":%"PRId32"} ",
-				   rktp==TAILQ_FIRST(&rkb->rkb_toppars)?"":", ",
-				   RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
-                                   rktp->rktp_partition,
-				   RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
-				   rktp->rktp_partition);
-		}
-
-		rd_kafka_broker_unlock(rkb);
-
-		_st_printf("} "/*close toppars*/
-			   "} "/*close broker*/);
-	}
-
-
-	_st_printf("}, " /* close "brokers" array */
-		   "\"topics\":{ ");
-
-	TAILQ_FOREACH(rkt, &rk->rk_topics, rkt_link) {
-		int i, j;
-
-		rd_kafka_topic_rdlock(rkt);
-		_st_printf("%s\"%.*s\": { "
-			   "\"topic\":\"%.*s\", "
-			   "\"metadata_age\":%"PRId64", "
-			   "\"partitions\":{ " /*open partitions*/,
-			   rkt==TAILQ_FIRST(&rk->rk_topics)?"":", ",
-			   RD_KAFKAP_STR_PR(rkt->rkt_topic),
-			   RD_KAFKAP_STR_PR(rkt->rkt_topic),
-			   rkt->rkt_ts_metadata ?
-			   (rd_clock() - rkt->rkt_ts_metadata)/1000 : 0);
-
-		for (i = 0 ; i < rkt->rkt_partition_cnt ; i++)
-			rd_kafka_stats_emit_toppar(&buf, &size, &of,
-						   rd_kafka_toppar_s2i(rkt->rkt_p[i]),
-						   i == 0);
-
-                RD_LIST_FOREACH(s_rktp, &rkt->rkt_desp, j)
-			rd_kafka_stats_emit_toppar(&buf, &size, &of,
-						   rd_kafka_toppar_s2i(s_rktp),
-						   i+j == 0);
-
-                i += j;
-
-		if (rkt->rkt_ua)
-			rd_kafka_stats_emit_toppar(&buf, &size, &of,
-						   rd_kafka_toppar_s2i(rkt->rkt_ua),
-                                                   i++ == 0);
-		rd_kafka_topic_rdunlock(rkt);
-
-		_st_printf("} "/*close partitions*/
-			   "} "/*close topic*/);
-
-	}
-	_st_printf("} "/*close topics*/);
-
-        if (rk->rk_cgrp) {
-                rd_kafka_cgrp_t *rkcg = rk->rk_cgrp;
-                _st_printf(", \"cgrp\": { "
-                           "\"rebalance_age\": %"PRId64", "
-                           "\"rebalance_cnt\": %d, "
-                           "\"assignment_size\": %d }",
-                           rkcg->rkcg_c.ts_rebalance ?
-                           (rd_clock() - rkcg->rkcg_c.ts_rebalance)/1000 : 0,
-                           rkcg->rkcg_c.rebalance_cnt,
-                           rkcg->rkcg_c.assignment_size);
-        }
-	rd_kafka_rdunlock(rk);
-
-        _st_printf("}"/*close object*/);
-
-
-	/* Enqueue op for application */
-	rko = rd_kafka_op_new(RD_KAFKA_OP_STATS);
-        rd_kafka_op_set_prio(rko, RD_KAFKA_PRIO_HIGH);
-	rko->rko_u.stats.json = buf;
-	rko->rko_u.stats.json_len = of;
-	rd_kafka_q_enq(rk->rk_rep, rko);
-}
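
The JSON built here is delivered to the application as an RD_KAFKA_OP_STATS op; a sketch of the receiving side using the public stats callback (assumes statistics.interval.ms has been set on the configuration and that the application calls rd_kafka_poll()):

#include <librdkafka/rdkafka.h>
#include <stdio.h>

/* Sketch: receiving the JSON document emitted above.
 * Returning 0 lets librdkafka free the json buffer. */
static int my_stats_cb (rd_kafka_t *rk, char *json, size_t json_len,
                        void *opaque) {
        (void)rk; (void)opaque;
        fprintf(stderr, "stats (%zu bytes): %.*s\n",
                json_len, (int)json_len, json);
        return 0;
}

/* Registered on the configuration object before rd_kafka_new():
 *   rd_kafka_conf_set_stats_cb(conf, my_stats_cb);               */
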
-
-
-
-static void rd_kafka_topic_scan_tmr_cb (rd_kafka_timers_t *rkts, void *arg) {
-        rd_kafka_t *rk = rkts->rkts_rk;
-	rd_kafka_topic_scan_all(rk, rd_clock());
-}
-
-static void rd_kafka_stats_emit_tmr_cb (rd_kafka_timers_t *rkts, void *arg) {
-        rd_kafka_t *rk = rkts->rkts_rk;
-	rd_kafka_stats_emit_all(rk);
-}
-
-
-/**
- * @brief Periodic metadata refresh callback
- *
- * @locality rdkafka main thread
- */
-static void rd_kafka_metadata_refresh_cb (rd_kafka_timers_t *rkts, void *arg) {
-        rd_kafka_t *rk = rkts->rkts_rk;
-        int sparse = 1;
-
-        /* Don't do sparse requests if there is a consumer group with an
-         * active subscription, since subscriptions need to be able to match
-         * on all topics. */
-        if (rk->rk_type == RD_KAFKA_CONSUMER && rk->rk_cgrp &&
-            rk->rk_cgrp->rkcg_flags & RD_KAFKA_CGRP_F_WILDCARD_SUBSCRIPTION)
-                sparse = 0;
-
-        if (sparse)
-                rd_kafka_metadata_refresh_known_topics(rk, NULL, 1/*force*/,
-                                                       "periodic refresh");
-        else
-                rd_kafka_metadata_refresh_all(rk, NULL, "periodic refresh");
-}
-
-
-/**
- * Main loop for Kafka handler thread.
- */
-static int rd_kafka_thread_main (void *arg) {
-        rd_kafka_t *rk = arg;
-	rd_kafka_timer_t tmr_topic_scan = RD_ZERO_INIT;
-	rd_kafka_timer_t tmr_stats_emit = RD_ZERO_INIT;
-	rd_kafka_timer_t tmr_metadata_refresh = RD_ZERO_INIT;
-
-        rd_snprintf(rd_kafka_thread_name, sizeof(rd_kafka_thread_name), "main");
-
-	(void)rd_atomic32_add(&rd_kafka_thread_cnt_curr, 1);
-
-	/* Acquire lock (which was held by thread creator during creation)
-	 * to synchronise state. */
-	rd_kafka_wrlock(rk);
-	rd_kafka_wrunlock(rk);
-
-	rd_kafka_timer_start(&rk->rk_timers, &tmr_topic_scan, 1000000,
-			     rd_kafka_topic_scan_tmr_cb, NULL);
-	rd_kafka_timer_start(&rk->rk_timers, &tmr_stats_emit,
-			     rk->rk_conf.stats_interval_ms * 1000ll,
-			     rd_kafka_stats_emit_tmr_cb, NULL);
-        if (rk->rk_conf.metadata_refresh_interval_ms > 0)
-                rd_kafka_timer_start(&rk->rk_timers, &tmr_metadata_refresh,
-                                     rk->rk_conf.metadata_refresh_interval_ms *
-                                     1000ll,
-                                     rd_kafka_metadata_refresh_cb, NULL);
-
-        if (rk->rk_cgrp) {
-                rd_kafka_cgrp_reassign_broker(rk->rk_cgrp);
-                rd_kafka_q_fwd_set(rk->rk_cgrp->rkcg_ops, rk->rk_ops);
-        }
-
-	while (likely(!rd_kafka_terminating(rk) ||
-		      rd_kafka_q_len(rk->rk_ops))) {
-                rd_ts_t sleeptime = rd_kafka_timers_next(
-                        &rk->rk_timers, 1000*1000/*1s*/, 1/*lock*/);
-                rd_kafka_q_serve(rk->rk_ops, (int)(sleeptime / 1000), 0,
-                                 RD_KAFKA_Q_CB_CALLBACK, NULL, NULL);
-		if (rk->rk_cgrp) /* FIXME: move to timer-triggered */
-			rd_kafka_cgrp_serve(rk->rk_cgrp);
-		rd_kafka_timers_run(&rk->rk_timers, RD_POLL_NOWAIT);
-	}
-
-	rd_kafka_q_disable(rk->rk_ops);
-	rd_kafka_q_purge(rk->rk_ops);
-
-        rd_kafka_timer_stop(&rk->rk_timers, &tmr_topic_scan, 1);
-        rd_kafka_timer_stop(&rk->rk_timers, &tmr_stats_emit, 1);
-        rd_kafka_timer_stop(&rk->rk_timers, &tmr_metadata_refresh, 1);
-
-        /* Synchronise state */
-        rd_kafka_wrlock(rk);
-        rd_kafka_wrunlock(rk);
-
-        rd_kafka_destroy_internal(rk);
-
-        rd_kafka_dbg(rk, GENERIC, "TERMINATE",
-                     "Main background thread exiting");
-
-	rd_atomic32_sub(&rd_kafka_thread_cnt_curr, 1);
-
-	return 0;
-}
-
-
-static void rd_kafka_term_sig_handler (int sig) {
-	/* nop */
-}
-
-
-rd_kafka_t *rd_kafka_new (rd_kafka_type_t type, rd_kafka_conf_t *app_conf,
-			  char *errstr, size_t errstr_size) {
-	rd_kafka_t *rk;
-	static rd_atomic32_t rkid;
-        rd_kafka_conf_t *conf;
-        rd_kafka_resp_err_t ret_err = RD_KAFKA_RESP_ERR_NO_ERROR;
-        int ret_errno = 0;
-#ifndef _MSC_VER
-        sigset_t newset, oldset;
-#endif
-
-	call_once(&rd_kafka_global_init_once, rd_kafka_global_init);
-
-        /* rd_kafka_new() takes ownership of the provided \p app_conf
-         * object if rd_kafka_new() succeeds.
-         * Since \p app_conf is optional we allocate a default configuration
-         * object here if \p app_conf is NULL.
-         * The configuration object itself is struct-copied later
-         * leaving the default *conf pointer to be ready for freeing.
-         * In case new() fails and app_conf was specified we will clear out
-         * rk_conf to avoid double-freeing from destroy_internal() and the
-         * user's eventual call to rd_kafka_conf_destroy().
-         * This is all a bit tricky but that's the nature of
-         * legacy interfaces. */
-        if (!app_conf)
-                conf = rd_kafka_conf_new();
-        else
-                conf = app_conf;
-
-        /* Verify mandatory configuration */
-        if (!conf->socket_cb) {
-                rd_snprintf(errstr, errstr_size,
-                            "Mandatory config property 'socket_cb' not set");
-                if (!app_conf)
-                        rd_kafka_conf_destroy(conf);
-                rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__INVALID_ARG, EINVAL);
-                return NULL;
-        }
-
-        if (!conf->open_cb) {
-                rd_snprintf(errstr, errstr_size,
-                            "Mandatory config property 'open_cb' not set");
-                if (!app_conf)
-                        rd_kafka_conf_destroy(conf);
-                rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__INVALID_ARG, EINVAL);
-                return NULL;
-        }
-
-        if (conf->metadata_max_age_ms == -1) {
-                if (conf->metadata_refresh_interval_ms > 0)
-                        conf->metadata_max_age_ms =
-                                conf->metadata_refresh_interval_ms * 3;
-                else /* use default value of refresh * 3 */
-                        conf->metadata_max_age_ms = 5*60*1000 * 3;
-        }
-
-	rd_kafka_global_cnt_incr();
-
-	/*
-	 * Set up the handle.
-	 */
-	rk = rd_calloc(1, sizeof(*rk));
-
-	rk->rk_type = type;
-
-        /* Struct-copy the config object. */
-	rk->rk_conf = *conf;
-        if (!app_conf)
-                rd_free(conf); /* Free the base config struct only,
-                                * not its fields since they were copied to
-                                * rk_conf just above. Those fields are
-                                * freed from rd_kafka_destroy_internal()
-                                * as the rk itself is destroyed. */
-
-        /* Call on_new() interceptors */
-        rd_kafka_interceptors_on_new(rk, &rk->rk_conf);
-
-	rwlock_init(&rk->rk_lock);
-        mtx_init(&rk->rk_internal_rkb_lock, mtx_plain);
-
-	cnd_init(&rk->rk_broker_state_change_cnd);
-	mtx_init(&rk->rk_broker_state_change_lock, mtx_plain);
-
-	rk->rk_rep = rd_kafka_q_new(rk);
-	rk->rk_ops = rd_kafka_q_new(rk);
-        rk->rk_ops->rkq_serve = rd_kafka_poll_cb;
-        rk->rk_ops->rkq_opaque = rk;
-
-        if (rk->rk_conf.log_queue) {
-                rk->rk_logq = rd_kafka_q_new(rk);
-                rk->rk_logq->rkq_serve = rd_kafka_poll_cb;
-                rk->rk_logq->rkq_opaque = rk;
-        }
-
-	TAILQ_INIT(&rk->rk_brokers);
-	TAILQ_INIT(&rk->rk_topics);
-        rd_kafka_timers_init(&rk->rk_timers, rk);
-        rd_kafka_metadata_cache_init(rk);
-
-	if (rk->rk_conf.dr_cb || rk->rk_conf.dr_msg_cb)
-		rk->rk_conf.enabled_events |= RD_KAFKA_EVENT_DR;
-	if (rk->rk_conf.rebalance_cb)
-		rk->rk_conf.enabled_events |= RD_KAFKA_EVENT_REBALANCE;
-	if (rk->rk_conf.offset_commit_cb)
-		rk->rk_conf.enabled_events |= RD_KAFKA_EVENT_OFFSET_COMMIT;
-
-	/* Convenience Kafka protocol null bytes */
-	rk->rk_null_bytes = rd_kafkap_bytes_new(NULL, 0);
-
-	if (rk->rk_conf.debug)
-                rk->rk_conf.log_level = LOG_DEBUG;
-
-	rd_snprintf(rk->rk_name, sizeof(rk->rk_name), "%s#%s-%i",
-                    rk->rk_conf.client_id_str, rd_kafka_type2str(rk->rk_type),
-                    rd_atomic32_add(&rkid, 1));
-
-	/* Construct clientid kafka string */
-	rk->rk_client_id = rd_kafkap_str_new(rk->rk_conf.client_id_str,-1);
-
-        /* Convert group.id to kafka string (may be NULL) */
-        rk->rk_group_id = rd_kafkap_str_new(rk->rk_conf.group_id_str,-1);
-
-        /* Config fixups */
-        rk->rk_conf.queued_max_msg_bytes =
-                (int64_t)rk->rk_conf.queued_max_msg_kbytes * 1000ll;
-
-	/* Enable api.version.request=true if broker.version.fallback
-	 * indicates a supporting broker. */
-	if (rd_kafka_ApiVersion_is_queryable(rk->rk_conf.broker_version_fallback))
-		rk->rk_conf.api_version_request = 1;
-
-	if (rk->rk_type == RD_KAFKA_PRODUCER) {
-		mtx_init(&rk->rk_curr_msgs.lock, mtx_plain);
-		cnd_init(&rk->rk_curr_msgs.cnd);
-		rk->rk_curr_msgs.max_cnt =
-			rk->rk_conf.queue_buffering_max_msgs;
-                if ((unsigned long long)rk->rk_conf.queue_buffering_max_kbytes * 1024 >
-                    (unsigned long long)SIZE_MAX)
-                        rk->rk_curr_msgs.max_size = SIZE_MAX;
-                else
-                        rk->rk_curr_msgs.max_size =
-                        (size_t)rk->rk_conf.queue_buffering_max_kbytes * 1024;
-	}
-
-        if (rd_kafka_assignors_init(rk, errstr, errstr_size) == -1) {
-                ret_err = RD_KAFKA_RESP_ERR__INVALID_ARG;
-                ret_errno = EINVAL;
-                goto fail;
-        }
-
-        if (rk->rk_conf.security_protocol == RD_KAFKA_PROTO_SASL_SSL ||
-            rk->rk_conf.security_protocol == RD_KAFKA_PROTO_SASL_PLAINTEXT) {
-                if (rd_kafka_sasl_select_provider(rk,
-                                                  errstr, errstr_size) == -1) {
-                        ret_err = RD_KAFKA_RESP_ERR__INVALID_ARG;
-                        ret_errno = EINVAL;
-                        goto fail;
-                }
-        }
-
-#if WITH_SSL
-	if (rk->rk_conf.security_protocol == RD_KAFKA_PROTO_SSL ||
-	    rk->rk_conf.security_protocol == RD_KAFKA_PROTO_SASL_SSL) {
-		/* Create SSL context */
-		if (rd_kafka_transport_ssl_ctx_init(rk, errstr,
-						    errstr_size) == -1) {
-                        ret_err = RD_KAFKA_RESP_ERR__INVALID_ARG;
-                        ret_errno = EINVAL;
-                        goto fail;
-                }
-        }
-#endif
-
-	/* Client group, eligible both in consumer and producer mode. */
-        if (type == RD_KAFKA_CONSUMER &&
-	    RD_KAFKAP_STR_LEN(rk->rk_group_id) > 0)
-                rk->rk_cgrp = rd_kafka_cgrp_new(rk,
-                                                rk->rk_group_id,
-                                                rk->rk_client_id);
-
-
-
-#ifndef _MSC_VER
-        /* Block all signals in newly created thread.
-         * To avoid a race condition we block all signals in the calling
-         * thread, which the new thread will inherit its sigmask from,
-         * and then restore the original sigmask of the calling thread when
-         * we're done creating the thread. */
-        sigemptyset(&oldset);
-        sigfillset(&newset);
-	if (rk->rk_conf.term_sig) {
-		struct sigaction sa_term = {
-			.sa_handler = rd_kafka_term_sig_handler
-		};
-		sigaction(rk->rk_conf.term_sig, &sa_term, NULL);
-	}
-        pthread_sigmask(SIG_SETMASK, &newset, &oldset);
-#endif
-
-	/* Lock handle here to synchronise state, i.e., hold off
-	 * the thread until we've finalized the handle. */
-	rd_kafka_wrlock(rk);
-
-	/* Create handler thread */
-	if ((thrd_create(&rk->rk_thread,
-			 rd_kafka_thread_main, rk)) != thrd_success) {
-                ret_err = RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE;
-                ret_errno = errno;
-		if (errstr)
-			rd_snprintf(errstr, errstr_size,
-				    "Failed to create thread: %s (%i)",
-				    rd_strerror(errno), errno);
-		rd_kafka_wrunlock(rk);
-#ifndef _MSC_VER
-                /* Restore sigmask of caller */
-                pthread_sigmask(SIG_SETMASK, &oldset, NULL);
-#endif
-                goto fail;
-        }
-
-        rd_kafka_wrunlock(rk);
-
-        rk->rk_eos.PID = -1;
-        rk->rk_eos.TransactionalId = rd_kafkap_str_new(NULL, 0);
-
-        mtx_lock(&rk->rk_internal_rkb_lock);
-	rk->rk_internal_rkb = rd_kafka_broker_add(rk, RD_KAFKA_INTERNAL,
-						  RD_KAFKA_PROTO_PLAINTEXT,
-						  "", 0, RD_KAFKA_NODEID_UA);
-        mtx_unlock(&rk->rk_internal_rkb_lock);
-
-	/* Add initial list of brokers from configuration */
-	if (rk->rk_conf.brokerlist) {
-		if (rd_kafka_brokers_add0(rk, rk->rk_conf.brokerlist) == 0)
-			rd_kafka_op_err(rk, RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN,
-					"No brokers configured");
-	}
-
-#ifndef _MSC_VER
-	/* Restore sigmask of caller */
-	pthread_sigmask(SIG_SETMASK, &oldset, NULL);
-#endif
-
-        /* Free user supplied conf's base pointer on success,
-         * but not the actual allocated fields since the struct
-         * will have been copied in its entirety above. */
-        if (app_conf)
-                rd_free(app_conf);
-	rd_kafka_set_last_error(0, 0);
-
-        rk->rk_initialized = 1;
-
-	return rk;
-
-fail:
-        /*
-         * Error out and clean up
-         */
-
-        /* If on_new() interceptors have been called we also need
-         * to allow interceptor clean-up by calling on_destroy() */
-        rd_kafka_interceptors_on_destroy(rk);
-
-        /* If rk_conf is a struct-copy of the application configuration
-         * we need to prevent the rk_conf fields from being freed by
-         * rd_kafka_destroy_internal() since they belong to app_conf.
-         * However, there are some internal fields, such as interceptors,
-         * that belong to rk_conf and thus need to be cleaned up.
-         * Legacy APIs, sigh.. */
-        if (app_conf) {
-                rd_kafka_assignors_term(rk);
-                rd_kafka_interceptors_destroy(&rk->rk_conf);
-                memset(&rk->rk_conf, 0, sizeof(rk->rk_conf));
-        }
-
-        rd_atomic32_add(&rk->rk_terminate, 1);
-        rd_kafka_destroy_internal(rk);
-        rd_kafka_destroy_final(rk);
-
-        rd_kafka_set_last_error(ret_err, ret_errno);
-
-        return NULL;
-}
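
A minimal sketch of the construction path into rd_kafka_new() from the application side (broker address is a placeholder; on success the handle takes ownership of the conf object, on failure the caller still owns it, matching the ownership comments above):

#include <librdkafka/rdkafka.h>
#include <stdio.h>

static rd_kafka_t *make_producer (void) {
        char errstr[512];
        rd_kafka_conf_t *conf = rd_kafka_conf_new();

        if (rd_kafka_conf_set(conf, "bootstrap.servers", "localhost:9092",
                              errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) {
                fprintf(stderr, "conf: %s\n", errstr);
                rd_kafka_conf_destroy(conf);
                return NULL;
        }

        rd_kafka_t *rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf,
                                      errstr, sizeof(errstr));
        if (!rk) {
                fprintf(stderr, "rd_kafka_new: %s\n", errstr);
                rd_kafka_conf_destroy(conf); /* still owned by caller on failure */
        }
        return rk;
}
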
-
-
-
-
-
-/**
- * Produce a single message.
- * Locality: any application thread
- */
-int rd_kafka_produce (rd_kafka_topic_t *rkt, int32_t partition,
-		      int msgflags,
-		      void *payload, size_t len,
-		      const void *key, size_t keylen,
-		      void *msg_opaque) {
-	return rd_kafka_msg_new(rd_kafka_topic_a2i(rkt), partition,
-				msgflags, payload, len,
-				key, keylen, msg_opaque);
-}
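
A sketch of the legacy produce path this wrapper serves (topic name is a placeholder; RD_KAFKA_MSG_F_COPY asks librdkafka to copy the payload, and RD_KAFKA_PARTITION_UA lets the partitioner choose the partition):

#include <librdkafka/rdkafka.h>
#include <string.h>
#include <stdio.h>

static void produce_one (rd_kafka_t *rk, const char *payload) {
        rd_kafka_topic_t *rkt = rd_kafka_topic_new(rk, "example-topic", NULL);

        if (rd_kafka_produce(rkt, RD_KAFKA_PARTITION_UA, RD_KAFKA_MSG_F_COPY,
                             (void *)payload, strlen(payload),
                             NULL, 0, NULL) == -1)
                fprintf(stderr, "produce failed: %s\n",
                        rd_kafka_err2str(rd_kafka_last_error()));

        rd_kafka_poll(rk, 0);            /* serve delivery reports */
        rd_kafka_topic_destroy(rkt);
}
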
-
-
-/**
- * Counts usage of the legacy/simple consumer (rd_kafka_consume_start() and
- * friends). Since it does not have an API for stopping the cgrp, we will
- * need to sort that out automatically in the background when all consumption
- * has stopped.
- *
- * Returns 0 if a high-level consumer is already instantiated, which means a
- * simple consumer cannot co-operate with it, else 1.
- *
- * A rd_kafka_t handle can never migrate from simple to high-level, or
- * vice versa, so we don't need a ..consumer_del().
- */
-int rd_kafka_simple_consumer_add (rd_kafka_t *rk) {
-        if (rd_atomic32_get(&rk->rk_simple_cnt) < 0)
-                return 0;
-
-        return (int)rd_atomic32_add(&rk->rk_simple_cnt, 1);
-}
-
-
-
-
-/**
- * rktp fetch is split up in these parts:
- *   * application side:
- *   * broker side (handled by current leader broker thread for rktp):
- *          - the fetch state, initial offset, etc.
- *          - fetching messages, updating fetched offset, etc.
- *          - offset commits
- *
- * Communication between the two is:
- *    app side -> rdkafka main side: rktp_ops
- *    broker thread -> app side: rktp_fetchq
- *
- * There is no shared state between these threads, instead
- * state is communicated through the two op queues, and state synchronization
- * is performed by version barriers.
- *
- */
-
-static RD_UNUSED
-int rd_kafka_consume_start0 (rd_kafka_itopic_t *rkt, int32_t partition,
-				    int64_t offset, rd_kafka_q_t *rkq) {
-	shptr_rd_kafka_toppar_t *s_rktp;
-
-	if (partition < 0) {
-		rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION,
-					ESRCH);
-		return -1;
-	}
-
-        if (!rd_kafka_simple_consumer_add(rkt->rkt_rk)) {
-		rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__INVALID_ARG, EINVAL);
-                return -1;
-        }
-
-	rd_kafka_topic_wrlock(rkt);
-	s_rktp = rd_kafka_toppar_desired_add(rkt, partition);
-	rd_kafka_topic_wrunlock(rkt);
-
-        /* Verify offset */
-	if (offset == RD_KAFKA_OFFSET_BEGINNING ||
-	    offset == RD_KAFKA_OFFSET_END ||
-            offset <= RD_KAFKA_OFFSET_TAIL_BASE) {
-                /* logical offsets */
-
-	} else if (offset == RD_KAFKA_OFFSET_STORED) {
-		/* offset manager */
-
-                if (rkt->rkt_conf.offset_store_method ==
-                    RD_KAFKA_OFFSET_METHOD_BROKER &&
-                    RD_KAFKAP_STR_IS_NULL(rkt->rkt_rk->rk_group_id)) {
-                        /* Broker based offsets require a group id. */
-                        rd_kafka_toppar_destroy(s_rktp);
-			rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__INVALID_ARG,
-						EINVAL);
-                        return -1;
-                }
-
-	} else if (offset < 0) {
-		rd_kafka_toppar_destroy(s_rktp);
-		rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__INVALID_ARG,
-					EINVAL);
-		return -1;
-
-        }
-
-        rd_kafka_toppar_op_fetch_start(rd_kafka_toppar_s2i(s_rktp), offset,
-				       rkq, RD_KAFKA_NO_REPLYQ);
-
-        rd_kafka_toppar_destroy(s_rktp);
-
-	rd_kafka_set_last_error(0, 0);
-	return 0;
-}
-
-
-
-
-int rd_kafka_consume_start (rd_kafka_topic_t *app_rkt, int32_t partition,
-			    int64_t offset) {
-        rd_kafka_itopic_t *rkt = rd_kafka_topic_a2i(app_rkt);
-        rd_kafka_dbg(rkt->rkt_rk, TOPIC, "START",
-                     "Start consuming partition %"PRId32,partition);
- 	return rd_kafka_consume_start0(rkt, partition, offset, NULL);
-}
-
-int rd_kafka_consume_start_queue (rd_kafka_topic_t *app_rkt, int32_t partition,
-				  int64_t offset, rd_kafka_queue_t *rkqu) {
-        rd_kafka_itopic_t *rkt = rd_kafka_topic_a2i(app_rkt);
-
- 	return rd_kafka_consume_start0(rkt, partition, offset, rkqu->rkqu_q);
-}
-
-
-
-
-static RD_UNUSED int rd_kafka_consume_stop0 (rd_kafka_toppar_t *rktp) {
-        rd_kafka_q_t *tmpq = NULL;
-        rd_kafka_resp_err_t err;
-
-        rd_kafka_topic_wrlock(rktp->rktp_rkt);
-        rd_kafka_toppar_lock(rktp);
-	rd_kafka_toppar_desired_del(rktp);
-        rd_kafka_toppar_unlock(rktp);
-	rd_kafka_topic_wrunlock(rktp->rktp_rkt);
-
-        tmpq = rd_kafka_q_new(rktp->rktp_rkt->rkt_rk);
-
-        rd_kafka_toppar_op_fetch_stop(rktp, RD_KAFKA_REPLYQ(tmpq, 0));
-
-        /* Synchronisation: Wait for stop reply from broker thread */
-        err = rd_kafka_q_wait_result(tmpq, RD_POLL_INFINITE);
-        rd_kafka_q_destroy(tmpq);
-
-	rd_kafka_set_last_error(err, err ? EINVAL : 0);
-
-	return err ? -1 : 0;
-}
-
-
-int rd_kafka_consume_stop (rd_kafka_topic_t *app_rkt, int32_t partition) {
-        rd_kafka_itopic_t *rkt = rd_kafka_topic_a2i(app_rkt);
-	shptr_rd_kafka_toppar_t *s_rktp;
-        int r;
-
-	if (partition == RD_KAFKA_PARTITION_UA) {
-		rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__INVALID_ARG, EINVAL);
-		return -1;
-	}
-
-	rd_kafka_topic_wrlock(rkt);
-	if (!(s_rktp = rd_kafka_toppar_get(rkt, partition, 0)) &&
-	    !(s_rktp = rd_kafka_toppar_desired_get(rkt, partition))) {
-		rd_kafka_topic_wrunlock(rkt);
-		rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION,
-					ESRCH);
-		return -1;
-	}
-        rd_kafka_topic_wrunlock(rkt);
-
-        r = rd_kafka_consume_stop0(rd_kafka_toppar_s2i(s_rktp));
-	/* set_last_error() called by stop0() */
-
-        rd_kafka_toppar_destroy(s_rktp);
-
-        return r;
-}
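
A sketch of the simple/legacy consumer flow built on consume_start()/consume()/consume_stop() above (assumes `rkt` was created with rd_kafka_topic_new() on a consumer handle):

#include <librdkafka/rdkafka.h>
#include <stdio.h>

static void consume_some (rd_kafka_topic_t *rkt, int32_t partition) {
        if (rd_kafka_consume_start(rkt, partition,
                                   RD_KAFKA_OFFSET_BEGINNING) == -1) {
                fprintf(stderr, "start: %s\n",
                        rd_kafka_err2str(rd_kafka_last_error()));
                return;
        }

        for (int i = 0 ; i < 100 ; i++) {
                rd_kafka_message_t *msg = rd_kafka_consume(rkt, partition,
                                                           1000 /*ms*/);
                if (!msg)
                        continue;        /* timed out */
                if (!msg->err)
                        printf("offset %lld: %zu bytes\n",
                               (long long)msg->offset, msg->len);
                rd_kafka_message_destroy(msg);
        }

        rd_kafka_consume_stop(rkt, partition);
}
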
-
-
-
-rd_kafka_resp_err_t rd_kafka_seek (rd_kafka_topic_t *app_rkt,
-                                   int32_t partition,
-                                   int64_t offset,
-                                   int timeout_ms) {
-        rd_kafka_itopic_t *rkt = rd_kafka_topic_a2i(app_rkt);
-        shptr_rd_kafka_toppar_t *s_rktp;
-	rd_kafka_toppar_t *rktp;
-        rd_kafka_q_t *tmpq = NULL;
-        rd_kafka_resp_err_t err;
-
-        /* FIXME: simple consumer check */
-
-	if (partition == RD_KAFKA_PARTITION_UA)
-                return RD_KAFKA_RESP_ERR__INVALID_ARG;
-
-	rd_kafka_topic_rdlock(rkt);
-	if (!(s_rktp = rd_kafka_toppar_get(rkt, partition, 0)) &&
-	    !(s_rktp = rd_kafka_toppar_desired_get(rkt, partition))) {
-		rd_kafka_topic_rdunlock(rkt);
-                return RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION;
-	}
-	rd_kafka_topic_rdunlock(rkt);
-
-        if (timeout_ms)
-                tmpq = rd_kafka_q_new(rkt->rkt_rk);
-
-        rktp = rd_kafka_toppar_s2i(s_rktp);
-        if ((err = rd_kafka_toppar_op_seek(rktp, offset,
-					   RD_KAFKA_REPLYQ(tmpq, 0)))) {
-                if (tmpq)
-                        rd_kafka_q_destroy(tmpq);
-                rd_kafka_toppar_destroy(s_rktp);
-                return err;
-        }
-
-	rd_kafka_toppar_destroy(s_rktp);
-
-        if (tmpq) {
-                err = rd_kafka_q_wait_result(tmpq, timeout_ms);
-                rd_kafka_q_destroy(tmpq);
-                return err;
-        }
-
-        return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
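
A sketch of repositioning a running legacy consumer with rd_kafka_seek(); a non-zero timeout makes the call wait for the fetcher to apply the new offset:

#include <librdkafka/rdkafka.h>
#include <stdio.h>

static void seek_to (rd_kafka_topic_t *rkt, int32_t partition,
                     int64_t offset) {
        rd_kafka_resp_err_t err = rd_kafka_seek(rkt, partition, offset,
                                                2000 /*timeout_ms*/);
        if (err)
                fprintf(stderr, "seek: %s\n", rd_kafka_err2str(err));
}
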
-
-
-
-static ssize_t rd_kafka_consume_batch0 (rd_kafka_q_t *rkq,
-					int timeout_ms,
-					rd_kafka_message_t **rkmessages,
-					size_t rkmessages_size) {
-	/* Populate application's rkmessages array. */
-	return rd_kafka_q_serve_rkmessages(rkq, timeout_ms,
-					   rkmessages, rkmessages_size);
-}
-
-
-ssize_t rd_kafka_consume_batch (rd_kafka_topic_t *app_rkt, int32_t partition,
-				int timeout_ms,
-				rd_kafka_message_t **rkmessages,
-				size_t rkmessages_size) {
-        rd_kafka_itopic_t *rkt = rd_kafka_topic_a2i(app_rkt);
-	shptr_rd_kafka_toppar_t *s_rktp;
-        rd_kafka_toppar_t *rktp;
-	ssize_t cnt;
-
-	/* Get toppar */
-	rd_kafka_topic_rdlock(rkt);
-	s_rktp = rd_kafka_toppar_get(rkt, partition, 0/*no ua on miss*/);
-	if (unlikely(!s_rktp))
-		s_rktp = rd_kafka_toppar_desired_get(rkt, partition);
-	rd_kafka_topic_rdunlock(rkt);
-
-	if (unlikely(!s_rktp)) {
-		/* No such toppar known */
-		rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION,
-					ESRCH);
-		return -1;
-	}
-
-        rktp = rd_kafka_toppar_s2i(s_rktp);
-
-	/* Populate application's rkmessages array. */
-	cnt = rd_kafka_q_serve_rkmessages(rktp->rktp_fetchq, timeout_ms,
-					  rkmessages, rkmessages_size);
-
-	rd_kafka_toppar_destroy(s_rktp); /* refcnt from .._get() */
-
-	rd_kafka_set_last_error(0, 0);
-
-	return cnt;
-}
-
-ssize_t rd_kafka_consume_batch_queue (rd_kafka_queue_t *rkqu,
-				      int timeout_ms,
-				      rd_kafka_message_t **rkmessages,
-				      size_t rkmessages_size) {
-	/* Populate application's rkmessages array. */
-	return rd_kafka_consume_batch0(rkqu->rkqu_q, timeout_ms,
-				       rkmessages, rkmessages_size);
-}
-
-
-struct consume_ctx {
-	void (*consume_cb) (rd_kafka_message_t *rkmessage, void *opaque);
-	void *opaque;
-};
-
-
-/**
- * Trampoline for application's consume_cb()
- */
-static rd_kafka_op_res_t
-rd_kafka_consume_cb (rd_kafka_t *rk,
-                     rd_kafka_q_t *rkq,
-                     rd_kafka_op_t *rko,
-                     rd_kafka_q_cb_type_t cb_type, void *opaque) {
-	struct consume_ctx *ctx = opaque;
-	rd_kafka_message_t *rkmessage;
-
-        if (unlikely(rd_kafka_op_version_outdated(rko, 0))) {
-                rd_kafka_op_destroy(rko);
-                return RD_KAFKA_OP_RES_HANDLED;
-        }
-
-	rkmessage = rd_kafka_message_get(rko);
-
-	rd_kafka_op_offset_store(rk, rko, rkmessage);
-
-	ctx->consume_cb(rkmessage, ctx->opaque);
-
-        rd_kafka_op_destroy(rko);
-
-        return RD_KAFKA_OP_RES_HANDLED;
-}
-
-
-
-static rd_kafka_op_res_t
-rd_kafka_consume_callback0 (rd_kafka_q_t *rkq, int timeout_ms, int max_cnt,
-                            void (*consume_cb) (rd_kafka_message_t
-                                                *rkmessage,
-                                                void *opaque),
-                            void *opaque) {
-        struct consume_ctx ctx = { .consume_cb = consume_cb, .opaque = opaque };
-        return rd_kafka_q_serve(rkq, timeout_ms, max_cnt,
-                                RD_KAFKA_Q_CB_RETURN,
-                                rd_kafka_consume_cb, &ctx);
-
-}
-
-
-int rd_kafka_consume_callback (rd_kafka_topic_t *app_rkt, int32_t partition,
-			       int timeout_ms,
-			       void (*consume_cb) (rd_kafka_message_t
-						   *rkmessage,
-						   void *opaque),
-			       void *opaque) {
-        rd_kafka_itopic_t *rkt = rd_kafka_topic_a2i(app_rkt);
-        shptr_rd_kafka_toppar_t *s_rktp;
-	rd_kafka_toppar_t *rktp;
-	int r;
-
-	/* Get toppar */
-	rd_kafka_topic_rdlock(rkt);
-	s_rktp = rd_kafka_toppar_get(rkt, partition, 0/*no ua on miss*/);
-	if (unlikely(!s_rktp))
-		s_rktp = rd_kafka_toppar_desired_get(rkt, partition);
-	rd_kafka_topic_rdunlock(rkt);
-
-	if (unlikely(!s_rktp)) {
-		/* No such toppar known */
-		rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION,
-					ESRCH);
-		return -1;
-	}
-
-        rktp = rd_kafka_toppar_s2i(s_rktp);
-	r = rd_kafka_consume_callback0(rktp->rktp_fetchq, timeout_ms,
-                                       rkt->rkt_conf.consume_callback_max_msgs,
-				       consume_cb, opaque);
-
-	rd_kafka_toppar_destroy(s_rktp);
-
-	rd_kafka_set_last_error(0, 0);
-
-	return r;
-}
-
-
-
-int rd_kafka_consume_callback_queue (rd_kafka_queue_t *rkqu,
-				     int timeout_ms,
-				     void (*consume_cb) (rd_kafka_message_t
-							 *rkmessage,
-							 void *opaque),
-				     void *opaque) {
-	return rd_kafka_consume_callback0(rkqu->rkqu_q, timeout_ms, 0,
-					  consume_cb, opaque);
-}
-
-
-/**
- * Serve queue 'rkq' and return one message.
- * Serving the queue will also invoke any callbacks registered for matching
- * events; this includes consumer_cb(), in which case no message will be
- * returned.
- */
-static rd_kafka_message_t *rd_kafka_consume0 (rd_kafka_t *rk,
-                                              rd_kafka_q_t *rkq,
-					      int timeout_ms) {
-	rd_kafka_op_t *rko;
-	rd_kafka_message_t *rkmessage = NULL;
-	rd_ts_t abs_timeout = rd_timeout_init(timeout_ms);
-
-	rd_kafka_yield_thread = 0;
-        while ((rko = rd_kafka_q_pop(rkq,
-                                     rd_timeout_remains(abs_timeout), 0))) {
-                rd_kafka_op_res_t res;
-
-                res = rd_kafka_poll_cb(rk, rkq, rko,
-                                       RD_KAFKA_Q_CB_RETURN, NULL);
-
-                if (res == RD_KAFKA_OP_RES_PASS)
-                        break;
-
-                if (unlikely(res == RD_KAFKA_OP_RES_YIELD ||
-                            rd_kafka_yield_thread)) {
-                        /* Callback called rd_kafka_yield(), we must
-                         * stop dispatching the queue and return. */
-                        rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__INTR,
-                                                EINTR);
-                        return NULL;
-                }
-
-                /* Message was handled by callback. */
-                continue;
-        }
-
-	if (!rko) {
-		/* Timeout reached with no op returned. */
-		rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__TIMED_OUT,
-					ETIMEDOUT);
-		return NULL;
-	}
-
-        rd_kafka_assert(rk,
-                        rko->rko_type == RD_KAFKA_OP_FETCH ||
-                        rko->rko_type == RD_KAFKA_OP_CONSUMER_ERR);
-
-	/* Get rkmessage from rko */
-	rkmessage = rd_kafka_message_get(rko);
-
-	/* Store offset */
-	rd_kafka_op_offset_store(rk, rko, rkmessage);
-
-	rd_kafka_set_last_error(0, 0);
-
-	return rkmessage;
-}
-
-rd_kafka_message_t *rd_kafka_consume (rd_kafka_topic_t *app_rkt,
-                                      int32_t partition,
-				      int timeout_ms) {
-        rd_kafka_itopic_t *rkt = rd_kafka_topic_a2i(app_rkt);
-        shptr_rd_kafka_toppar_t *s_rktp;
-	rd_kafka_toppar_t *rktp;
-	rd_kafka_message_t *rkmessage;
-
-	rd_kafka_topic_rdlock(rkt);
-	s_rktp = rd_kafka_toppar_get(rkt, partition, 0/*no ua on miss*/);
-	if (unlikely(!s_rktp))
-		s_rktp = rd_kafka_toppar_desired_get(rkt, partition);
-	rd_kafka_topic_rdunlock(rkt);
-
-	if (unlikely(!s_rktp)) {
-		/* No such toppar known */
-		rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION,
-					ESRCH);
-		return NULL;
-	}
-
-        rktp = rd_kafka_toppar_s2i(s_rktp);
-	rkmessage = rd_kafka_consume0(rkt->rkt_rk,
-                                      rktp->rktp_fetchq, timeout_ms);
-
-	rd_kafka_toppar_destroy(s_rktp); /* refcnt from .._get() */
-
-	return rkmessage;
-}
-
-
-rd_kafka_message_t *rd_kafka_consume_queue (rd_kafka_queue_t *rkqu,
-					    int timeout_ms) {
-	return rd_kafka_consume0(rkqu->rkqu_rk, rkqu->rkqu_q, timeout_ms);
-}
-
-
-
-
-rd_kafka_resp_err_t rd_kafka_poll_set_consumer (rd_kafka_t *rk) {
-        rd_kafka_cgrp_t *rkcg;
-
-        if (!(rkcg = rd_kafka_cgrp_get(rk)))
-                return RD_KAFKA_RESP_ERR__UNKNOWN_GROUP;
-
-        rd_kafka_q_fwd_set(rk->rk_rep, rkcg->rkcg_q);
-        return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-
-
-
-rd_kafka_message_t *rd_kafka_consumer_poll (rd_kafka_t *rk,
-                                            int timeout_ms) {
-        rd_kafka_cgrp_t *rkcg;
-
-        if (unlikely(!(rkcg = rd_kafka_cgrp_get(rk)))) {
-                rd_kafka_message_t *rkmessage = rd_kafka_message_new();
-                rkmessage->err = RD_KAFKA_RESP_ERR__UNKNOWN_GROUP;
-                return rkmessage;
-        }
-
-        return rd_kafka_consume0(rk, rkcg->rkcg_q, timeout_ms);
-}
-
-
-rd_kafka_resp_err_t rd_kafka_consumer_close (rd_kafka_t *rk) {
-        rd_kafka_cgrp_t *rkcg;
-        rd_kafka_op_t *rko;
-        rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR__TIMED_OUT;
-	rd_kafka_q_t *rkq;
-
-        if (!(rkcg = rd_kafka_cgrp_get(rk)))
-                return RD_KAFKA_RESP_ERR__UNKNOWN_GROUP;
-
-	/* Redirect cgrp queue to our temporary queue to make sure
-	 * all posted ops (e.g., rebalance callbacks) are served by
-	 * this function. */
-	rkq = rd_kafka_q_new(rk);
-	rd_kafka_q_fwd_set(rkcg->rkcg_q, rkq);
-
-        rd_kafka_cgrp_terminate(rkcg, RD_KAFKA_REPLYQ(rkq, 0)); /* async */
-
-        while ((rko = rd_kafka_q_pop(rkq, RD_POLL_INFINITE, 0))) {
-                rd_kafka_op_res_t res;
-                if ((rko->rko_type & ~RD_KAFKA_OP_FLAGMASK) ==
-		    RD_KAFKA_OP_TERMINATE) {
-                        err = rko->rko_err;
-                        rd_kafka_op_destroy(rko);
-                        break;
-                }
-                res = rd_kafka_poll_cb(rk, rkq, rko,
-                                       RD_KAFKA_Q_CB_RETURN, NULL);
-                if (res == RD_KAFKA_OP_RES_PASS)
-                        rd_kafka_op_destroy(rko);
-                /* Ignore YIELD, we need to finish */
-        }
-
-        rd_kafka_q_destroy(rkq);
-
-	rd_kafka_q_fwd_set(rkcg->rkcg_q, NULL);
-
-        return err;
-}
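
A sketch of the high-level consumer flow served by poll_set_consumer()/consumer_poll()/consumer_close() above (assumes the handle was created as RD_KAFKA_CONSUMER with group.id set; topic name is a placeholder):

#include <librdkafka/rdkafka.h>
#include <stdio.h>

static void run_consumer (rd_kafka_t *rk) {
        rd_kafka_poll_set_consumer(rk);  /* route cgrp ops to consumer_poll() */

        rd_kafka_topic_partition_list_t *topics =
                rd_kafka_topic_partition_list_new(1);
        rd_kafka_topic_partition_list_add(topics, "example-topic",
                                          RD_KAFKA_PARTITION_UA);
        rd_kafka_subscribe(rk, topics);
        rd_kafka_topic_partition_list_destroy(topics);

        for (int i = 0 ; i < 100 ; i++) {
                rd_kafka_message_t *msg = rd_kafka_consumer_poll(rk, 1000);
                if (!msg)
                        continue;
                if (!msg->err)
                        printf("message at offset %lld\n",
                               (long long)msg->offset);
                rd_kafka_message_destroy(msg);
        }

        rd_kafka_consumer_close(rk);     /* serves rebalance callbacks, see above */
}
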
-
-
-
-rd_kafka_resp_err_t
-rd_kafka_committed (rd_kafka_t *rk,
-		    rd_kafka_topic_partition_list_t *partitions,
-		    int timeout_ms) {
-        rd_kafka_q_t *rkq;
-        rd_kafka_resp_err_t err;
-        rd_kafka_cgrp_t *rkcg;
-	rd_ts_t abs_timeout = rd_timeout_init(timeout_ms);
-
-        if (!partitions)
-                return RD_KAFKA_RESP_ERR__INVALID_ARG;
-
-        if (!(rkcg = rd_kafka_cgrp_get(rk)))
-                return RD_KAFKA_RESP_ERR__UNKNOWN_GROUP;
-
-	/* Set default offsets. */
-	rd_kafka_topic_partition_list_reset_offsets(partitions,
-                                                    RD_KAFKA_OFFSET_INVALID);
-
-	rkq = rd_kafka_q_new(rk);
-
-        do {
-                rd_kafka_op_t *rko;
-		int state_version = rd_kafka_brokers_get_state_version(rk);
-
-                rko = rd_kafka_op_new(RD_KAFKA_OP_OFFSET_FETCH);
-		rd_kafka_op_set_replyq(rko, rkq, NULL);
-
-                /* Issue #827
-                 * Copy partition list to avoid use-after-free if we time out
-                 * here, the app frees the list, and then cgrp starts
-                 * processing the op. */
-		rko->rko_u.offset_fetch.partitions =
-                        rd_kafka_topic_partition_list_copy(partitions);
-		rko->rko_u.offset_fetch.do_free = 1;
-
-                if (!rd_kafka_q_enq(rkcg->rkcg_ops, rko)) {
-                        err = RD_KAFKA_RESP_ERR__DESTROY;
-                        break;
-                }
-
-                rko = rd_kafka_q_pop(rkq, rd_timeout_remains(abs_timeout), 0);
-                if (rko) {
-                        if (!(err = rko->rko_err))
-                                rd_kafka_topic_partition_list_update(
-                                        partitions,
-                                        rko->rko_u.offset_fetch.partitions);
-                        else if ((err == RD_KAFKA_RESP_ERR__WAIT_COORD ||
-				    err == RD_KAFKA_RESP_ERR__TRANSPORT) &&
-				   !rd_kafka_brokers_wait_state_change(
-					   rk, state_version,
-					   rd_timeout_remains(abs_timeout)))
-				err = RD_KAFKA_RESP_ERR__TIMED_OUT;
-
-                        rd_kafka_op_destroy(rko);
-                } else
-                        err = RD_KAFKA_RESP_ERR__TIMED_OUT;
-        } while (err == RD_KAFKA_RESP_ERR__TRANSPORT ||
-		 err == RD_KAFKA_RESP_ERR__WAIT_COORD);
-
-        rd_kafka_q_destroy(rkq);
-
-        return err;
-}
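
A sketch of fetching committed offsets from the application side with rd_kafka_committed() (topic and partition are placeholders; results are written back into the supplied list):

#include <librdkafka/rdkafka.h>
#include <stdio.h>

static void show_committed (rd_kafka_t *rk) {
        rd_kafka_topic_partition_list_t *parts =
                rd_kafka_topic_partition_list_new(1);
        rd_kafka_topic_partition_list_add(parts, "example-topic", 0);

        rd_kafka_resp_err_t err = rd_kafka_committed(rk, parts, 5000 /*ms*/);
        if (!err)
                printf("committed offset: %lld\n",
                       (long long)parts->elems[0].offset);
        else
                fprintf(stderr, "committed: %s\n", rd_kafka_err2str(err));

        rd_kafka_topic_partition_list_destroy(parts);
}
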
-
-
-
-rd_kafka_resp_err_t
-rd_kafka_position (rd_kafka_t *rk,
-		   rd_kafka_topic_partition_list_t *partitions) {
- 	int i;
-
-	/* Set default offsets. */
-	rd_kafka_topic_partition_list_reset_offsets(partitions,
-						    RD_KAFKA_OFFSET_INVALID);
-
-	for (i = 0 ; i < partitions->cnt ; i++) {
-		rd_kafka_topic_partition_t *rktpar = &partitions->elems[i];
-		shptr_rd_kafka_toppar_t *s_rktp;
-		rd_kafka_toppar_t *rktp;
-
-		if (!(s_rktp = rd_kafka_toppar_get2(rk, rktpar->topic,
-						    rktpar->partition, 0, 1))) {
-			rktpar->err = RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION;
-			rktpar->offset = RD_KAFKA_OFFSET_INVALID;
-			continue;
-		}
-
-		rktp = rd_kafka_toppar_s2i(s_rktp);
-		rd_kafka_toppar_lock(rktp);
-		rktpar->offset = rktp->rktp_app_offset;
-		rktpar->err = RD_KAFKA_RESP_ERR_NO_ERROR;
-		rd_kafka_toppar_unlock(rktp);
-		rd_kafka_toppar_destroy(s_rktp);
-	}
-
-        return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-
-
-struct _query_wmark_offsets_state {
-	rd_kafka_resp_err_t err;
-	const char *topic;
-	int32_t partition;
-	int64_t offsets[2];
-	int     offidx;  /* next offset to set from response */
-	rd_ts_t ts_end;
-	int     state_version;  /* Broker state version */
-};
-
-static void rd_kafka_query_wmark_offsets_resp_cb (rd_kafka_t *rk,
-						  rd_kafka_broker_t *rkb,
-						  rd_kafka_resp_err_t err,
-						  rd_kafka_buf_t *rkbuf,
-						  rd_kafka_buf_t *request,
-						  void *opaque) {
-	struct _query_wmark_offsets_state *state = opaque;
-        rd_kafka_topic_partition_list_t *offsets;
-        rd_kafka_topic_partition_t *rktpar;
-
-        offsets = rd_kafka_topic_partition_list_new(1);
-        err = rd_kafka_handle_Offset(rk, rkb, err, rkbuf, request, offsets);
-        if (err == RD_KAFKA_RESP_ERR__IN_PROGRESS) {
-                rd_kafka_topic_partition_list_destroy(offsets);
-                return; /* Retrying */
-        }
-
-	/* Retry if no broker connection is available yet. */
-	if ((err == RD_KAFKA_RESP_ERR__WAIT_COORD ||
-	     err == RD_KAFKA_RESP_ERR__TRANSPORT) &&
-	    rkb &&
-	    rd_kafka_brokers_wait_state_change(
-		    rkb->rkb_rk, state->state_version,
-		    rd_timeout_remains(state->ts_end))) {
-		/* Retry */
-		state->state_version = rd_kafka_brokers_get_state_version(rk);
-		request->rkbuf_retries = 0;
-		if (rd_kafka_buf_retry(rkb, request)) {
-                        rd_kafka_topic_partition_list_destroy(offsets);
-                        return; /* Retry in progress */
-                }
-		/* FALLTHRU */
-	}
-
-        /* Partition not seen in response. */
-        if (!(rktpar = rd_kafka_topic_partition_list_find(offsets,
-                                                          state->topic,
-                                                          state->partition)))
-                err = RD_KAFKA_RESP_ERR__BAD_MSG;
-        else if (rktpar->err)
-                err = rktpar->err;
-        else
-                state->offsets[state->offidx] = rktpar->offset;
-
-        state->offidx++;
-
-        if (err || state->offidx == 2) /* Error or Done */
-                state->err = err;
-
-        rd_kafka_topic_partition_list_destroy(offsets);
-}
-
-
-rd_kafka_resp_err_t
-rd_kafka_query_watermark_offsets (rd_kafka_t *rk, const char *topic,
-                                  int32_t partition,
-                                  int64_t *low, int64_t *high, int timeout_ms) {
-        rd_kafka_q_t *rkq;
-        struct _query_wmark_offsets_state state;
-        rd_ts_t ts_end = rd_timeout_init(timeout_ms);
-        rd_kafka_topic_partition_list_t *partitions;
-        rd_kafka_topic_partition_t *rktpar;
-        struct rd_kafka_partition_leader *leader;
-        rd_list_t leaders;
-        rd_kafka_resp_err_t err;
-
-        partitions = rd_kafka_topic_partition_list_new(1);
-        rktpar = rd_kafka_topic_partition_list_add(partitions,
-                                                   topic, partition);
-
-        rd_list_init(&leaders, partitions->cnt,
-                     (void *)rd_kafka_partition_leader_destroy);
-
-        err = rd_kafka_topic_partition_list_query_leaders(rk, partitions,
-                                                          &leaders, timeout_ms);
-        if (err) {
-                rd_list_destroy(&leaders);
-                rd_kafka_topic_partition_list_destroy(partitions);
-                return err;
-        }
-
-        leader = rd_list_elem(&leaders, 0);
-
-        rkq = rd_kafka_q_new(rk);
-
-        /* Due to KAFKA-1588 we need to send a request for each wanted offset,
-         * in this case one for the low watermark and one for the high. */
-        state.topic = topic;
-        state.partition = partition;
-        state.offsets[0] = RD_KAFKA_OFFSET_BEGINNING;
-        state.offsets[1] = RD_KAFKA_OFFSET_END;
-        state.offidx = 0;
-        state.err = RD_KAFKA_RESP_ERR__IN_PROGRESS;
-        state.ts_end = ts_end;
-        state.state_version = rd_kafka_brokers_get_state_version(rk);
-
-
-        rktpar->offset =  RD_KAFKA_OFFSET_BEGINNING;
-        rd_kafka_OffsetRequest(leader->rkb, partitions, 0,
-                               RD_KAFKA_REPLYQ(rkq, 0),
-                               rd_kafka_query_wmark_offsets_resp_cb,
-                               &state);
-
-        rktpar->offset =  RD_KAFKA_OFFSET_END;
-        rd_kafka_OffsetRequest(leader->rkb, partitions, 0,
-                               RD_KAFKA_REPLYQ(rkq, 0),
-                               rd_kafka_query_wmark_offsets_resp_cb,
-                               &state);
-
-        rd_kafka_topic_partition_list_destroy(partitions);
-        rd_list_destroy(&leaders);
-
-        /* Wait for reply (or timeout) */
-        while (state.err == RD_KAFKA_RESP_ERR__IN_PROGRESS &&
-               rd_kafka_q_serve(rkq, 100, 0, RD_KAFKA_Q_CB_CALLBACK,
-                                rd_kafka_poll_cb, NULL) !=
-               RD_KAFKA_OP_RES_YIELD)
-                ;
-
-        rd_kafka_q_destroy(rkq);
-
-        if (state.err)
-                return state.err;
-        else if (state.offidx != 2)
-                return RD_KAFKA_RESP_ERR__FAIL;
-
-        /* We are not certain about the returned order. */
-        if (state.offsets[0] < state.offsets[1]) {
-                *low = state.offsets[0];
-                *high  = state.offsets[1];
-        } else {
-                *low = state.offsets[1];
-                *high = state.offsets[0];
-        }
-
-        /* If partition is empty only one offset (the last) will be returned. */
-        if (*low < 0 && *high >= 0)
-                *low = *high;
-
-        return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
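
rd_kafka_query_watermark_offsets() above performs a blocking broker round-trip,
sending one OffsetRequest per watermark because of KAFKA-1588. A minimal usage
sketch, with the same include and handle assumptions as the earlier example and
an arbitrary 5-second timeout:

/* Ask the partition leader for the current low/high watermarks of a
 * hypothetical topic/partition. */
static void print_watermarks (rd_kafka_t *rk) {
        int64_t lo = -1, hi = -1;
        rd_kafka_resp_err_t err =
                rd_kafka_query_watermark_offsets(rk, "mytopic", 0,
                                                 &lo, &hi, 5000 /* ms */);
        if (err)
                fprintf(stderr, "query_watermark_offsets: %s\n",
                        rd_kafka_err2str(err));
        else
                printf("low=%" PRId64 " high=%" PRId64 "\n", lo, hi);
}
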
-
-
-rd_kafka_resp_err_t
-rd_kafka_get_watermark_offsets (rd_kafka_t *rk, const char *topic,
-				int32_t partition,
-				int64_t *low, int64_t *high) {
-	shptr_rd_kafka_toppar_t *s_rktp;
-	rd_kafka_toppar_t *rktp;
-
-	s_rktp = rd_kafka_toppar_get2(rk, topic, partition, 0, 1);
-	if (!s_rktp)
-		return RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION;
-	rktp = rd_kafka_toppar_s2i(s_rktp);
-
-	rd_kafka_toppar_lock(rktp);
-	*low = rktp->rktp_lo_offset;
-	*high = rktp->rktp_hi_offset;
-	rd_kafka_toppar_unlock(rktp);
-
-	rd_kafka_toppar_destroy(s_rktp);
-
-	return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
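
rd_kafka_get_watermark_offsets() is the non-blocking counterpart: it only reads
the values cached on the local toppar, which are kept up to date while the
partition is being consumed, so no broker request is made. A sketch under the
same assumptions:

/* Read the locally cached watermarks; they remain RD_KAFKA_OFFSET_INVALID
 * until the fetcher has populated them. */
static void print_cached_watermarks (rd_kafka_t *rk) {
        int64_t lo = -1, hi = -1;
        if (!rd_kafka_get_watermark_offsets(rk, "mytopic", 0, &lo, &hi))
                printf("cached: low=%" PRId64 " high=%" PRId64 "\n", lo, hi);
}
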
-
-
-/**
- * @brief get_offsets_for_times() state
- */
-struct _get_offsets_for_times {
-        rd_kafka_topic_partition_list_t *results;
-        rd_kafka_resp_err_t err;
-        int wait_reply;
-        int state_version;
-        rd_ts_t ts_end;
-};
-
-/**
- * @brief Handle OffsetRequest responses
- */
-static void rd_kafka_get_offsets_for_times_resp_cb (rd_kafka_t *rk,
-                                                  rd_kafka_broker_t *rkb,
-                                                  rd_kafka_resp_err_t err,
-                                                  rd_kafka_buf_t *rkbuf,
-                                                  rd_kafka_buf_t *request,
-                                                  void *opaque) {
-        struct _get_offsets_for_times *state = opaque;
-
-        err = rd_kafka_handle_Offset(rk, rkb, err, rkbuf, request,
-                                     state->results);
-        if (err == RD_KAFKA_RESP_ERR__IN_PROGRESS)
-                return; /* Retrying */
-
-        /* Retry if no broker connection is available yet. */
-        if ((err == RD_KAFKA_RESP_ERR__WAIT_COORD ||
-             err == RD_KAFKA_RESP_ERR__TRANSPORT) &&
-            rkb &&
-            rd_kafka_brokers_wait_state_change(
-                    rkb->rkb_rk, state->state_version,
-                    rd_timeout_remains(state->ts_end))) {
-                /* Retry */
-                state->state_version = rd_kafka_brokers_get_state_version(rk);
-                request->rkbuf_retries = 0;
-                if (rd_kafka_buf_retry(rkb, request))
-                        return; /* Retry in progress */
-                /* FALLTHRU */
-        }
-
-        if (err && !state->err)
-                state->err = err;
-
-        state->wait_reply--;
-}
-
-
-rd_kafka_resp_err_t
-rd_kafka_offsets_for_times (rd_kafka_t *rk,
-                            rd_kafka_topic_partition_list_t *offsets,
-                            int timeout_ms) {
-        rd_kafka_q_t *rkq;
-        struct _get_offsets_for_times state = RD_ZERO_INIT;
-        rd_ts_t ts_end = rd_timeout_init(timeout_ms);
-        rd_list_t leaders;
-        int i;
-        rd_kafka_resp_err_t err;
-        struct rd_kafka_partition_leader *leader;
-
-        if (offsets->cnt == 0)
-                return RD_KAFKA_RESP_ERR__INVALID_ARG;
-
-        rd_list_init(&leaders, offsets->cnt,
-                     (void *)rd_kafka_partition_leader_destroy);
-
-        err = rd_kafka_topic_partition_list_query_leaders(rk, offsets, &leaders,
-                                                          timeout_ms);
-        if (err) {
-                rd_list_destroy(&leaders);
-                return err;
-        }
-
-
-        rkq = rd_kafka_q_new(rk);
-
-        state.wait_reply = 0;
-        state.results = rd_kafka_topic_partition_list_new(offsets->cnt);
-
-        /* For each leader send a request for its partitions */
-        RD_LIST_FOREACH(leader, &leaders, i) {
-                state.wait_reply++;
-                rd_kafka_OffsetRequest(leader->rkb, leader->partitions, 1,
-                                       RD_KAFKA_REPLYQ(rkq, 0),
-                                       rd_kafka_get_offsets_for_times_resp_cb,
-                                       &state);
-        }
-
-        rd_list_destroy(&leaders);
-
-        /* Wait for reply (or timeout) */
-        while (state.wait_reply > 0 && rd_timeout_remains(ts_end) > 0)
-                rd_kafka_q_serve(rkq, rd_timeout_remains(ts_end),
-                                0, RD_KAFKA_Q_CB_CALLBACK,
-                                 rd_kafka_poll_cb, NULL);
-
-        rd_kafka_q_destroy(rkq);
-
-        /* Then update the queried partitions. */
-        if (!state.err)
-                rd_kafka_topic_partition_list_update(offsets, state.results);
-
-        rd_kafka_topic_partition_list_destroy(state.results);
-
-        return state.err;
-}
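
rd_kafka_offsets_for_times() above takes a partition list whose .offset fields
carry timestamps (in milliseconds) on input and receive the earliest matching
offsets on output. A minimal sketch, same assumptions as before, with a
placeholder topic and timestamp:

/* Translate a wall-clock timestamp (ms) into the first offset at or
 * after that time for a hypothetical partition. */
static void offset_for_time (rd_kafka_t *rk, int64_t ts_ms) {
        rd_kafka_topic_partition_list_t *parts =
                rd_kafka_topic_partition_list_new(1);
        rd_kafka_topic_partition_list_add(parts, "mytopic", 0)->offset = ts_ms;

        rd_kafka_resp_err_t err =
                rd_kafka_offsets_for_times(rk, parts, 10000 /* ms */);
        if (!err)
                printf("offset at %" PRId64 " ms: %" PRId64 "\n",
                       ts_ms, parts->elems[0].offset);

        rd_kafka_topic_partition_list_destroy(parts);
}
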
-
-
-/**
- * rd_kafka_poll() (and similar) op callback handler.
- * Will either call registered callback depending on cb_type and op type
- * or return op to application, if applicable (e.g., fetch message).
- *
- * Returns 1 if op was handled, else 0.
- *
- * Locality: application thread
- */
-rd_kafka_op_res_t
-rd_kafka_poll_cb (rd_kafka_t *rk, rd_kafka_q_t *rkq, rd_kafka_op_t *rko,
-                  rd_kafka_q_cb_type_t cb_type, void *opaque) {
-	rd_kafka_msg_t *rkm;
-
-	/* Return-as-event requested, see if op can be converted to event,
-	 * otherwise fall through and trigger callbacks. */
-	if (cb_type == RD_KAFKA_Q_CB_EVENT && rd_kafka_event_setup(rk, rko))
-		return 0; /* Return as event */
-
-        switch ((int)rko->rko_type)
-        {
-        case RD_KAFKA_OP_FETCH:
-                if (!rk->rk_conf.consume_cb ||
-                    cb_type == RD_KAFKA_Q_CB_RETURN ||
-                    cb_type == RD_KAFKA_Q_CB_FORCE_RETURN)
-                        return RD_KAFKA_OP_RES_PASS; /* Dont handle here */
-                else {
-                        struct consume_ctx ctx = {
-                                .consume_cb = rk->rk_conf.consume_cb,
-                                .opaque = rk->rk_conf.opaque };
-
-                        return rd_kafka_consume_cb(rk, rkq, rko, cb_type, &ctx);
-                }
-                break;
-
-        case RD_KAFKA_OP_REBALANCE:
-                /* If EVENT_REBALANCE is enabled but rebalance_cb isn't,
-                 * we need to perform a dummy assign for the application.
-                 * This might happen during termination with consumer_close(). */
-                if (rk->rk_conf.rebalance_cb)
-                        rk->rk_conf.rebalance_cb(
-                                rk, rko->rko_err,
-                                rko->rko_u.rebalance.partitions,
-                                rk->rk_conf.opaque);
-                else {
-                        rd_kafka_dbg(rk, CGRP, "UNASSIGN",
-                                     "Forcing unassign of %d partition(s)",
-                                     rko->rko_u.rebalance.partitions ?
-                                     rko->rko_u.rebalance.partitions->cnt : 0);
-                        rd_kafka_assign(rk, NULL);
-                }
-                break;
-
-        case RD_KAFKA_OP_OFFSET_COMMIT | RD_KAFKA_OP_REPLY:
-		if (!rko->rko_u.offset_commit.cb)
-			return RD_KAFKA_OP_RES_PASS; /* Dont handle here */
-		rko->rko_u.offset_commit.cb(
-                        rk, rko->rko_err,
-			rko->rko_u.offset_commit.partitions,
-			rko->rko_u.offset_commit.opaque);
-                break;
-
-        case RD_KAFKA_OP_CONSUMER_ERR:
-                /* rd_kafka_consumer_poll() (_Q_CB_CONSUMER):
-                 *   Consumer errors are returned to the application
-                 *   as rkmessages, not error callbacks.
-                 *
-                 * rd_kafka_poll() (_Q_CB_GLOBAL):
-                 *   convert to ERR op (fallthru)
-                 */
-                if (cb_type == RD_KAFKA_Q_CB_RETURN ||
-                    cb_type == RD_KAFKA_Q_CB_FORCE_RETURN) {
-                        /* return as message_t to application */
-                        return RD_KAFKA_OP_RES_PASS;
-                }
-		/* FALLTHRU */
-
-	case RD_KAFKA_OP_ERR:
-		if (rk->rk_conf.error_cb)
-			rk->rk_conf.error_cb(rk, rko->rko_err,
-					     rko->rko_u.err.errstr,
-                                             rk->rk_conf.opaque);
-		else
-			rd_kafka_log(rk, LOG_ERR, "ERROR",
-				     "%s: %s: %s",
-				     rk->rk_name,
-				     rd_kafka_err2str(rko->rko_err),
-				     rko->rko_u.err.errstr);
-		break;
-
-	case RD_KAFKA_OP_DR:
-		/* Delivery report:
-		 * call application DR callback for each message. */
-		while ((rkm = TAILQ_FIRST(&rko->rko_u.dr.msgq.rkmq_msgs))) {
-                        rd_kafka_message_t *rkmessage;
-
-			TAILQ_REMOVE(&rko->rko_u.dr.msgq.rkmq_msgs,
-				     rkm, rkm_link);
-
-                        rkmessage = rd_kafka_message_get_from_rkm(rko, rkm);
-
-                        if (rk->rk_conf.dr_msg_cb) {
-                                rk->rk_conf.dr_msg_cb(rk, rkmessage,
-                                                      rk->rk_conf.opaque);
-
-                        } else {
-
-                                rk->rk_conf.dr_cb(rk,
-                                                  rkmessage->payload,
-                                                  rkmessage->len,
-                                                  rkmessage->err,
-                                                  rk->rk_conf.opaque,
-                                                  rkmessage->_private);
-                        }
-
-                        rd_kafka_msg_destroy(rk, rkm);
-
-                        if (unlikely(rd_kafka_yield_thread)) {
-                                /* Callback called yield(),
-                                 * re-enqueue the op (if there are any
-                                 * remaining messages). */
-                                if (!TAILQ_EMPTY(&rko->rko_u.dr.msgq.
-                                                 rkmq_msgs))
-                                        rd_kafka_q_reenq(rkq, rko);
-                                else
-                                        rd_kafka_op_destroy(rko);
-                                return RD_KAFKA_OP_RES_YIELD;
-                        }
-		}
-
-		rd_kafka_msgq_init(&rko->rko_u.dr.msgq);
-
-		break;
-
-	case RD_KAFKA_OP_THROTTLE:
-		if (rk->rk_conf.throttle_cb)
-			rk->rk_conf.throt

<TRUNCATED>
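
The (truncated) rd_kafka_poll_cb() above is the dispatcher that runs the
application's registered callbacks whenever rd_kafka_poll() or
rd_kafka_consumer_poll() serves an op queue. A minimal producer-side sketch of
the callbacks it would invoke; the broker address and topic are placeholders:

#include <stdio.h>
#include "rdkafka.h"

static void my_dr_msg_cb (rd_kafka_t *rk, const rd_kafka_message_t *m,
                          void *opaque) {
        fprintf(stderr, "delivery: %s\n", rd_kafka_err2str(m->err));
}

static void my_error_cb (rd_kafka_t *rk, int err, const char *reason,
                         void *opaque) {
        fprintf(stderr, "error: %s: %s\n",
                rd_kafka_err2str((rd_kafka_resp_err_t)err), reason);
}

static rd_kafka_t *make_producer (void) {
        char errstr[512];
        rd_kafka_conf_t *conf = rd_kafka_conf_new();

        rd_kafka_conf_set(conf, "bootstrap.servers", "localhost:9092",
                          errstr, sizeof(errstr));
        rd_kafka_conf_set_dr_msg_cb(conf, my_dr_msg_cb);
        rd_kafka_conf_set_error_cb(conf, my_error_cb);

        /* rd_kafka_new() takes ownership of conf on success. */
        return rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
}

/* After producing, rd_kafka_poll(rk, 100) drives the callbacks above
 * through the rd_kafka_poll_cb() dispatch shown in the diff. */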

[10/51] [partial] nifi-minifi-cpp git commit: MINIFICPP-512 - upgrade to librdkafka 0.11.4

Posted by ph...@apache.org.
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/Doxyfile
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/Doxyfile b/thirdparty/librdkafka-0.11.4/Doxyfile
new file mode 100644
index 0000000..8e94e12
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/Doxyfile
@@ -0,0 +1,2385 @@
+# Doxyfile 1.8.9.1
+
+# This file describes the settings to be used by the documentation system
+# doxygen (www.doxygen.org) for a project.
+#
+# All text after a double hash (##) is considered a comment and is placed in
+# front of the TAG it is preceding.
+#
+# All text after a single hash (#) is considered a comment and will be ignored.
+# The format is:
+# TAG = value [value, ...]
+# For lists, items can also be appended using:
+# TAG += value [value, ...]
+# Values that contain spaces should be placed between quotes (\" \").
+
+#---------------------------------------------------------------------------
+# Project related configuration options
+#---------------------------------------------------------------------------
+
+# This tag specifies the encoding used for all characters in the config file
+# that follow. The default is UTF-8 which is also the encoding used for all text
+# before the first occurrence of this tag. Doxygen uses libiconv (or the iconv
+# built into libc) for the transcoding. See http://www.gnu.org/software/libiconv
+# for the list of possible encodings.
+# The default value is: UTF-8.
+
+DOXYFILE_ENCODING      = UTF-8
+
+# The PROJECT_NAME tag is a single word (or a sequence of words surrounded by
+# double-quotes, unless you are using Doxywizard) that should identify the
+# project for which the documentation is generated. This name is used in the
+# title of most generated pages and in a few other places.
+# The default value is: My Project.
+
+PROJECT_NAME           = "librdkafka"
+
+# The PROJECT_NUMBER tag can be used to enter a project or revision number. This
+# could be handy for archiving the generated documentation or if some version
+# control system is used.
+
+PROJECT_NUMBER         =
+
+# Using the PROJECT_BRIEF tag one can provide an optional one line description
+# for a project that appears at the top of each page and should give viewer a
+# quick idea about the purpose of the project. Keep the description short.
+
+PROJECT_BRIEF          = "The Apache Kafka C/C++ client library"
+
+# With the PROJECT_LOGO tag one can specify a logo or an icon that is included
+# in the documentation. The maximum height of the logo should not exceed 55
+# pixels and the maximum width should not exceed 200 pixels. Doxygen will copy
+# the logo to the output directory.
+
+#PROJECT_LOGO           = kafka_logo.png
+
+# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path
+# into which the generated documentation will be written. If a relative path is
+# entered, it will be relative to the location where doxygen was started. If
+# left blank the current directory will be used.
+
+OUTPUT_DIRECTORY       = staging-docs
+
+# If the CREATE_SUBDIRS tag is set to YES then doxygen will create 4096 sub-
+# directories (in 2 levels) under the output directory of each output format and
+# will distribute the generated files over these directories. Enabling this
+# option can be useful when feeding doxygen a huge amount of source files, where
+# putting all generated files in the same directory would otherwise cause
+# performance problems for the file system.
+# The default value is: NO.
+
+CREATE_SUBDIRS         = NO
+
+# If the ALLOW_UNICODE_NAMES tag is set to YES, doxygen will allow non-ASCII
+# characters to appear in the names of generated files. If set to NO, non-ASCII
+# characters will be escaped, for example _xE3_x81_x84 will be used for Unicode
+# U+3044.
+# The default value is: NO.
+
+ALLOW_UNICODE_NAMES    = NO
+
+# The OUTPUT_LANGUAGE tag is used to specify the language in which all
+# documentation generated by doxygen is written. Doxygen will use this
+# information to generate all constant output in the proper language.
+# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Catalan, Chinese,
+# Chinese-Traditional, Croatian, Czech, Danish, Dutch, English (United States),
+# Esperanto, Farsi (Persian), Finnish, French, German, Greek, Hungarian,
+# Indonesian, Italian, Japanese, Japanese-en (Japanese with English messages),
+# Korean, Korean-en (Korean with English messages), Latvian, Lithuanian,
+# Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, Romanian, Russian,
+# Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, Swedish, Turkish,
+# Ukrainian and Vietnamese.
+# The default value is: English.
+
+OUTPUT_LANGUAGE        = English
+
+# If the BRIEF_MEMBER_DESC tag is set to YES, doxygen will include brief member
+# descriptions after the members that are listed in the file and class
+# documentation (similar to Javadoc). Set to NO to disable this.
+# The default value is: YES.
+
+BRIEF_MEMBER_DESC      = YES
+
+# If the REPEAT_BRIEF tag is set to YES, doxygen will prepend the brief
+# description of a member or function before the detailed description
+#
+# Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
+# brief descriptions will be completely suppressed.
+# The default value is: YES.
+
+REPEAT_BRIEF           = YES
+
+# This tag implements a quasi-intelligent brief description abbreviator that is
+# used to form the text in various listings. Each string in this list, if found
+# as the leading text of the brief description, will be stripped from the text
+# and the result, after processing the whole list, is used as the annotated
+# text. Otherwise, the brief description is used as-is. If left blank, the
+# following values are used ($name is automatically replaced with the name of
+# the entity):The $name class, The $name widget, The $name file, is, provides,
+# specifies, contains, represents, a, an and the.
+
+ABBREVIATE_BRIEF       =
+
+# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
+# doxygen will generate a detailed section even if there is only a brief
+# description.
+# The default value is: NO.
+
+ALWAYS_DETAILED_SEC    = NO
+
+# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all
+# inherited members of a class in the documentation of that class as if those
+# members were ordinary class members. Constructors, destructors and assignment
+# operators of the base classes will not be shown.
+# The default value is: NO.
+
+INLINE_INHERITED_MEMB  = NO
+
+# If the FULL_PATH_NAMES tag is set to YES, doxygen will prepend the full path
+# before files name in the file list and in the header files. If set to NO the
+# shortest path that makes the file name unique will be used
+# The default value is: YES.
+
+FULL_PATH_NAMES        = YES
+
+# The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path.
+# Stripping is only done if one of the specified strings matches the left-hand
+# part of the path. The tag can be used to show relative paths in the file list.
+# If left blank the directory from which doxygen is run is used as the path to
+# strip.
+#
+# Note that you can specify absolute paths here, but also relative paths, which
+# will be relative from the directory where doxygen is started.
+# This tag requires that the tag FULL_PATH_NAMES is set to YES.
+
+STRIP_FROM_PATH        =
+
+# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the
+# path mentioned in the documentation of a class, which tells the reader which
+# header file to include in order to use a class. If left blank only the name of
+# the header file containing the class definition is used. Otherwise one should
+# specify the list of include paths that are normally passed to the compiler
+# using the -I flag.
+
+STRIP_FROM_INC_PATH    =
+
+# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but
+# less readable) file names. This can be useful if your file system doesn't
+# support long names like on DOS, Mac, or CD-ROM.
+# The default value is: NO.
+
+SHORT_NAMES            = NO
+
+# If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the
+# first line (until the first dot) of a Javadoc-style comment as the brief
+# description. If set to NO, the Javadoc-style will behave just like regular Qt-
+# style comments (thus requiring an explicit @brief command for a brief
+# description.)
+# The default value is: NO.
+
+JAVADOC_AUTOBRIEF      = NO
+
+# If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first
+# line (until the first dot) of a Qt-style comment as the brief description. If
+# set to NO, the Qt-style will behave just like regular Qt-style comments (thus
+# requiring an explicit \brief command for a brief description.)
+# The default value is: NO.
+
+QT_AUTOBRIEF           = NO
+
+# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a
+# multi-line C++ special comment block (i.e. a block of //! or /// comments) as
+# a brief description. This used to be the default behavior. The new default is
+# to treat a multi-line C++ comment block as a detailed description. Set this
+# tag to YES if you prefer the old behavior instead.
+#
+# Note that setting this tag to YES also means that rational rose comments are
+# not recognized any more.
+# The default value is: NO.
+
+MULTILINE_CPP_IS_BRIEF = NO
+
+# If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the
+# documentation from any documented member that it re-implements.
+# The default value is: YES.
+
+INHERIT_DOCS           = YES
+
+# If the SEPARATE_MEMBER_PAGES tag is set to YES then doxygen will produce a new
+# page for each member. If set to NO, the documentation of a member will be part
+# of the file/class/namespace that contains it.
+# The default value is: NO.
+
+SEPARATE_MEMBER_PAGES  = NO
+
+# The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen
+# uses this value to replace tabs by spaces in code fragments.
+# Minimum value: 1, maximum value: 16, default value: 4.
+
+TAB_SIZE               = 4
+
+# This tag can be used to specify a number of aliases that act as commands in
+# the documentation. An alias has the form:
+# name=value
+# For example adding
+# "sideeffect=@par Side Effects:\n"
+# will allow you to put the command \sideeffect (or @sideeffect) in the
+# documentation, which will result in a user-defined paragraph with heading
+# "Side Effects:". You can put \n's in the value part of an alias to insert
+# newlines.
+
+ALIASES                = "locality=@par Thread restriction:"
+ALIASES               += "locks=@par Lock restriction:"
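
The two ALIASES entries above add @locality and @locks commands that the
librdkafka headers use to document threading and locking requirements. A
hypothetical C doc comment illustrating the resulting markup (the function
name is made up purely for illustration):

/**
 * @brief Hypothetical declaration showing the custom Doxygen aliases.
 *
 * @locality any application thread
 * @locks none required by the caller
 */
rd_kafka_resp_err_t rd_kafka_example_call (rd_kafka_t *rk);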
+
+# This tag can be used to specify a number of word-keyword mappings (TCL only).
+# A mapping has the form "name=value". For example adding "class=itcl::class"
+# will allow you to use the command class in the itcl::class meaning.
+
+TCL_SUBST              =
+
+# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources
+# only. Doxygen will then generate output that is more tailored for C. For
+# instance, some of the names that are used will be different. The list of all
+# members will be omitted, etc.
+# The default value is: NO.
+
+OPTIMIZE_OUTPUT_FOR_C  = YES
+
+# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or
+# Python sources only. Doxygen will then generate output that is more tailored
+# for that language. For instance, namespaces will be presented as packages,
+# qualified scopes will look different, etc.
+# The default value is: NO.
+
+OPTIMIZE_OUTPUT_JAVA   = NO
+
+# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran
+# sources. Doxygen will then generate output that is tailored for Fortran.
+# The default value is: NO.
+
+OPTIMIZE_FOR_FORTRAN   = NO
+
+# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL
+# sources. Doxygen will then generate output that is tailored for VHDL.
+# The default value is: NO.
+
+OPTIMIZE_OUTPUT_VHDL   = NO
+
+# Doxygen selects the parser to use depending on the extension of the files it
+# parses. With this tag you can assign which parser to use for a given
+# extension. Doxygen has a built-in mapping, but you can override or extend it
+# using this tag. The format is ext=language, where ext is a file extension, and
+# language is one of the parsers supported by doxygen: IDL, Java, Javascript,
+# C#, C, C++, D, PHP, Objective-C, Python, Fortran (fixed format Fortran:
+# FortranFixed, free formatted Fortran: FortranFree, unknown formatted Fortran:
+# Fortran. In the latter case the parser tries to guess whether the code is fixed
+# or free formatted code, this is the default for Fortran type files), VHDL. For
+# instance to make doxygen treat .inc files as Fortran files (default is PHP),
+# and .f files as C (default is Fortran), use: inc=Fortran f=C.
+#
+# Note: For files without extension you can use no_extension as a placeholder.
+#
+# Note that for custom extensions you also need to set FILE_PATTERNS otherwise
+# the files are not read by doxygen.
+
+EXTENSION_MAPPING      =
+
+# If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments
+# according to the Markdown format, which allows for more readable
+# documentation. See http://daringfireball.net/projects/markdown/ for details.
+# The output of markdown processing is further processed by doxygen, so you can
+# mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in
+# case of backward compatibilities issues.
+# The default value is: YES.
+
+MARKDOWN_SUPPORT       = YES
+
+# When enabled doxygen tries to link words that correspond to documented
+# classes, or namespaces to their corresponding documentation. Such a link can
+# be prevented in individual cases by putting a % sign in front of the word or
+# globally by setting AUTOLINK_SUPPORT to NO.
+# The default value is: YES.
+
+AUTOLINK_SUPPORT       = YES
+
+# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
+# to include (a tag file for) the STL sources as input, then you should set this
+# tag to YES in order to let doxygen match functions declarations and
+# definitions whose arguments contain STL classes (e.g. func(std::string);
+# versus func(std::string) {}). This also makes the inheritance and collaboration
+# diagrams that involve STL classes more complete and accurate.
+# The default value is: NO.
+
+BUILTIN_STL_SUPPORT    = NO
+
+# If you use Microsoft's C++/CLI language, you should set this option to YES to
+# enable parsing support.
+# The default value is: NO.
+
+CPP_CLI_SUPPORT        = NO
+
+# Set the SIP_SUPPORT tag to YES if your project consists of sip (see:
+# http://www.riverbankcomputing.co.uk/software/sip/intro) sources only. Doxygen
+# will parse them like normal C++ but will assume all classes use public instead
+# of private inheritance when no explicit protection keyword is present.
+# The default value is: NO.
+
+SIP_SUPPORT            = NO
+
+# For Microsoft's IDL there are propget and propput attributes to indicate
+# getter and setter methods for a property. Setting this option to YES will make
+# doxygen to replace the get and set methods by a property in the documentation.
+# This will only work if the methods are indeed getting or setting a simple
+# type. If this is not the case, or you want to show the methods anyway, you
+# should set this option to NO.
+# The default value is: YES.
+
+IDL_PROPERTY_SUPPORT   = YES
+
+# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
+# tag is set to YES then doxygen will reuse the documentation of the first
+# member in the group (if any) for the other members of the group. By default
+# all members of a group must be documented explicitly.
+# The default value is: NO.
+
+DISTRIBUTE_GROUP_DOC   = NO
+
+# Set the SUBGROUPING tag to YES to allow class member groups of the same type
+# (for instance a group of public functions) to be put as a subgroup of that
+# type (e.g. under the Public Functions section). Set it to NO to prevent
+# subgrouping. Alternatively, this can be done per class using the
+# \nosubgrouping command.
+# The default value is: YES.
+
+SUBGROUPING            = YES
+
+# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions
+# are shown inside the group in which they are included (e.g. using \ingroup)
+# instead of on a separate page (for HTML and Man pages) or section (for LaTeX
+# and RTF).
+#
+# Note that this feature does not work in combination with
+# SEPARATE_MEMBER_PAGES.
+# The default value is: NO.
+
+INLINE_GROUPED_CLASSES = NO
+
+# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions
+# with only public data fields or simple typedef fields will be shown inline in
+# the documentation of the scope in which they are defined (i.e. file,
+# namespace, or group documentation), provided this scope is documented. If set
+# to NO, structs, classes, and unions are shown on a separate page (for HTML and
+# Man pages) or section (for LaTeX and RTF).
+# The default value is: NO.
+
+INLINE_SIMPLE_STRUCTS  = NO
+
+# When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or
+# enum is documented as struct, union, or enum with the name of the typedef. So
+# typedef struct TypeS {} TypeT, will appear in the documentation as a struct
+# with name TypeT. When disabled the typedef will appear as a member of a file,
+# namespace, or class. And the struct will be named TypeS. This can typically be
+# useful for C code in case the coding convention dictates that all compound
+# types are typedef'ed and only the typedef is referenced, never the tag name.
+# The default value is: NO.
+
+TYPEDEF_HIDES_STRUCT   = YES
+
+# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This
+# cache is used to resolve symbols given their name and scope. Since this can be
+# an expensive process and often the same symbol appears multiple times in the
+# code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small
+# doxygen will become slower. If the cache is too large, memory is wasted. The
+# cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range
+# is 0..9, the default is 0, corresponding to a cache size of 2^16=65536
+# symbols. At the end of a run doxygen will report the cache usage and suggest
+# the optimal cache size from a speed point of view.
+# Minimum value: 0, maximum value: 9, default value: 0.
+
+LOOKUP_CACHE_SIZE      = 0
+
+#---------------------------------------------------------------------------
+# Build related configuration options
+#---------------------------------------------------------------------------
+
+# If the EXTRACT_ALL tag is set to YES, doxygen will assume all entities in
+# documentation are documented, even if no documentation was available. Private
+# class members and static file members will be hidden unless the
+# EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES.
+# Note: This will also disable the warnings about undocumented members that are
+# normally produced when WARNINGS is set to YES.
+# The default value is: NO.
+
+EXTRACT_ALL            = NO
+
+# If the EXTRACT_PRIVATE tag is set to YES, all private members of a class will
+# be included in the documentation.
+# The default value is: NO.
+
+EXTRACT_PRIVATE        = NO
+
+# If the EXTRACT_PACKAGE tag is set to YES, all members with package or internal
+# scope will be included in the documentation.
+# The default value is: NO.
+
+EXTRACT_PACKAGE        = NO
+
+# If the EXTRACT_STATIC tag is set to YES, all static members of a file will be
+# included in the documentation.
+# The default value is: NO.
+
+EXTRACT_STATIC         = YES
+
+# If the EXTRACT_LOCAL_CLASSES tag is set to YES, classes (and structs) defined
+# locally in source files will be included in the documentation. If set to NO,
+# only classes defined in header files are included. Does not have any effect
+# for Java sources.
+# The default value is: YES.
+
+EXTRACT_LOCAL_CLASSES  = YES
+
+# This flag is only useful for Objective-C code. If set to YES, local methods,
+# which are defined in the implementation section but not in the interface are
+# included in the documentation. If set to NO, only methods in the interface are
+# included.
+# The default value is: NO.
+
+EXTRACT_LOCAL_METHODS  = NO
+
+# If this flag is set to YES, the members of anonymous namespaces will be
+# extracted and appear in the documentation as a namespace called
+# 'anonymous_namespace{file}', where file will be replaced with the base name of
+# the file that contains the anonymous namespace. By default anonymous namespace
+# are hidden.
+# The default value is: NO.
+
+EXTRACT_ANON_NSPACES   = NO
+
+# If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all
+# undocumented members inside documented classes or files. If set to NO these
+# members will be included in the various overviews, but no documentation
+# section is generated. This option has no effect if EXTRACT_ALL is enabled.
+# The default value is: NO.
+
+HIDE_UNDOC_MEMBERS     = NO
+
+# If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all
+# undocumented classes that are normally visible in the class hierarchy. If set
+# to NO, these classes will be included in the various overviews. This option
+# has no effect if EXTRACT_ALL is enabled.
+# The default value is: NO.
+
+HIDE_UNDOC_CLASSES     = NO
+
+# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend
+# (class|struct|union) declarations. If set to NO, these declarations will be
+# included in the documentation.
+# The default value is: NO.
+
+HIDE_FRIEND_COMPOUNDS  = NO
+
+# If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any
+# documentation blocks found inside the body of a function. If set to NO, these
+# blocks will be appended to the function's detailed documentation block.
+# The default value is: NO.
+
+HIDE_IN_BODY_DOCS      = NO
+
+# The INTERNAL_DOCS tag determines if documentation that is typed after a
+# \internal command is included. If the tag is set to NO then the documentation
+# will be excluded. Set it to YES to include the internal documentation.
+# The default value is: NO.
+
+INTERNAL_DOCS          = NO
+
+# If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file
+# names in lower-case letters. If set to YES, upper-case letters are also
+# allowed. This is useful if you have classes or files whose names only differ
+# in case and if your file system supports case sensitive file names. Windows
+# and Mac users are advised to set this option to NO.
+# The default value is: system dependent.
+
+CASE_SENSE_NAMES       = YES
+
+# If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with
+# their full class and namespace scopes in the documentation. If set to YES, the
+# scope will be hidden.
+# The default value is: NO.
+
+HIDE_SCOPE_NAMES       = NO
+
+# If the HIDE_COMPOUND_REFERENCE tag is set to NO (default) then doxygen will
+# append additional text to a page's title, such as Class Reference. If set to
+# YES the compound reference will be hidden.
+# The default value is: NO.
+
+HIDE_COMPOUND_REFERENCE= NO
+
+# If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of
+# the files that are included by a file in the documentation of that file.
+# The default value is: YES.
+
+SHOW_INCLUDE_FILES     = YES
+
+# If the SHOW_GROUPED_MEMB_INC tag is set to YES then Doxygen will add for each
+# grouped member an include statement to the documentation, telling the reader
+# which file to include in order to use the member.
+# The default value is: NO.
+
+SHOW_GROUPED_MEMB_INC  = NO
+
+# If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include
+# files with double quotes in the documentation rather than with sharp brackets.
+# The default value is: NO.
+
+FORCE_LOCAL_INCLUDES   = NO
+
+# If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the
+# documentation for inline members.
+# The default value is: YES.
+
+INLINE_INFO            = YES
+
+# If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the
+# (detailed) documentation of file and class members alphabetically by member
+# name. If set to NO, the members will appear in declaration order.
+# The default value is: YES.
+
+SORT_MEMBER_DOCS       = NO
+
+# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief
+# descriptions of file, namespace and class members alphabetically by member
+# name. If set to NO, the members will appear in declaration order. Note that
+# this will also influence the order of the classes in the class list.
+# The default value is: NO.
+
+SORT_BRIEF_DOCS        = NO
+
+# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the
+# (brief and detailed) documentation of class members so that constructors and
+# destructors are listed first. If set to NO the constructors will appear in the
+# respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS.
+# Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief
+# member documentation.
+# Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting
+# detailed member documentation.
+# The default value is: NO.
+
+SORT_MEMBERS_CTORS_1ST = NO
+
+# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy
+# of group names into alphabetical order. If set to NO the group names will
+# appear in their defined order.
+# The default value is: NO.
+
+SORT_GROUP_NAMES       = NO
+
+# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by
+# fully-qualified names, including namespaces. If set to NO, the class list will
+# be sorted only by class name, not including the namespace part.
+# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
+# Note: This option applies only to the class list, not to the alphabetical
+# list.
+# The default value is: NO.
+
+SORT_BY_SCOPE_NAME     = NO
+
+# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper
+# type resolution of all parameters of a function it will reject a match between
+# the prototype and the implementation of a member function even if there is
+# only one candidate or it is obvious which candidate to choose by doing a
+# simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still
+# accept a match between prototype and implementation in such cases.
+# The default value is: NO.
+
+STRICT_PROTO_MATCHING  = NO
+
+# The GENERATE_TODOLIST tag can be used to enable (YES) or disable (NO) the todo
+# list. This list is created by putting \todo commands in the documentation.
+# The default value is: YES.
+
+GENERATE_TODOLIST      = YES
+
+# The GENERATE_TESTLIST tag can be used to enable (YES) or disable (NO) the test
+# list. This list is created by putting \test commands in the documentation.
+# The default value is: YES.
+
+GENERATE_TESTLIST      = YES
+
+# The GENERATE_BUGLIST tag can be used to enable (YES) or disable (NO) the bug
+# list. This list is created by putting \bug commands in the documentation.
+# The default value is: YES.
+
+GENERATE_BUGLIST       = YES
+
+# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or disable (NO)
+# the deprecated list. This list is created by putting \deprecated commands in
+# the documentation.
+# The default value is: YES.
+
+GENERATE_DEPRECATEDLIST= YES
+
+# The ENABLED_SECTIONS tag can be used to enable conditional documentation
+# sections, marked by \if <section_label> ... \endif and \cond <section_label>
+# ... \endcond blocks.
+
+ENABLED_SECTIONS       =
+
+# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the
+# initial value of a variable or macro / define can have for it to appear in the
+# documentation. If the initializer consists of more lines than specified here
+# it will be hidden. Use a value of 0 to hide initializers completely. The
+# appearance of the value of individual variables and macros / defines can be
+# controlled using \showinitializer or \hideinitializer command in the
+# documentation regardless of this setting.
+# Minimum value: 0, maximum value: 10000, default value: 30.
+
+MAX_INITIALIZER_LINES  = 30
+
+# Set the SHOW_USED_FILES tag to NO to disable the list of files generated at
+# the bottom of the documentation of classes and structs. If set to YES, the
+# list will mention the files that were used to generate the documentation.
+# The default value is: YES.
+
+SHOW_USED_FILES        = YES
+
+# Set the SHOW_FILES tag to NO to disable the generation of the Files page. This
+# will remove the Files entry from the Quick Index and from the Folder Tree View
+# (if specified).
+# The default value is: YES.
+
+SHOW_FILES             = YES
+
+# Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces
+# page. This will remove the Namespaces entry from the Quick Index and from the
+# Folder Tree View (if specified).
+# The default value is: YES.
+
+SHOW_NAMESPACES        = NO
+
+# The FILE_VERSION_FILTER tag can be used to specify a program or script that
+# doxygen should invoke to get the current version for each file (typically from
+# the version control system). Doxygen will invoke the program by executing (via
+# popen()) the command command input-file, where command is the value of the
+# FILE_VERSION_FILTER tag, and input-file is the name of an input file provided
+# by doxygen. Whatever the program writes to standard output is used as the file
+# version. For an example see the documentation.
+
+FILE_VERSION_FILTER    =
+
+# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed
+# by doxygen. The layout file controls the global structure of the generated
+# output files in an output format independent way. To create the layout file
+# that represents doxygen's defaults, run doxygen with the -l option. You can
+# optionally specify a file name after the option, if omitted DoxygenLayout.xml
+# will be used as the name of the layout file.
+#
+# Note that if you run doxygen from a directory containing a file called
+# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE
+# tag is left empty.
+
+LAYOUT_FILE            =
+
+# The CITE_BIB_FILES tag can be used to specify one or more bib files containing
+# the reference definitions. This must be a list of .bib files. The .bib
+# extension is automatically appended if omitted. This requires the bibtex tool
+# to be installed. See also http://en.wikipedia.org/wiki/BibTeX for more info.
+# For LaTeX the style of the bibliography can be controlled using
+# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the
+# search path. See also \cite for info how to create references.
+
+CITE_BIB_FILES         =
+
+#---------------------------------------------------------------------------
+# Configuration options related to warning and progress messages
+#---------------------------------------------------------------------------
+
+# The QUIET tag can be used to turn on/off the messages that are generated to
+# standard output by doxygen. If QUIET is set to YES this implies that the
+# messages are off.
+# The default value is: NO.
+
+QUIET                  = NO
+
+# The WARNINGS tag can be used to turn on/off the warning messages that are
+# generated to standard error (stderr) by doxygen. If WARNINGS is set to YES
+# this implies that the warnings are on.
+#
+# Tip: Turn warnings on while writing the documentation.
+# The default value is: YES.
+
+WARNINGS               = YES
+
+# If the WARN_IF_UNDOCUMENTED tag is set to YES then doxygen will generate
+# warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag
+# will automatically be disabled.
+# The default value is: YES.
+
+WARN_IF_UNDOCUMENTED   = YES
+
+# If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for
+# potential errors in the documentation, such as not documenting some parameters
+# in a documented function, or documenting parameters that don't exist or using
+# markup commands wrongly.
+# The default value is: YES.
+
+WARN_IF_DOC_ERROR      = YES
+
+# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that
+# are documented, but have no documentation for their parameters or return
+# value. If set to NO, doxygen will only warn about wrong or incomplete
+# parameter documentation, but not about the absence of documentation.
+# The default value is: NO.
+
+WARN_NO_PARAMDOC       = NO
+
+# The WARN_FORMAT tag determines the format of the warning messages that doxygen
+# can produce. The string should contain the $file, $line, and $text tags, which
+# will be replaced by the file and line number from which the warning originated
+# and the warning text. Optionally the format may contain $version, which will
+# be replaced by the version of the file (if it could be obtained via
+# FILE_VERSION_FILTER)
+# The default value is: $file:$line: $text.
+
+WARN_FORMAT            = "$file:$line: $text"
+
+# The WARN_LOGFILE tag can be used to specify a file to which warning and error
+# messages should be written. If left blank the output is written to standard
+# error (stderr).
+
+WARN_LOGFILE           =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the input files
+#---------------------------------------------------------------------------
+
+# The INPUT tag is used to specify the files and/or directories that contain
+# documented source files. You may enter file names like myfile.cpp or
+# directories like /usr/src/myproject. Separate the files or directories with
+# spaces.
+# Note: If this tag is empty the current directory is searched.
+
+INPUT                  = mainpage.doxy INTRODUCTION.md CONFIGURATION.md src/rdkafka.h src-cpp/rdkafkacpp.h
+
+# This tag can be used to specify the character encoding of the source files
+# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses
+# libiconv (or the iconv built into libc) for the transcoding. See the libiconv
+# documentation (see: http://www.gnu.org/software/libiconv) for the list of
+# possible encodings.
+# The default value is: UTF-8.
+
+INPUT_ENCODING         = UTF-8
+
+# If the value of the INPUT tag contains directories, you can use the
+# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and
+# *.h) to filter out the source-files in the directories. If left blank the
+# following patterns are tested:*.c, *.cc, *.cxx, *.cpp, *.c++, *.java, *.ii,
+# *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, *.hh, *.hxx, *.hpp,
+# *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, *.m, *.markdown,
+# *.md, *.mm, *.dox, *.py, *.f90, *.f, *.for, *.tcl, *.vhd, *.vhdl, *.ucf,
+# *.qsf, *.as and *.js.
+
+FILE_PATTERNS          =
+
+# The RECURSIVE tag can be used to specify whether or not subdirectories should
+# be searched for input files as well.
+# The default value is: NO.
+
+RECURSIVE              = NO
+
+# The EXCLUDE tag can be used to specify files and/or directories that should be
+# excluded from the INPUT source files. This way you can easily exclude a
+# subdirectory from a directory tree whose root is specified with the INPUT tag.
+#
+# Note that relative paths are relative to the directory from which doxygen is
+# run.
+
+EXCLUDE                =
+
+# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
+# directories that are symbolic links (a Unix file system feature) are excluded
+# from the input.
+# The default value is: NO.
+
+EXCLUDE_SYMLINKS       = NO
+
+# If the value of the INPUT tag contains directories, you can use the
+# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
+# certain files from those directories.
+#
+# Note that the wildcards are matched against the file with absolute path, so to
+# exclude all test directories for example use the pattern */test/*
+
+EXCLUDE_PATTERNS       =
+
+# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
+# (namespaces, classes, functions, etc.) that should be excluded from the
+# output. The symbol name can be a fully qualified name, a word, or if the
+# wildcard * is used, a substring. Examples: ANamespace, AClass,
+# AClass::ANamespace, ANamespace::*Test
+#
+# Note that the wildcards are matched against the file with absolute path, so to
+# exclude all test directories use the pattern */test/*
+
+EXCLUDE_SYMBOLS        =
+
+# The EXAMPLE_PATH tag can be used to specify one or more files or directories
+# that contain example code fragments that are included (see the \include
+# command).
+
+EXAMPLE_PATH           =
+
+# If the value of the EXAMPLE_PATH tag contains directories, you can use the
+# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and
+# *.h) to filter out the source-files in the directories. If left blank all
+# files are included.
+
+EXAMPLE_PATTERNS       =
+
+# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
+# searched for input files to be used with the \include or \dontinclude commands
+# irrespective of the value of the RECURSIVE tag.
+# The default value is: NO.
+
+EXAMPLE_RECURSIVE      = NO
+
+# The IMAGE_PATH tag can be used to specify one or more files or directories
+# that contain images that are to be included in the documentation (see the
+# \image command).
+
+IMAGE_PATH             =
+
+# The INPUT_FILTER tag can be used to specify a program that doxygen should
+# invoke to filter for each input file. Doxygen will invoke the filter program
+# by executing (via popen()) the command:
+#
+# <filter> <input-file>
+#
+# where <filter> is the value of the INPUT_FILTER tag, and <input-file> is the
+# name of an input file. Doxygen will then use the output that the filter
+# program writes to standard output. If FILTER_PATTERNS is specified, this tag
+# will be ignored.
+#
+# Note that the filter must not add or remove lines; it is applied before the
+# code is scanned, but not when the output code is generated. If lines are added
+# or removed, the anchors will not be placed correctly.
+
+INPUT_FILTER           =
+
+# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
+# basis. Doxygen will compare the file name with each pattern and apply the
+# filter if there is a match. The filters are a list of the form: pattern=filter
+# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how
+# filters are used. If the FILTER_PATTERNS tag is empty or if none of the
+# patterns match the file name, INPUT_FILTER is applied.
+
+FILTER_PATTERNS        =
+
+# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
+# INPUT_FILTER) will also be used to filter the input files that are used for
+# producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES).
+# The default value is: NO.
+
+FILTER_SOURCE_FILES    = NO
+
+# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file
+# pattern. A pattern will override the setting for FILTER_PATTERN (if any) and
+# it is also possible to disable source filtering for a specific pattern using
+# *.ext= (so without naming a filter).
+# This tag requires that the tag FILTER_SOURCE_FILES is set to YES.
+
+FILTER_SOURCE_PATTERNS =
+
+# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that
+# is part of the input, its contents will be placed on the main page
+# (index.html). This can be useful if you have a project on for instance GitHub
+# and want to reuse the introduction page also for the doxygen output.
+
+USE_MDFILE_AS_MAINPAGE =
+
+#---------------------------------------------------------------------------
+# Configuration options related to source browsing
+#---------------------------------------------------------------------------
+
+# If the SOURCE_BROWSER tag is set to YES then a list of source files will be
+# generated. Documented entities will be cross-referenced with these sources.
+#
+# Note: To get rid of all source code in the generated output, make sure that
+# also VERBATIM_HEADERS is set to NO.
+# The default value is: NO.
+
+SOURCE_BROWSER         = NO
+
+# Setting the INLINE_SOURCES tag to YES will include the body of functions,
+# classes and enums directly into the documentation.
+# The default value is: NO.
+
+INLINE_SOURCES         = NO
+
+# Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any
+# special comment blocks from generated source code fragments. Normal C, C++ and
+# Fortran comments will always remain visible.
+# The default value is: YES.
+
+STRIP_CODE_COMMENTS    = YES
+
+# If the REFERENCED_BY_RELATION tag is set to YES then for each documented
+# function all documented functions referencing it will be listed.
+# The default value is: NO.
+
+REFERENCED_BY_RELATION = NO
+
+# If the REFERENCES_RELATION tag is set to YES then for each documented function
+# all documented entities called/used by that function will be listed.
+# The default value is: NO.
+
+REFERENCES_RELATION    = NO
+
+# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set
+# to YES then the hyperlinks from functions in REFERENCES_RELATION and
+# REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will
+# link to the documentation.
+# The default value is: YES.
+
+REFERENCES_LINK_SOURCE = YES
+
+# If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the
+# source code will show a tooltip with additional information such as prototype,
+# brief description and links to the definition and documentation. Since this
+# will make the HTML file larger and loading of large files a bit slower, you
+# can opt to disable this feature.
+# The default value is: YES.
+# This tag requires that the tag SOURCE_BROWSER is set to YES.
+
+SOURCE_TOOLTIPS        = YES
+
+# If the USE_HTAGS tag is set to YES then the references to source code will
+# point to the HTML generated by the htags(1) tool instead of doxygen built-in
+# source browser. The htags tool is part of GNU's global source tagging system
+# (see http://www.gnu.org/software/global/global.html). You will need version
+# 4.8.6 or higher.
+#
+# To use it do the following:
+# - Install the latest version of global
+# - Enable SOURCE_BROWSER and USE_HTAGS in the config file
+# - Make sure the INPUT points to the root of the source tree
+# - Run doxygen as normal
+#
+# Doxygen will invoke htags (and that will in turn invoke gtags), so these
+# tools must be available from the command line (i.e. in the search path).
+#
+# The result: instead of the source browser generated by doxygen, the links to
+# source code will now point to the output of htags.
+# The default value is: NO.
+# This tag requires that the tag SOURCE_BROWSER is set to YES.
+
+USE_HTAGS              = NO
+
+# If the VERBATIM_HEADERS tag is set the YES then doxygen will generate a
+# verbatim copy of the header file for each class for which an include is
+# specified. Set to NO to disable this.
+# See also: Section \class.
+# The default value is: YES.
+
+VERBATIM_HEADERS       = YES
+
+# If the CLANG_ASSISTED_PARSING tag is set to YES then doxygen will use the
+# clang parser (see: http://clang.llvm.org/) for more accurate parsing at the
+# cost of reduced performance. This can be particularly helpful with template
+# rich C++ code for which doxygen's built-in parser lacks the necessary type
+# information.
+# Note: The availability of this option depends on whether or not doxygen was
+# compiled with the --with-libclang option.
+# The default value is: NO.
+
+CLANG_ASSISTED_PARSING = NO
+
+# If clang assisted parsing is enabled you can provide the compiler with command
+# line options that you would normally use when invoking the compiler. Note that
+# the include paths will already be set by doxygen for the files and directories
+# specified with INPUT and INCLUDE_PATH.
+# This tag requires that the tag CLANG_ASSISTED_PARSING is set to YES.
+
+CLANG_OPTIONS          =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the alphabetical class index
+#---------------------------------------------------------------------------
+
+# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all
+# compounds will be generated. Enable this if the project contains a lot of
+# classes, structs, unions or interfaces.
+# The default value is: YES.
+
+ALPHABETICAL_INDEX     = YES
+
+# The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in
+# which the alphabetical index list will be split.
+# Minimum value: 1, maximum value: 20, default value: 5.
+# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
+
+COLS_IN_ALPHA_INDEX    = 5
+
+# In case all classes in a project start with a common prefix, all classes will
+# be put under the same header in the alphabetical index. The IGNORE_PREFIX tag
+# can be used to specify a prefix (or a list of prefixes) that should be ignored
+# while generating the index headers.
+# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
+
+IGNORE_PREFIX          =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the HTML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_HTML tag is set to YES, doxygen will generate HTML output
+# The default value is: YES.
+
+GENERATE_HTML          = YES
+
+# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: html.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_OUTPUT            = html
+
+# The HTML_FILE_EXTENSION tag can be used to specify the file extension for each
+# generated HTML page (for example: .htm, .php, .asp).
+# The default value is: .html.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_FILE_EXTENSION    = .html
+
+# The HTML_HEADER tag can be used to specify a user-defined HTML header file for
+# each generated HTML page. If the tag is left blank doxygen will generate a
+# standard header.
+#
+# To get valid HTML, the header file must include any scripts and style sheets
+# that doxygen needs, which depend on the configuration options used (e.g. the
+# setting GENERATE_TREEVIEW). It is highly recommended to start with a
+# default header using
+# doxygen -w html new_header.html new_footer.html new_stylesheet.css
+# YourConfigFile
+# and then modify the file new_header.html. See also section "Doxygen usage"
+# for information on how to generate the default header that doxygen normally
+# uses.
+# Note: The header is subject to change so you typically have to regenerate the
+# default header when upgrading to a newer version of doxygen. For a description
+# of the possible markers and block names see the documentation.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_HEADER            =
+
+# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each
+# generated HTML page. If the tag is left blank doxygen will generate a standard
+# footer. See HTML_HEADER for more information on how to generate a default
+# footer and what special commands can be used inside the footer. See also
+# section "Doxygen usage" for information on how to generate the default footer
+# that doxygen normally uses.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_FOOTER            =
+
+# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style
+# sheet that is used by each HTML page. It can be used to fine-tune the look of
+# the HTML output. If left blank doxygen will generate a default style sheet.
+# See also section "Doxygen usage" for information on how to generate the style
+# sheet that doxygen normally uses.
+# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as
+# it is more robust and this tag (HTML_STYLESHEET) will in the future become
+# obsolete.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_STYLESHEET        =
+
+# The HTML_EXTRA_STYLESHEET tag can be used to specify additional user-defined
+# cascading style sheets that are included after the standard style sheets
+# created by doxygen. Using this option one can overrule certain style aspects.
+# This is preferred over using HTML_STYLESHEET since it does not replace the
+# standard style sheet and is therefore more robust against future updates.
+# Doxygen will copy the style sheet files to the output directory.
+# Note: The order of the extra style sheet files is of importance (e.g. the last
+# style sheet in the list overrules the setting of the previous ones in the
+# list). For an example see the documentation.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_EXTRA_STYLESHEET  =
+
+# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or
+# other source files which should be copied to the HTML output directory. Note
+# that these files will be copied to the base HTML output directory. Use the
+# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these
+# files. In the HTML_STYLESHEET file, use the file name only. Also note that the
+# files will be copied as-is; there are no commands or markers available.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_EXTRA_FILES       =
+
+# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen
+# will adjust the colors in the style sheet and background images according to
+# this color. Hue is specified as an angle on a colorwheel, see
+# http://en.wikipedia.org/wiki/Hue for more information. For instance the value
+# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300
+# purple, and 360 is red again.
+# Minimum value: 0, maximum value: 359, default value: 220.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_COLORSTYLE_HUE    = 220
+
+# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors
+# in the HTML output. For a value of 0 the output will use grayscales only. A
+# value of 255 will produce the most vivid colors.
+# Minimum value: 0, maximum value: 255, default value: 100.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_COLORSTYLE_SAT    = 100
+
+# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the
+# luminance component of the colors in the HTML output. Values below 100
+# gradually make the output lighter, whereas values above 100 make the output
+# darker. The value divided by 100 is the actual gamma applied, so 80 represents
+# a gamma of 0.8, the value 220 represents a gamma of 2.2, and 100 does not
+# change the gamma.
+# Minimum value: 40, maximum value: 240, default value: 80.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_COLORSTYLE_GAMMA  = 80
+
+# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML
+# page will contain the date and time when the page was generated. Setting this
+# to YES can help to show when doxygen was last run and thus if the
+# documentation is up to date.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_TIMESTAMP         = YES
+
+# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
+# documentation will contain sections that can be hidden and shown after the
+# page has loaded.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_DYNAMIC_SECTIONS  = NO
+
+# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries
+# shown in the various tree structured indices initially; the user can expand
+# and collapse entries dynamically later on. Doxygen will expand the tree to
+# such a level that at most the specified number of entries are visible (unless
+# a fully collapsed tree already exceeds this amount). So setting the number of
+# entries to 1 will produce a fully collapsed tree by default. 0 is a special
+# value representing an infinite number of entries and will result in a fully
+# expanded tree by default.
+# Minimum value: 0, maximum value: 9999, default value: 100.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_INDEX_NUM_ENTRIES = 100
+
+# If the GENERATE_DOCSET tag is set to YES, additional index files will be
+# generated that can be used as input for Apple's Xcode 3 integrated development
+# environment (see: http://developer.apple.com/tools/xcode/), introduced with
+# OSX 10.5 (Leopard). To create a documentation set, doxygen will generate a
+# Makefile in the HTML output directory. Running make will produce the docset in
+# that directory and running make install will install the docset in
+# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at
+# startup. See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html
+# for more information.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_DOCSET        = NO
+
+# This tag determines the name of the docset feed. A documentation feed provides
+# an umbrella under which multiple documentation sets from a single provider
+# (such as a company or product suite) can be grouped.
+# The default value is: Doxygen generated docs.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_FEEDNAME        = "librdkafka documentation"
+
+# This tag specifies a string that should uniquely identify the documentation
+# set bundle. This should be a reverse domain-name style string, e.g.
+# com.mycompany.MyDocSet. Doxygen will append .docset to the name.
+# The default value is: org.doxygen.Project.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_BUNDLE_ID       = se.edenhill.librdkafka
+
+# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify
+# the documentation publisher. This should be a reverse domain-name style
+# string, e.g. com.mycompany.MyDocSet.documentation.
+# The default value is: org.doxygen.Publisher.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_PUBLISHER_ID    = se.edenhill
+
+# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher.
+# The default value is: Publisher.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_PUBLISHER_NAME  = Magnus Edenhill
+
+# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three
+# additional HTML index files: index.hhp, index.hhc, and index.hhk. The
+# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop
+# (see: http://www.microsoft.com/en-us/download/details.aspx?id=21138) on
+# Windows.
+#
+# The HTML Help Workshop contains a compiler that can convert all HTML output
+# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML
+# files are now used as the Windows 98 help format, and will replace the old
+# Windows help format (.hlp) on all Windows platforms in the future. Compressed
+# HTML files also contain an index, a table of contents, and you can search for
+# words in the documentation. The HTML workshop also contains a viewer for
+# compressed HTML files.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_HTMLHELP      = NO
+
+# The CHM_FILE tag can be used to specify the file name of the resulting .chm
+# file. You can add a path in front of the file if the result should not be
+# written to the html output directory.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+CHM_FILE               =
+
+# The HHC_LOCATION tag can be used to specify the location (absolute path
+# including file name) of the HTML help compiler (hhc.exe). If non-empty,
+# doxygen will try to run the HTML help compiler on the generated index.hhp.
+# The file has to be specified with full path.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+HHC_LOCATION           =
+
+# The GENERATE_CHI flag controls whether a separate .chi index file is generated
+# (YES) or included in the master .chm file (NO).
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+GENERATE_CHI           = NO
+
+# The CHM_INDEX_ENCODING is used to encode HtmlHelp index (hhk), content (hhc)
+# and project file content.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+CHM_INDEX_ENCODING     =
+
+# The BINARY_TOC flag controls whether a binary table of contents is generated
+# (YES) or a normal table of contents (NO) in the .chm file. Furthermore it
+# enables the Previous and Next buttons.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+BINARY_TOC             = NO
+
+# The TOC_EXPAND flag can be set to YES to add extra items for group members to
+# the table of contents of the HTML help documentation and to the tree view.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+TOC_EXPAND             = NO
+
+# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and
+# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that
+# can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help
+# (.qch) of the generated HTML documentation.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_QHP           = NO
+
+# If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify
+# the file name of the resulting .qch file. The path specified is relative to
+# the HTML output folder.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QCH_FILE               =
+
+# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help
+# Project output. For more information please see Qt Help Project / Namespace
+# (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#namespace).
+# The default value is: org.doxygen.Project.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_NAMESPACE          = se.edenhill.librdkafka
+
+# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt
+# Help Project output. For more information please see Qt Help Project / Virtual
+# Folders (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#virtual-
+# folders).
+# The default value is: doc.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_VIRTUAL_FOLDER     = doc
+
+# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom
+# filter to add. For more information please see Qt Help Project / Custom
+# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom-
+# filters).
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_CUST_FILTER_NAME   =
+
+# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the
+# custom filter to add. For more information please see Qt Help Project / Custom
+# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom-
+# filters).
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_CUST_FILTER_ATTRS  =
+
+# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this
+# project's filter section matches. Qt Help Project / Filter Attributes (see:
+# http://qt-project.org/doc/qt-4.8/qthelpproject.html#filter-attributes).
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_SECT_FILTER_ATTRS  =
+
+# The QHG_LOCATION tag can be used to specify the location of Qt's
+# qhelpgenerator. If non-empty doxygen will try to run qhelpgenerator on the
+# generated .qhp file.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHG_LOCATION           =
+
+# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be
+# generated that, together with the HTML files, form an Eclipse help plugin. To
+# install this plugin and make it available under the help contents menu in
+# Eclipse, the contents of the directory containing the HTML and XML files needs
+# to be copied into the plugins directory of eclipse. The name of the directory
+# within the plugins directory should be the same as the ECLIPSE_DOC_ID value.
+# After copying Eclipse needs to be restarted before the help appears.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_ECLIPSEHELP   = NO
+
+# A unique identifier for the Eclipse help plugin. When installing the plugin
+# the directory name containing the HTML and XML files should also have this
+# name. Each documentation set should have its own identifier.
+# The default value is: org.doxygen.Project.
+# This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES.
+
+ECLIPSE_DOC_ID         = se.edenhill.librdkafka
+
+# If you want full control over the layout of the generated HTML pages it might
+# be necessary to disable the index and replace it with your own. The
+# DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top
+# of each HTML page. A value of NO enables the index and the value YES disables
+# it. Since the tabs in the index contain the same information as the navigation
+# tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+DISABLE_INDEX          = NO
+
+# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
+# structure should be generated to display hierarchical information. If the tag
+# value is set to YES, a side panel will be generated containing a tree-like
+# index structure (just like the one that is generated for HTML Help). For this
+# to work a browser that supports JavaScript, DHTML, CSS and frames is required
+# (i.e. any modern browser). Windows users are probably better off using the
+# HTML help feature. Via custom style sheets (see HTML_EXTRA_STYLESHEET) one can
+# further fine-tune the look of the index. As an example, the default style
+# sheet generated by doxygen has an example that shows how to put an image at
+# the root of the tree instead of the PROJECT_NAME. Since the tree basically has
+# the same information as the tab index, you could consider setting
+# DISABLE_INDEX to YES when enabling this option.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_TREEVIEW      = YES
+
+# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that
+# doxygen will group on one line in the generated HTML documentation.
+#
+# Note that a value of 0 will completely suppress the enum values from appearing
+# in the overview section.
+# Minimum value: 0, maximum value: 20, default value: 4.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+ENUM_VALUES_PER_LINE   = 1
+
+# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used
+# to set the initial width (in pixels) of the frame in which the tree is shown.
+# Minimum value: 0, maximum value: 1500, default value: 250.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+TREEVIEW_WIDTH         = 250
+
+# If the EXT_LINKS_IN_WINDOW option is set to YES, doxygen will open links to
+# external symbols imported via tag files in a separate window.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+EXT_LINKS_IN_WINDOW    = NO
+
+# Use this tag to change the font size of LaTeX formulas included as images in
+# the HTML documentation. When you change the font size after a successful
+# doxygen run you need to manually remove any form_*.png images from the HTML
+# output directory to force them to be regenerated.
+# Minimum value: 8, maximum value: 50, default value: 10.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+FORMULA_FONTSIZE       = 10
+
+# Use the FORMULA_TRANSPARENT tag to determine whether or not the images
+# generated for formulas are transparent PNGs. Transparent PNGs are not
+# supported properly for IE 6.0, but are supported on all modern browsers.
+#
+# Note that when changing this option you need to delete any form_*.png files in
+# the HTML output directory before the changes have effect.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+FORMULA_TRANSPARENT    = YES
+
+# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see
+# http://www.mathjax.org) which uses client side Javascript for the rendering
+# instead of using pre-rendered bitmaps. Use this if you do not have LaTeX
+# installed or if you want the formulas to look prettier in the HTML output. When
+# enabled you may also need to install MathJax separately and configure the path
+# to it using the MATHJAX_RELPATH option.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+USE_MATHJAX            = NO
+
+# When MathJax is enabled you can set the default output format to be used for
+# the MathJax output. See the MathJax site (see:
+# http://docs.mathjax.org/en/latest/output.html) for more details.
+# Possible values are: HTML-CSS (which is slower, but has the best
+# compatibility), NativeMML (i.e. MathML) and SVG.
+# The default value is: HTML-CSS.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_FORMAT         = HTML-CSS
+
+# When MathJax is enabled you need to specify the location relative to the HTML
+# output directory using the MATHJAX_RELPATH option. The destination directory
+# should contain the MathJax.js script. For instance, if the mathjax directory
+# is located at the same level as the HTML output directory, then
+# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax
+# Content Delivery Network so you can quickly see the result without installing
+# MathJax. However, it is strongly recommended to install a local copy of
+# MathJax from http://www.mathjax.org before deployment.
+# The default value is: http://cdn.mathjax.org/mathjax/latest.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_RELPATH        = http://cdn.mathjax.org/mathjax/latest
+
+# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax
+# extension names that should be enabled during MathJax rendering. For example
+# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_EXTENSIONS     =
+
+# The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces
+# of code that will be used on startup of the MathJax code. See the MathJax site
+# (see: http://docs.mathjax.org/en/latest/output.html) for more details. For an
+# example see the documentation.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_CODEFILE       =
+
+# When the SEARCHENGINE tag is enabled doxygen will generate a search box for
+# the HTML output. The underlying search engine uses javascript and DHTML and
+# should work on any modern browser. Note that when using HTML help
+# (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET)
+# there is already a search function so this one should typically be disabled.
+# For large projects the javascript based search engine can be slow; in that case
+# enabling SERVER_BASED_SEARCH may provide a better solution. It is possible to
+# search using the keyboard; to jump to the search box use <access key> + S
+# (what the <access key> is depends on the OS and browser, but it is typically
+# <CTRL>, <ALT>/<option>, or both). Inside the search box use the <cursor down
+# key> to jump into the search results window, the results can be navigated
+# using the <cursor keys>. Press <Enter> to select an item or <escape> to cancel
+# the search. The filter options can be selected when the cursor is inside the
+# search box by pressing <Shift>+<cursor down>. Also here use the <cursor keys>
+# to select a filter and <Enter> or <escape> to activate or cancel the filter
+# option.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+SEARCHENGINE           = YES
+
+# When the SERVER_BASED_SEARCH tag is enabled the search engine will be
+# implemented using a web server instead of a web client using Javascript. There
+# are two flavors of web server based searching depending on the EXTERNAL_SEARCH
+# setting. When disabled, doxygen will generate a PHP script for searching and
+# an index file used by the script. When EXTERNAL_SEARCH is enabled the indexing
+# and searching needs to be provided by external tools. See the section
+# "External Indexing and Searching" for details.
+# The default value is: NO.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+SERVER_BASED_SEARCH    = NO
+
+# When EXTERNAL_SEARCH tag is enabled doxygen will no longer generate the PHP
+# script for searching. Instead the search results are written to an XML file
+# which needs to be processed by an external indexer. Doxygen will invoke an
+# external search engine pointed to by the SEARCHENGINE_URL option to obtain the
+# search results.
+#
+# Doxygen ships with an example indexer (doxyindexer) and search engine
+# (doxysearch.cgi) which are based on the open source search engine library
+# Xapian (see: http://xapian.org/).
+#
+# See the section "External Indexing and Searching" for details.
+# The default value is: NO.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+EXTERNAL_SEARCH        = NO
+
+# The SEARCHENGINE_URL should point to a search engine hosted by a web server
+# which will return the search results when EXTERNAL_SEARCH is enabled.
+#
+# Doxygen ships with an example indexer (doxyindexer) and search engine
+# (doxysearch.cgi) which are based on the open source search engine library
+# Xapian (see: http://xapian.org/). See the section "External Indexing and
+# Searching" for details.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+SEARCHENGINE_URL       =
+
+# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the unindexed
+# search data is written to a file for indexing by an external tool. With the
+# SEARCHDATA_FILE tag the name of this file can be specified.
+# The default file is: searchdata.xml.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+SEARCHDATA_FILE        = searchdata.xml
+
+# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the
+# EXTERNAL_SEARCH_ID tag can be used as an identifier for the project. This is
+# useful in combination with EXTRA_SEARCH_MAPPINGS to search through multiple
+# projects and redirect the results back to the right project.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+EXTERNAL_SEARCH_ID     =
+
+# The EXTRA_SEARCH_MAPPINGS tag can be used to enable searching through doxygen
+# projects other than the one defined by this configuration file, but that are
+# all added to the same external search index. Each project needs to have a
+# unique id set via EXTERNAL_SEARCH_ID. The search mapping then maps each id
+# to a relative location where the documentation can be found. The format is:
+# EXTRA_SEARCH_MAPPINGS = tagname1=loc1 tagname2=loc2 ...
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+EXTRA_SEARCH_MAPPINGS  =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the LaTeX output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_LATEX tag is set to YES, doxygen will generate LaTeX output.
+# The default value is: YES.
+
+GENERATE_LATEX         = YES
+
+# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: latex.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_OUTPUT           = latex
+
+# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
+# invoked.
+#
+# Note that when enabling USE_PDFLATEX this option is only used for generating
+# bitmaps for formulas in the HTML output, but not in the Makefile that is
+# written to the output directory.
+# The default file is: latex.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_CMD_NAME         = latex
+
+# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to generate
+# index for LaTeX.
+# The default file is: makeindex.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+MAKEINDEX_CMD_NAME     = makeindex
+
+# If the COMPACT_LATEX tag is set to YES, doxygen generates more compact LaTeX
+# documents. This may be useful for small projects and may help to save some
+# trees in general.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+COMPACT_LATEX          = NO
+
+# The PAPER_TYPE tag can be used to set the paper type that is used by the
+# printer.
+# Possible values are: a4 (210 x 297 mm), letter (8.5 x 11 inches), legal (8.5 x
+# 14 inches) and executive (7.25 x 10.5 inches).
+# The default value is: a4.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+PAPER_TYPE             = a4
+
+# The EXTRA_PACKAGES tag can be used to specify one or more LaTeX package names
+# that should be included in the LaTeX output. To get the times font for
+# instance you can specify
+# EXTRA_PACKAGES=times
+# If left blank no extra packages will be included.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+EXTRA_PACKAGES         =
+
+# The LATEX_HEADER tag can be used to specify a personal LaTeX header for the
+# generated LaTeX document. The header should contain everything until the first
+# chapter. If it is left blank doxygen will generate a standard header. See
+# section "Doxygen usage" for information on how to let doxygen write the
+# default header to a separate file.
+#
+# Note: Only use a user-defined header if you know what you are doing! The
+# following commands have a special meaning inside the header: $title,
+# $datetime, $date, $doxygenversion, $projectname, $projectnumber,
+# $projectbrief, $projectlogo. Doxygen will replace $title with the empty
+# string, for the replacement values of the other commands the user is referred
+# to HTML_HEADER.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_HEADER           =
+
+# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for the
+# generated LaTeX document. The footer should contain everything after the last
+# chapter. If it is left blank doxygen will generate a standard footer. See
+# LATEX_HEADER for more information on how to generate a default footer and what
+# special commands can be used inside the footer.
+#
+# Note: Only use a user-defined footer if you know what you are doing!
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_FOOTER           =
+
+# The LATEX_EXTRA_STYLESHEET tag can be used to specify additional user-defined
+# LaTeX style sheets that are included after the standard style sheets created
+# by doxygen. Using this option one can overrule certain style aspects. Doxygen
+# will copy the style sheet files to the output directory.
+# Note: The order of the extra style sheet files is of importance (e.g. the last
+# style sheet in the list overrules the setting of the previous ones in the
+# list).
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_EXTRA_STYLESHEET =
+
+# The LATEX_EXTRA_FILES tag can be used to specify one or more extra images or
+# other source files which should be copied to the LATEX_OUTPUT output
+# directory. Note that the files will be copied as-is; there are no commands or
+# markers available.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_EXTRA_FILES      =
+
+# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated is
+# prepared for conversion to PDF (using ps2pdf or pdflatex). The PDF file will
+# contain links (just like the HTML output) instead of page references. This
+# makes the output suitable for online browsing using a PDF viewer.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+PDF_HYPERLINKS         = YES
+
+# If the USE_PDFLATEX tag is set to YES, doxygen will use pdflatex to generate
+# the PDF file directly from the LaTeX files. Set this option to YES to get
+# higher quality PDF documentation.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+USE_PDFLATEX           = YES
+
+# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \batchmode
+# command to the generated LaTeX files. This will instruct LaTeX to keep running
+# if errors occur, instead of asking the user for help. This option is also used
+# when generating formulas in HTML.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_BATCHMODE        = NO
+
+# If the LATEX_HIDE_INDICES tag is set to YES then doxygen will not include the
+# index chapters (such as File Index, Compound Index, etc.) in the output.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_HIDE_INDICES     = NO
+
+# If the LATEX_SOURCE_CODE tag is set to YES then doxygen will include source
+# code with syntax highlighting in the LaTeX output.
+#
+# Note that which sources are shown also depends on other settings such as
+# SOURCE_BROWSER.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_SOURCE_CODE      = NO
+
+# The LATEX_BIB_STYLE tag can be used to specify the style to use for the
+# bibliography, e.g. plainnat, or ieeetr. See
+# http://en.wikipedia.org/wiki/BibTeX and \cite for more info.
+# The default value is: plain.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_BIB_STYLE        = plain
+
+#---------------------------------------------------------------------------
+# Configuration options related to the RTF output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_RTF tag is set to YES, doxygen will generate RTF output. The
+# RTF output is optimized for Word 97 and may not look too pretty with other RTF
+# readers/editors.
+# The default value is: NO.
+
+GENERATE_RTF           = NO
+
+# The RTF_OUTPUT tag is used to specify where the RTF docs will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: rtf.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+RTF_OUTPUT             = rtf
+
+# If the COMPACT_RTF tag is set to YES, doxygen generates more compact RTF
+# documents. This may be useful for small projects and may help to save some
+# trees in general.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+COMPACT_RTF            = NO
+
+# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated will
+# contain hyperlink fields. The RTF file will contain links (just like the HTML
+# output) instead of page references. This makes the output suitable for online
+# browsing using Word or some other Word compatible readers that support those
+# fields.
+#
+# Note: WordPad (write) and others do not support links.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+RTF_HYPERLINKS         = NO
+
+# Load stylesheet definitions from file. Syntax is similar to doxygen's config
+# file, i.e. a series of assignments. You only have to provide replacements,
+# missing definitions are set to their default value.
+#
+# See also section "Doxygen usage" for information on how to generate the
+# default style sheet that doxygen normally uses.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+RTF_STYLESHEET_FILE    =
+
+# Set optional variables used in the generation of an RTF document. Syntax is
+# similar to doxygen's config file. A template extensions file can be generated
+# using doxygen -e rtf extensionFile.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+RTF_EXTENSIONS_FILE    =
+
+# If the RTF_SOURCE_CODE tag is set to YES then doxygen will include source code
+# with syntax highlighting in the RTF output.
+#
+# Note that which sources are shown also depends on other settings such as
+# SOURCE_BROWSER.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+RTF_SOURCE_CODE        = NO
+
+#---------------------------------------------------------------------------
+# Configuration options related to the man page output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_MAN tag is set to YES, doxygen will generate man pages for
+# classes and files.
+# The default value is: NO.
+
+GENERATE_MAN           = NO
+
+# The MAN_OUTPUT tag is used to specify where the man pages will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it. A directory man3 will be created inside the directory specified by
+# MAN_OUTPUT.
+# The default directory is: man.
+# This tag requires that the tag GENERATE_MAN is set to YES.
+
+MAN_OUTPUT             = man
+
+# The MAN_EXTENSION tag determines the extension that is added to the generated
+# man pages. In case the manual section does not start with a number, the number
+# 3 is prepended. The dot (.) at the beginning of the MAN_EXTENSION tag is
+# optional.
+# The default value is: .3.
+# This tag requires that the tag GENERATE_MAN is set to YES.
+
+MAN_EXTENSION          = .3
+
+# The MAN_SUBDIR tag determines the name of the directory created within
+# MAN_OUTPUT in which the man pages are placed. It defaults to man followed by
+# MAN_EXTENSION with the initial . removed.
+# This tag requires that the tag GENERATE_MAN is set to YES.
+
+MAN_SUBDIR             =
+
+# If the MAN_LINKS tag is set to YES and doxygen generates man output, then it
+# will generate one additional man file for each entity documented in the real
+# man page(s). These additional files only source the real man page, but without
+# them the man command would be unable to find the correct page.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_MAN is set to YES.
+
+MAN_LINKS              = NO
+
+#---------------------------------------------------------------------------
+# Configuration options related to the XML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_XML tag is set to YES, doxygen will generate an XML file that
+# captures the structure of the code including all documentation.
+# The default value is: NO.
+
+GENERATE_XML           = NO
+
+# The XML_OUTPUT tag is used to specify where the XML pages will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: xml.
+# This tag requires that the tag GENERATE_XML is set to YES.
+
+XML_OUTPUT             = xml
+
+# If the XML_PROGRAMLISTING tag is set to YES, doxygen will dump the program
+# listings (including syntax highlighting and cross-referencing information) to
+# the XML output. Note that enabling this will significantly increase the size
+# of the XML output.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_XML is set to YES.
+
+XML_PROGRAMLISTING     = YES
+
+#---------------------------------------------------------------------------
+# Configuration options related to the DOCBOOK output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_DOCBOOK tag is set to YES, doxygen will generate Docbook files
+# that can be used to generate PDF.
+# The default value is: NO.
+
+GENERATE_DOCBOOK       = NO
+
+# The DOCBOOK_OUTPUT tag is used to specify where the Docbook pages will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be put in
+# front of it.
+# The default directory is: docbook.
+# This tag requires that the tag GENERATE_DOCBOOK is set to YES.
+
+DOCBOOK_OUTPUT         = docbook
+
+# If the DOCBOOK_PROGRAMLISTING tag is set to YES, doxygen will include the
+# program listings (including syntax highlighting and cross-referencing
+# information) to the DOCBOOK output. Note that enabling this will significantly
+# increase the size of the DOCBOOK output.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_DOCBOOK is set to YES.
+
+DOCBOOK_PROGRAMLISTING = NO
+
+#---------------------------------------------------------------------------
+# Configuration options for the AutoGen Definitions output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_AUTOGEN_DEF tag is set to YES, doxygen will generate an
+# AutoGen Definitions (see http://autogen.sf.net) file that captures the
+# structure of the code including all documentation. Note that this feature is
+# still experimental and incomplete at the moment.
+# The default value is: NO.
+
+GENERATE_AUTOGEN_DEF   = NO
+
+#---------------------------------------------------------------------------
+# Configuration options related to the Perl module output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_PERLMOD tag is set to YES, doxygen will generate a Perl module
+# file that captures the structure of the code including all documentation.
+#
+# Note that this feature is still experimental and incomplete at the moment.
+# The default value is: NO.
+
+GENERATE_PERLMOD       = NO
+
+# If the PERLMOD_LATEX tag is set to YES, doxygen will generate the necessary
+# Makefile rules, Perl scripts and LaTeX code to be able to generate PDF and DVI
+# output from the Perl module output.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_PERLMOD is set to YES.
+
+PERLMOD_LATEX          = NO
+
+# If the PERLMOD_PRETTY tag is set to YES, the Perl module output will be nicely
+# formatted so it can be parsed by a human reader. This is useful if you want to
+# understand what is going on. On the other hand, if this tag is set to NO, the
+# size of the Perl module output will be much smaller and Perl will parse it
+# just the same.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_PERLMOD is set to YES.
+
+PERLMOD_PRETTY         = YES
+
+# The names of the make variables in the generated doxyrules.make file are
+# prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. This is useful
+# so different doxyrules.make files included by the same Makefile don't
+# overwrite each other's variables.
+# This tag requires that the tag GENERATE_PERLMOD is set to YES.
+
+PERLMOD_MAKEVAR_PREFIX =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the preprocessor
+#---------------------------------------------------------------------------
+
+# If the ENABLE_PREPROCESSING tag is set to YES, doxygen will evaluate all
+# C-preprocessor directives found in the sources and include files.
+# The default value is: YES.
+
+ENABLE_PREPROCESSING   = YES
+
+# If the MACRO_EXPANSION tag is set to YES, doxygen will expand all macro names
+# in the source code. If set to NO, only conditional compilation will be
+# performed. Macro expansion can be done in a controlled way by setting
+# EXPAND_ONLY_PREDEF to YES.
+# The default value is: NO.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+MACRO_EXPANSION        = NO
+
+# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES then
+# the macro expansion is limited to the macros specified with the PREDEFINED and
+# EXPAND_AS_DEFINED tags.
+# The default value is: NO.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+EXPAND_ONLY_PREDEF     = NO
+
+# If the SEARCH_INCLUDES tag is set to YES, the include files in the
+# INCLUDE_PATH will be searched if a #include is found.
+# The default value is: YES.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+SEARCH_INCLUDES        = YES
+
+# The INCLUDE_PATH tag can be used to specify one or more directories that
+# contain include files that are not input files but should be processed by the
+# preprocessor.
+# This tag requires that the tag SEARCH_INCL

<TRUNCATED>
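
The configuration file truncated above is librdkafka's stock Doxyfile, which drives the library's API documentation build. As a minimal sketch of how such a file is consumed (assuming doxygen 1.8.x is on the PATH and the file is saved as "Doxyfile" in the directory doxygen is run from), the usual invocations are:

  # Generate the documentation into OUTPUT_DIRECTORY (HTML plus, per
  # GENERATE_LATEX = YES above, LaTeX).
  doxygen Doxyfile

  # Regenerate the default header/footer/stylesheet that HTML_HEADER,
  # HTML_FOOTER and HTML_STYLESHEET can then point at.
  doxygen -w html new_header.html new_footer.html new_stylesheet.css Doxyfile

Both commands mirror the usage notes quoted in the comments above; the file names are illustrative.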

[18/51] [partial] nifi-minifi-cpp git commit: MINIFICPP-512 - upgrade to librdkafka 0.11.4

Posted by ph...@apache.org.
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdkafka_sasl.c
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdkafka_sasl.c b/thirdparty/librdkafka-0.11.1/src/rdkafka_sasl.c
deleted file mode 100644
index 9ece5cb..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdkafka_sasl.c
+++ /dev/null
@@ -1,343 +0,0 @@
-/*
- * librdkafka - The Apache Kafka C/C++ library
- *
- * Copyright (c) 2015 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "rdkafka_int.h"
-#include "rdkafka_transport.h"
-#include "rdkafka_transport_int.h"
-#include "rdkafka_sasl.h"
-#include "rdkafka_sasl_int.h"
-
-
- /**
- * Send auth message with framing.
- * This is a blocking call.
- */
-int rd_kafka_sasl_send (rd_kafka_transport_t *rktrans,
-                        const void *payload, int len,
-                        char *errstr, size_t errstr_size) {
-        rd_buf_t buf;
-        rd_slice_t slice;
-	int32_t hdr;
-
-	rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "SASL",
-		   "Send SASL frame to broker (%d bytes)", len);
-
-        rd_buf_init(&buf, 1+1, sizeof(hdr));
-
-	hdr = htobe32(len);
-        rd_buf_write(&buf, &hdr, sizeof(hdr));
-	if (payload)
-                rd_buf_push(&buf, payload, len, NULL);
-
-        rd_slice_init_full(&slice, &buf);
-
-	/* Simulate blocking behaviour on non-blocking socket..
-	 * FIXME: This isn't optimal but is highly unlikely to stall since
-	 *        the socket buffer will most likely not be exceeded. */
-	do {
-		int r;
-
-		r = (int)rd_kafka_transport_send(rktrans, &slice,
-                                                 errstr, errstr_size);
-		if (r == -1) {
-			rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "SASL",
-				   "SASL send failed: %s", errstr);
-                        rd_buf_destroy(&buf);
-			return -1;
-		}
-
-                if (rd_slice_remains(&slice) == 0)
-                        break;
-
-		/* Avoid busy-looping */
-		rd_usleep(10*1000, NULL);
-
-	} while (1);
-
-        rd_buf_destroy(&buf);
-
-	return 0;
-}
-
-
-/**
- * @brief Authentication successful
- *
- * Transition to next connect state.
- */
-void rd_kafka_sasl_auth_done (rd_kafka_transport_t *rktrans) {
-        /* Authenticated */
-        rd_kafka_broker_connect_up(rktrans->rktrans_rkb);
-}
-
-
-int rd_kafka_sasl_io_event (rd_kafka_transport_t *rktrans, int events,
-                            char *errstr, size_t errstr_size) {
-        rd_kafka_buf_t *rkbuf;
-        int r;
-        const void *buf;
-        size_t len;
-
-        if (!(events & POLLIN))
-                return 0;
-
-        r = rd_kafka_transport_framed_recv(rktrans, &rkbuf,
-                                           errstr, errstr_size);
-        if (r == -1) {
-                if (!strcmp(errstr, "Disconnected"))
-                        rd_snprintf(errstr, errstr_size,
-                                    "Disconnected: check client %s credentials "
-                                    "and broker logs",
-                                    rktrans->rktrans_rkb->rkb_rk->rk_conf.
-                                    sasl.mechanisms);
-                return -1;
-        } else if (r == 0) /* not fully received yet */
-                return 0;
-
-        rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "SASL",
-                   "Received SASL frame from broker (%"PRIusz" bytes)",
-                   rkbuf ? rkbuf->rkbuf_totlen : 0);
-
-        if (rkbuf) {
-                rd_slice_init_full(&rkbuf->rkbuf_reader, &rkbuf->rkbuf_buf);
-                /* Seek past framing header */
-                rd_slice_seek(&rkbuf->rkbuf_reader, 4);
-                len = rd_slice_remains(&rkbuf->rkbuf_reader);
-                buf = rd_slice_ensure_contig(&rkbuf->rkbuf_reader, len);
-        } else {
-                buf = NULL;
-                len = 0;
-        }
-
-        r = rktrans->rktrans_rkb->rkb_rk->
-                rk_conf.sasl.provider->recv(rktrans, buf, len,
-                                            errstr, errstr_size);
-        rd_kafka_buf_destroy(rkbuf);
-
-        return r;
-}
-
-
-/**
- * @brief Close SASL session (from transport code)
- * @remark May be called on non-SASL transports (no-op)
- */
-void rd_kafka_sasl_close (rd_kafka_transport_t *rktrans) {
-        const struct rd_kafka_sasl_provider *provider =
-                rktrans->rktrans_rkb->rkb_rk->rk_conf.
-                sasl.provider;
-
-        if (provider && provider->close)
-                provider->close(rktrans);
-}
-
-
-
-/**
- * Initialize and start SASL authentication.
- *
- * Returns 0 on successful init and -1 on error.
- *
- * Locality: broker thread
- */
-int rd_kafka_sasl_client_new (rd_kafka_transport_t *rktrans,
-			      char *errstr, size_t errstr_size) {
-	int r;
-	rd_kafka_broker_t *rkb = rktrans->rktrans_rkb;
-	rd_kafka_t *rk = rkb->rkb_rk;
-        char *hostname, *t;
-        const struct rd_kafka_sasl_provider *provider =
-                rk->rk_conf.sasl.provider;
-
-        /* Verify broker support:
-         * - RD_KAFKA_FEATURE_SASL_GSSAPI - GSSAPI supported
-         * - RD_KAFKA_FEATURE_SASL_HANDSHAKE - GSSAPI, PLAIN and possibly
-         *   other mechanisms supported. */
-        if (!strcmp(rk->rk_conf.sasl.mechanisms, "GSSAPI")) {
-                if (!(rkb->rkb_features & RD_KAFKA_FEATURE_SASL_GSSAPI)) {
-                        rd_snprintf(errstr, errstr_size,
-                                    "SASL GSSAPI authentication not supported "
-                                    "by broker");
-                        return -1;
-                }
-        } else if (!(rkb->rkb_features & RD_KAFKA_FEATURE_SASL_HANDSHAKE)) {
-                rd_snprintf(errstr, errstr_size,
-                            "SASL Handshake not supported by broker "
-                            "(required by mechanism %s)%s",
-                            rk->rk_conf.sasl.mechanisms,
-                            rk->rk_conf.api_version_request ? "" :
-                            ": try api.version.request=true");
-                return -1;
-        }
-
-        rd_strdupa(&hostname, rktrans->rktrans_rkb->rkb_nodename);
-        if ((t = strchr(hostname, ':')))
-                *t = '\0';  /* remove ":port" */
-
-        rd_rkb_dbg(rkb, SECURITY, "SASL",
-                   "Initializing SASL client: service name %s, "
-                   "hostname %s, mechanisms %s, provider %s",
-                   rk->rk_conf.sasl.service_name, hostname,
-                   rk->rk_conf.sasl.mechanisms,
-                   provider->name);
-
-        r = provider->client_new(rktrans, hostname, errstr, errstr_size);
-        if (r != -1)
-                rd_kafka_transport_poll_set(rktrans, POLLIN);
-
-        return r;
-}
-
-
-
-
-
-
-
-/**
- * Per handle SASL term.
- *
- * Locality: broker thread
- */
-void rd_kafka_sasl_broker_term (rd_kafka_broker_t *rkb) {
-        const struct rd_kafka_sasl_provider *provider =
-                rkb->rkb_rk->rk_conf.sasl.provider;
-        if (provider->broker_term)
-                provider->broker_term(rkb);
-}
-
-/**
- * Broker SASL init.
- *
- * Locality: broker thread
- */
-void rd_kafka_sasl_broker_init (rd_kafka_broker_t *rkb) {
-        const struct rd_kafka_sasl_provider *provider =
-                rkb->rkb_rk->rk_conf.sasl.provider;
-        if (provider->broker_init)
-                provider->broker_init(rkb);
-}
-
-
-
-/**
- * @brief Select SASL provider for configured mechanism (singularis)
- * @returns 0 on success or -1 on failure.
- */
-int rd_kafka_sasl_select_provider (rd_kafka_t *rk,
-                                   char *errstr, size_t errstr_size) {
-        const struct rd_kafka_sasl_provider *provider = NULL;
-
-        if (!strcmp(rk->rk_conf.sasl.mechanisms, "GSSAPI")) {
-                /* GSSAPI / Kerberos */
-#ifdef _MSC_VER
-                provider = &rd_kafka_sasl_win32_provider;
-#elif WITH_SASL_CYRUS
-                provider = &rd_kafka_sasl_cyrus_provider;
-#endif
-
-        } else if (!strcmp(rk->rk_conf.sasl.mechanisms, "PLAIN")) {
-                /* SASL PLAIN */
-                provider = &rd_kafka_sasl_plain_provider;
-
-        } else if (!strncmp(rk->rk_conf.sasl.mechanisms, "SCRAM-SHA-",
-                            strlen("SCRAM-SHA-"))) {
-                /* SASL SCRAM */
-#if WITH_SASL_SCRAM
-                provider = &rd_kafka_sasl_scram_provider;
-#endif
-
-        } else {
-                /* Unsupported mechanism */
-                rd_snprintf(errstr, errstr_size,
-                            "Unsupported SASL mechanism: %s",
-                            rk->rk_conf.sasl.mechanisms);
-                return -1;
-        }
-
-        if (!provider) {
-                rd_snprintf(errstr, errstr_size,
-                            "No provider for SASL mechanism %s"
-                            ": recompile librdkafka with "
-#ifndef _MSC_VER
-                            "libsasl2 or "
-#endif
-                            "openssl support. "
-                            "Current build options:"
-                            " PLAIN"
-#ifdef _MSC_VER
-                            " WindowsSSPI(GSSAPI)"
-#endif
-#if WITH_SASL_CYRUS
-                            " SASL_CYRUS"
-#endif
-#if WITH_SASL_SCRAM
-                            " SASL_SCRAM"
-#endif
-                            ,
-                            rk->rk_conf.sasl.mechanisms);
-                return -1;
-        }
-
-        rd_kafka_dbg(rk, SECURITY, "SASL",
-                     "Selected provider %s for SASL mechanism %s",
-                     provider->name, rk->rk_conf.sasl.mechanisms);
-
-        /* Validate SASL config */
-        if (provider->conf_validate &&
-            provider->conf_validate(rk, errstr, errstr_size) == -1)
-                return -1;
-
-        rk->rk_conf.sasl.provider = provider;
-
-        return 0;
-}
-
-
-
-/**
- * Global SASL termination.
- */
-void rd_kafka_sasl_global_term (void) {
-#if WITH_SASL_CYRUS
-        rd_kafka_sasl_cyrus_global_term();
-#endif
-}
-
-
-/**
- * Global SASL init, called once per runtime.
- */
-int rd_kafka_sasl_global_init (void) {
-#if WITH_SASL_CYRUS
-        return rd_kafka_sasl_cyrus_global_init();
-#else
-        return 0;
-#endif
-}
-

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdkafka_sasl.h
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdkafka_sasl.h b/thirdparty/librdkafka-0.11.1/src/rdkafka_sasl.h
deleted file mode 100644
index 496e04e..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdkafka_sasl.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * librdkafka - The Apache Kafka C/C++ library
- *
- * Copyright (c) 2015 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#pragma once
-
-
-
-int rd_kafka_sasl_io_event (rd_kafka_transport_t *rktrans, int events,
-			    char *errstr, size_t errstr_size);
-void rd_kafka_sasl_close (rd_kafka_transport_t *rktrans);
-int rd_kafka_sasl_client_new (rd_kafka_transport_t *rktrans,
-			      char *errstr, size_t errstr_size);
-
-void rd_kafka_sasl_broker_term (rd_kafka_broker_t *rkb);
-void rd_kafka_sasl_broker_init (rd_kafka_broker_t *rkb);
-
-void rd_kafka_sasl_global_term (void);
-int rd_kafka_sasl_global_init (void);
-
-int rd_kafka_sasl_select_provider (rd_kafka_t *rk,
-                                   char *errstr, size_t errstr_size);
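
The header above spells out the call surface: one-time global init/term,
per-client provider selection, per-broker init/term, and per-connection
client_new/recv/close. A rough sketch of how those layers nest, using
stand-in functions rather than the real entry points:

#include <stdio.h>

/* Stand-ins for the layers declared in rdkafka_sasl.h; the init
 * functions return 0 on success like the originals. */
static int  sasl_global_init (void)            { puts("global init"); return 0; }
static void sasl_global_term (void)            { puts("global term"); }
static int  sasl_client_new (const char *host) { printf("auth to %s\n", host); return 0; }
static void sasl_close (void)                  { puts("close transport"); }

int main (void) {
        if (sasl_global_init() == -1)           /* once per runtime */
                return 1;

        /* One authentication handshake per broker connection. */
        if (sasl_client_new("broker.example:9092") == 0)
                sasl_close();

        sasl_global_term();                     /* once, at shutdown */
        return 0;
}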

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdkafka_sasl_cyrus.c
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdkafka_sasl_cyrus.c b/thirdparty/librdkafka-0.11.1/src/rdkafka_sasl_cyrus.c
deleted file mode 100644
index 35b3183..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdkafka_sasl_cyrus.c
+++ /dev/null
@@ -1,623 +0,0 @@
-/*
- * librdkafka - The Apache Kafka C/C++ library
- *
- * Copyright (c) 2015 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "rdkafka_int.h"
-#include "rdkafka_transport.h"
-#include "rdkafka_transport_int.h"
-#include "rdkafka_sasl.h"
-#include "rdkafka_sasl_int.h"
-#include "rdstring.h"
-
-#ifdef __FreeBSD__
-#include <sys/wait.h>  /* For WIF.. */
-#endif
-
-#ifdef __APPLE__
-/* Apple has deprecated most of the SASL API for unknown reason,
- * silence those warnings. */
-#pragma clang diagnostic ignored "-Wdeprecated-declarations"
-#endif
-
-#include <sasl/sasl.h>
-
-static mtx_t rd_kafka_sasl_cyrus_kinit_lock;
-
-typedef struct rd_kafka_sasl_cyrus_state_s {
-        sasl_conn_t *conn;
-        sasl_callback_t callbacks[16];
-} rd_kafka_sasl_cyrus_state_t;
-
-
-
-/**
- * Handle received frame from broker.
- */
-static int rd_kafka_sasl_cyrus_recv (struct rd_kafka_transport_s *rktrans,
-                                     const void *buf, size_t size,
-                                     char *errstr, size_t errstr_size) {
-        rd_kafka_sasl_cyrus_state_t *state = rktrans->rktrans_sasl.state;
-        int r;
-
-        if (rktrans->rktrans_sasl.complete && size == 0)
-                goto auth_successful;
-
-        do {
-                sasl_interact_t *interact = NULL;
-                const char *out;
-                unsigned int outlen;
-
-                r = sasl_client_step(state->conn,
-                                     size > 0 ? buf : NULL, size,
-                                     &interact,
-                                     &out, &outlen);
-
-                if (r >= 0) {
-                        /* Note: outlen may be 0 here for an empty response */
-                        if (rd_kafka_sasl_send(rktrans, out, outlen,
-                                               errstr, errstr_size) == -1)
-                                return -1;
-                }
-
-                if (r == SASL_INTERACT)
-                        rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "SASL",
-                                   "SASL_INTERACT: %lu %s, %s, %s, %p",
-                                   interact->id,
-                                   interact->challenge,
-                                   interact->prompt,
-                                   interact->defresult,
-                                   interact->result);
-
-        } while (r == SASL_INTERACT);
-
-        if (r == SASL_CONTINUE)
-                return 0;  /* Wait for more data from broker */
-        else if (r != SASL_OK) {
-                rd_snprintf(errstr, errstr_size,
-                            "SASL handshake failed (step): %s",
-                            sasl_errdetail(state->conn));
-                return -1;
-        }
-
-        /* Authentication successful */
-auth_successful:
-        if (rktrans->rktrans_rkb->rkb_rk->rk_conf.debug &
-            RD_KAFKA_DBG_SECURITY) {
-                const char *user, *mech, *authsrc;
-
-                if (sasl_getprop(state->conn, SASL_USERNAME,
-                                 (const void **)&user) != SASL_OK)
-                        user = "(unknown)";
-
-                if (sasl_getprop(state->conn, SASL_MECHNAME,
-                                 (const void **)&mech) != SASL_OK)
-                        mech = "(unknown)";
-
-                if (sasl_getprop(state->conn, SASL_AUTHSOURCE,
-                                 (const void **)&authsrc) != SASL_OK)
-                        authsrc = "(unknown)";
-
-                rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "SASL",
-                           "Authenticated as %s using %s (%s)",
-                           user, mech, authsrc);
-        }
-
-        rd_kafka_sasl_auth_done(rktrans);
-
-        return 0;
-}
-
-
-
-
-static ssize_t render_callback (const char *key, char *buf,
-                                size_t size, void *opaque) {
-        rd_kafka_broker_t *rkb = opaque;
-
-        if (!strcmp(key, "broker.name")) {
-                char *val, *t;
-                size_t len;
-                rd_kafka_broker_lock(rkb);
-                rd_strdupa(&val, rkb->rkb_nodename);
-                rd_kafka_broker_unlock(rkb);
-
-                /* Just the broker name, no port */
-                if ((t = strchr(val, ':')))
-                        len = (size_t)(t-val);
-                else
-                        len = strlen(val);
-
-                if (buf)
-                        memcpy(buf, val, RD_MIN(len, size));
-
-                return len;
-
-        } else {
-                rd_kafka_conf_res_t res;
-                size_t destsize = size;
-
-                /* Try config lookup. */
-                res = rd_kafka_conf_get(&rkb->rkb_rk->rk_conf, key,
-                                        buf, &destsize);
-                if (res != RD_KAFKA_CONF_OK)
-                        return -1;
-
-                /* Don't include \0 in returned size */
-                return (destsize > 0 ? destsize-1 : destsize);
-        }
-}
-
-
-/**
- * Execute kinit to refresh ticket.
- *
- * Returns 0 on success, -1 on error.
- *
- * Locality: any
- */
-static int rd_kafka_sasl_cyrus_kinit_refresh (rd_kafka_broker_t *rkb) {
-        rd_kafka_t *rk = rkb->rkb_rk;
-        int r;
-        char *cmd;
-        char errstr[128];
-
-        if (!rk->rk_conf.sasl.kinit_cmd ||
-            !strstr(rk->rk_conf.sasl.mechanisms, "GSSAPI"))
-                return 0; /* kinit not configured */
-
-        /* Build kinit refresh command line using string rendering and config */
-        cmd = rd_string_render(rk->rk_conf.sasl.kinit_cmd,
-                               errstr, sizeof(errstr),
-                               render_callback, rkb);
-        if (!cmd) {
-                rd_rkb_log(rkb, LOG_ERR, "SASLREFRESH",
-                           "Failed to construct kinit command "
-                           "from sasl.kerberos.kinit.cmd template: %s",
-                           errstr);
-                return -1;
-        }
-
-        /* Execute kinit */
-        rd_rkb_dbg(rkb, SECURITY, "SASLREFRESH",
-                   "Refreshing SASL keys with command: %s", cmd);
-
-        mtx_lock(&rd_kafka_sasl_cyrus_kinit_lock);
-        r = system(cmd);
-        mtx_unlock(&rd_kafka_sasl_cyrus_kinit_lock);
-
-        if (r == -1) {
-                rd_rkb_log(rkb, LOG_ERR, "SASLREFRESH",
-                           "SASL key refresh failed: Failed to execute %s",
-                           cmd);
-                rd_free(cmd);
-                return -1;
-        } else if (WIFSIGNALED(r)) {
-                rd_rkb_log(rkb, LOG_ERR, "SASLREFRESH",
-                           "SASL key refresh failed: %s: received signal %d",
-                           cmd, WTERMSIG(r));
-                rd_free(cmd);
-                return -1;
-        } else if (WIFEXITED(r) && WEXITSTATUS(r) != 0) {
-                rd_rkb_log(rkb, LOG_ERR, "SASLREFRESH",
-                           "SASL key refresh failed: %s: exited with code %d",
-                           cmd, WEXITSTATUS(r));
-                rd_free(cmd);
-                return -1;
-        }
-
-        rd_free(cmd);
-
-        rd_rkb_dbg(rkb, SECURITY, "SASLREFRESH", "SASL key refreshed");
-        return 0;
-}
-
-
-/**
- * Refresh timer callback
- *
- * Locality: kafka main thread
- */
-static void rd_kafka_sasl_cyrus_kinit_refresh_tmr_cb (rd_kafka_timers_t *rkts,
-                                                      void *arg) {
-        rd_kafka_broker_t *rkb = arg;
-
-        rd_kafka_sasl_cyrus_kinit_refresh(rkb);
-}
-
-
-
-/**
- *
- * libsasl callbacks
- *
- */
-static RD_UNUSED int
-rd_kafka_sasl_cyrus_cb_getopt (void *context, const char *plugin_name,
-                         const char *option,
-                         const char **result, unsigned *len) {
-        rd_kafka_transport_t *rktrans = context;
-
-        if (!strcmp(option, "client_mech_list"))
-                *result = "GSSAPI";
-        if (!strcmp(option, "canon_user_plugin"))
-                *result = "INTERNAL";
-
-        if (*result && len)
-                *len = strlen(*result);
-
-        rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "LIBSASL",
-                   "CB_GETOPT: plugin %s, option %s: returning %s",
-                   plugin_name, option, *result);
-
-        return SASL_OK;
-}
-
-static int rd_kafka_sasl_cyrus_cb_log (void *context, int level, const char *message){
-        rd_kafka_transport_t *rktrans = context;
-
-        if (level >= LOG_DEBUG)
-                rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "LIBSASL",
-                           "%s", message);
-        else
-                rd_rkb_log(rktrans->rktrans_rkb, level, "LIBSASL",
-                           "%s", message);
-        return SASL_OK;
-}
-
-
-static int rd_kafka_sasl_cyrus_cb_getsimple (void *context, int id,
-                                       const char **result, unsigned *len) {
-        rd_kafka_transport_t *rktrans = context;
-
-        switch (id)
-        {
-        case SASL_CB_USER:
-        case SASL_CB_AUTHNAME:
-                *result = rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.username;
-                break;
-
-        default:
-                *result = NULL;
-                break;
-        }
-
-        if (len)
-                *len = *result ? strlen(*result) : 0;
-
-        rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "LIBSASL",
-                   "CB_GETSIMPLE: id 0x%x: returning %s", id, *result);
-
-        return *result ? SASL_OK : SASL_FAIL;
-}
-
-
-static int rd_kafka_sasl_cyrus_cb_getsecret (sasl_conn_t *conn, void *context,
-                                       int id, sasl_secret_t **psecret) {
-        rd_kafka_transport_t *rktrans = context;
-        const char *password;
-
-        password = rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.password;
-
-        if (!password) {
-                *psecret = NULL;
-        } else {
-                size_t passlen = strlen(password);
-                *psecret = rd_realloc(*psecret, sizeof(**psecret) + passlen);
-                (*psecret)->len = passlen;
-                memcpy((*psecret)->data, password, passlen);
-        }
-
-        rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "LIBSASL",
-                   "CB_GETSECRET: id 0x%x: returning %s",
-                   id, *psecret ? "(hidden)":"NULL");
-
-        return SASL_OK;
-}
-
-static int rd_kafka_sasl_cyrus_cb_chalprompt (void *context, int id,
-                                        const char *challenge,
-                                        const char *prompt,
-                                        const char *defres,
-                                        const char **result, unsigned *len) {
-        rd_kafka_transport_t *rktrans = context;
-
-        *result = "min_chalprompt";
-        *len = strlen(*result);
-
-        rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "LIBSASL",
-                   "CB_CHALPROMPT: id 0x%x, challenge %s, prompt %s, "
-                   "default %s: returning %s",
-                   id, challenge, prompt, defres, *result);
-
-        return SASL_OK;
-}
-
-static int rd_kafka_sasl_cyrus_cb_getrealm (void *context, int id,
-                                      const char **availrealms,
-                                      const char **result) {
-        rd_kafka_transport_t *rktrans = context;
-
-        *result = *availrealms;
-
-        rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "LIBSASL",
-                   "CB_GETREALM: id 0x%x: returning %s", id, *result);
-
-        return SASL_OK;
-}
-
-
-static RD_UNUSED int
-rd_kafka_sasl_cyrus_cb_canon (sasl_conn_t *conn,
-                              void *context,
-                              const char *in, unsigned inlen,
-                              unsigned flags,
-                              const char *user_realm,
-                              char *out, unsigned out_max,
-                              unsigned *out_len) {
-        rd_kafka_transport_t *rktrans = context;
-
-        if (strstr(rktrans->rktrans_rkb->rkb_rk->rk_conf.
-                   sasl.mechanisms, "GSSAPI")) {
-                *out_len = rd_snprintf(out, out_max, "%s",
-                                       rktrans->rktrans_rkb->rkb_rk->
-                                       rk_conf.sasl.principal);
-        } else if (!strcmp(rktrans->rktrans_rkb->rkb_rk->rk_conf.
-                           sasl.mechanisms, "PLAIN")) {
-                *out_len = rd_snprintf(out, out_max, "%.*s", inlen, in);
-        } else
-                out = NULL;
-
-        rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "LIBSASL",
-                   "CB_CANON: flags 0x%x, \"%.*s\" @ \"%s\": returning \"%.*s\"",
-                   flags, (int)inlen, in, user_realm, (int)(*out_len), out);
-
-        return out ? SASL_OK : SASL_FAIL;
-}
-
-
-static void rd_kafka_sasl_cyrus_close (struct rd_kafka_transport_s *rktrans) {
-        rd_kafka_sasl_cyrus_state_t *state = rktrans->rktrans_sasl.state;
-
-        if (!state)
-                return;
-
-        if (state->conn)
-                sasl_dispose(&state->conn);
-        rd_free(state);
-}
-
-
-/**
- * Initialize and start SASL authentication.
- *
- * Returns 0 on successful init and -1 on error.
- *
- * Locality: broker thread
- */
-static int rd_kafka_sasl_cyrus_client_new (rd_kafka_transport_t *rktrans,
-                                           const char *hostname,
-                                           char *errstr, size_t errstr_size) {
-        int r;
-        rd_kafka_sasl_cyrus_state_t *state;
-        rd_kafka_broker_t *rkb = rktrans->rktrans_rkb;
-        rd_kafka_t *rk = rkb->rkb_rk;
-        sasl_callback_t callbacks[16] = {
-                // { SASL_CB_GETOPT, (void *)rd_kafka_sasl_cyrus_cb_getopt, rktrans },
-                { SASL_CB_LOG, (void *)rd_kafka_sasl_cyrus_cb_log, rktrans },
-                { SASL_CB_AUTHNAME, (void *)rd_kafka_sasl_cyrus_cb_getsimple, rktrans },
-                { SASL_CB_PASS, (void *)rd_kafka_sasl_cyrus_cb_getsecret, rktrans },
-                { SASL_CB_ECHOPROMPT, (void *)rd_kafka_sasl_cyrus_cb_chalprompt, rktrans },
-                { SASL_CB_GETREALM, (void *)rd_kafka_sasl_cyrus_cb_getrealm, rktrans },
-                { SASL_CB_CANON_USER, (void *)rd_kafka_sasl_cyrus_cb_canon, rktrans },
-                { SASL_CB_LIST_END }
-        };
-
-        state = rd_calloc(1, sizeof(*state));
-        rktrans->rktrans_sasl.state = state;
-
-        /* SASL_CB_USER is needed for PLAIN but breaks GSSAPI */
-        if (!strcmp(rk->rk_conf.sasl.mechanisms, "PLAIN")) {
-                int endidx;
-                /* Find end of callbacks array */
-                for (endidx = 0 ;
-                     callbacks[endidx].id != SASL_CB_LIST_END ; endidx++)
-                        ;
-
-                callbacks[endidx].id = SASL_CB_USER;
-                callbacks[endidx].proc = (void *)rd_kafka_sasl_cyrus_cb_getsimple;
-                callbacks[endidx].context = rktrans;
-                endidx++;
-                callbacks[endidx].id = SASL_CB_LIST_END;
-        }
-
-        memcpy(state->callbacks, callbacks, sizeof(callbacks));
-
-        /* Acquire or refresh ticket if kinit is configured */ 
-        rd_kafka_sasl_cyrus_kinit_refresh(rkb);
-
-        r = sasl_client_new(rk->rk_conf.sasl.service_name, hostname,
-                            NULL, NULL, /* no local & remote IP checks */
-                            state->callbacks, 0, &state->conn);
-        if (r != SASL_OK) {
-                rd_snprintf(errstr, errstr_size, "%s",
-                            sasl_errstring(r, NULL, NULL));
-                return -1;
-        }
-
-        if (rk->rk_conf.debug & RD_KAFKA_DBG_SECURITY) {
-                const char *avail_mechs;
-                sasl_listmech(state->conn, NULL, NULL, " ", NULL,
-                              &avail_mechs, NULL, NULL);
-                rd_rkb_dbg(rkb, SECURITY, "SASL",
-                           "My supported SASL mechanisms: %s", avail_mechs);
-        }
-
-        do {
-                const char *out;
-                unsigned int outlen;
-                const char *mech = NULL;
-
-                r = sasl_client_start(state->conn,
-                                      rk->rk_conf.sasl.mechanisms,
-                                      NULL, &out, &outlen, &mech);
-
-                if (r >= 0)
-                        if (rd_kafka_sasl_send(rktrans, out, outlen,
-                                               errstr, errstr_size))
-                                return -1;
-        } while (r == SASL_INTERACT);
-
-        if (r == SASL_OK) {
-                /* PLAIN is apparently done here, but we still need to make sure
-                 * the PLAIN frame is sent and we get a response back (but we must
-                 * not pass the response to libsasl or it will fail). */
-                rktrans->rktrans_sasl.complete = 1;
-                return 0;
-
-        } else if (r != SASL_CONTINUE) {
-                rd_snprintf(errstr, errstr_size,
-                            "SASL handshake failed (start (%d)): %s",
-                            r, sasl_errdetail(state->conn));
-                return -1;
-        }
-
-        return 0;
-}
-
-
-
-
-
-
-
-/**
- * Per handle SASL term.
- *
- * Locality: broker thread
- */
-static void rd_kafka_sasl_cyrus_broker_term (rd_kafka_broker_t *rkb) {
-        rd_kafka_t *rk = rkb->rkb_rk;
-
-        if (!rk->rk_conf.sasl.kinit_cmd)
-                return;
-
-        rd_kafka_timer_stop(&rk->rk_timers, &rkb->rkb_sasl_kinit_refresh_tmr,1);
-}
-
-/**
- * Broker SASL init.
- *
- * Locality: broker thread
- */
-static void rd_kafka_sasl_cyrus_broker_init (rd_kafka_broker_t *rkb) {
-        rd_kafka_t *rk = rkb->rkb_rk;
-
-        if (!rk->rk_conf.sasl.kinit_cmd ||
-            !strstr(rk->rk_conf.sasl.mechanisms, "GSSAPI"))
-                return; /* kinit not configured, no need to start timer */
-
-        rd_kafka_timer_start(&rk->rk_timers, &rkb->rkb_sasl_kinit_refresh_tmr,
-                             rk->rk_conf.sasl.relogin_min_time * 1000ll,
-                             rd_kafka_sasl_cyrus_kinit_refresh_tmr_cb, rkb);
-}
-
-
-
-static int rd_kafka_sasl_cyrus_conf_validate (rd_kafka_t *rk,
-                                       char *errstr, size_t errstr_size) {
-
-        if (strcmp(rk->rk_conf.sasl.mechanisms, "GSSAPI"))
-                return 0;
-
-        if (rk->rk_conf.sasl.kinit_cmd) {
-                rd_kafka_broker_t rkb;
-                char *cmd;
-                char tmperr[128];
-
-                memset(&rkb, 0, sizeof(rkb));
-                strcpy(rkb.rkb_nodename, "ATestBroker:9092");
-                rkb.rkb_rk = rk;
-                mtx_init(&rkb.rkb_lock, mtx_plain);
-
-                cmd = rd_string_render(rk->rk_conf.sasl.kinit_cmd,
-                                       tmperr, sizeof(tmperr),
-                                       render_callback, &rkb);
-
-                mtx_destroy(&rkb.rkb_lock);
-
-                if (!cmd) {
-                        rd_snprintf(errstr, errstr_size,
-                                    "Invalid sasl.kerberos.kinit.cmd value: %s",
-                                    tmperr);
-                        return -1;
-                }
-
-                rd_free(cmd);
-        }
-
-        return 0;
-}
-
-
-/**
- * Global SASL termination.
- */
-void rd_kafka_sasl_cyrus_global_term (void) {
-        /* NOTE: Should not be called since the application may be using SASL too */
-        /* sasl_done(); */
-        mtx_destroy(&rd_kafka_sasl_cyrus_kinit_lock);
-}
-
-
-/**
- * Global SASL init, called once per runtime.
- */
-int rd_kafka_sasl_cyrus_global_init (void) {
-        int r;
-
-        mtx_init(&rd_kafka_sasl_cyrus_kinit_lock, mtx_plain);
-
-        r = sasl_client_init(NULL);
-        if (r != SASL_OK) {
-                fprintf(stderr, "librdkafka: sasl_client_init() failed: %s\n",
-                        sasl_errstring(r, NULL, NULL));
-                return -1;
-        }
-
-        return 0;
-}
-
-
-const struct rd_kafka_sasl_provider rd_kafka_sasl_cyrus_provider = {
-        .name          = "Cyrus",
-        .client_new    = rd_kafka_sasl_cyrus_client_new,
-        .recv          = rd_kafka_sasl_cyrus_recv,
-        .close         = rd_kafka_sasl_cyrus_close,
-        .broker_init   = rd_kafka_sasl_cyrus_broker_init,
-        .broker_term   = rd_kafka_sasl_cyrus_broker_term,
-        .conf_validate = rd_kafka_sasl_cyrus_conf_validate
-};
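
One detail worth noting in the Cyrus code above: rd_kafka_sasl_cyrus_kinit_refresh()
interprets the return value of system(3) three ways, namely failure to execute,
termination by signal, and a non-zero exit code. A self-contained illustration
of that checking pattern follows; the command string is only an example, not
the rendered kinit template.

#include <stdio.h>
#include <stdlib.h>
#include <sys/wait.h>

/* Run a shell command and report failure the same three ways the kinit
 * refresh above does. Returns 0 on success, -1 otherwise. */
static int run_checked (const char *cmd) {
        int r = system(cmd);

        if (r == -1) {
                perror("system");               /* could not execute at all */
                return -1;
        }
        if (WIFSIGNALED(r)) {
                fprintf(stderr, "%s: terminated by signal %d\n",
                        cmd, WTERMSIG(r));
                return -1;
        }
        if (WIFEXITED(r) && WEXITSTATUS(r) != 0) {
                fprintf(stderr, "%s: exited with code %d\n",
                        cmd, WEXITSTATUS(r));
                return -1;
        }
        return 0;
}

int main (void) {
        return run_checked("true") == 0 ? 0 : 1;
}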

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdkafka_sasl_int.h
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdkafka_sasl_int.h b/thirdparty/librdkafka-0.11.1/src/rdkafka_sasl_int.h
deleted file mode 100644
index 699174e..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdkafka_sasl_int.h
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * librdkafka - The Apache Kafka C/C++ library
- *
- * Copyright (c) 2015 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#pragma once
-
-struct rd_kafka_sasl_provider {
-        const char *name;
-
-        int (*client_new) (rd_kafka_transport_t *rktrans,
-                           const char *hostname,
-                           char *errstr, size_t errstr_size);
-
-        int (*recv) (struct rd_kafka_transport_s *s,
-                     const void *buf, size_t size,
-                     char *errstr, size_t errstr_size);
-        void (*close) (struct rd_kafka_transport_s *);
-
-        void (*broker_init) (rd_kafka_broker_t *rkb);
-        void (*broker_term) (rd_kafka_broker_t *rkb);
-
-        int (*conf_validate) (rd_kafka_t *rk,
-                              char *errstr, size_t errstr_size);
-};
-
-#ifdef _MSC_VER
-extern const struct rd_kafka_sasl_provider rd_kafka_sasl_win32_provider;
-#endif
-
-#if WITH_SASL_CYRUS
-extern const struct rd_kafka_sasl_provider rd_kafka_sasl_cyrus_provider;
-void rd_kafka_sasl_cyrus_global_term (void);
-int rd_kafka_sasl_cyrus_global_init (void);
-#endif
-
-extern const struct rd_kafka_sasl_provider rd_kafka_sasl_plain_provider;
-
-#if WITH_SASL_SCRAM
-extern const struct rd_kafka_sasl_provider rd_kafka_sasl_scram_provider;
-#endif
-
-void rd_kafka_sasl_auth_done (rd_kafka_transport_t *rktrans);
-int rd_kafka_sasl_send (rd_kafka_transport_t *rktrans,
-                        const void *payload, int len,
-                        char *errstr, size_t errstr_size);
-
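
The struct above is a plain vtable: each mechanism supplies a name and a set
of function pointers, and optional hooks such as conf_validate may be left
NULL, so callers check before invoking them. A cut-down sketch of that
convention (illustrative names, not the real rd_kafka_sasl_provider):

#include <stdio.h>

struct sasl_provider {
        const char *name;
        int  (*client_new) (const char *hostname);
        int  (*conf_validate) (void);   /* optional, may be NULL */
};

static int demo_client_new (const char *hostname) {
        printf("starting authentication against %s\n", hostname);
        return 0;
}

static const struct sasl_provider demo_provider = {
        .name       = "DEMO",
        .client_new = demo_client_new,
        /* .conf_validate intentionally left NULL */
};

int main (void) {
        /* Optional hook: only call it if the provider supplied one. */
        if (demo_provider.conf_validate &&
            demo_provider.conf_validate() == -1)
                return 1;

        return demo_provider.client_new("broker.example:9092") == 0 ? 0 : 1;
}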

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdkafka_sasl_plain.c
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdkafka_sasl_plain.c b/thirdparty/librdkafka-0.11.1/src/rdkafka_sasl_plain.c
deleted file mode 100644
index 57650ee..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdkafka_sasl_plain.c
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
- * librdkafka - The Apache Kafka C/C++ library
- *
- * Copyright (c) 2017 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-/**
- * Builtin SASL PLAIN support when Cyrus SASL is not available
- */
-#include "rdkafka_int.h"
-#include "rdkafka_transport.h"
-#include "rdkafka_transport_int.h"
-#include "rdkafka_sasl.h"
-#include "rdkafka_sasl_int.h"
-
-
-/**
- * @brief Handle received frame from broker.
- */
-static int rd_kafka_sasl_plain_recv (struct rd_kafka_transport_s *rktrans,
-                                     const void *buf, size_t size,
-                                     char *errstr, size_t errstr_size) {
-        if (size)
-                rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "SASLPLAIN",
-                           "Received non-empty SASL PLAIN (builtin) "
-                           "response from broker (%"PRIusz" bytes)", size);
-
-        rd_kafka_sasl_auth_done(rktrans);
-
-        return 0;
-}
-
-
-/**
- * @brief Initialize and start SASL PLAIN (builtin) authentication.
- *
- * Returns 0 on successful init and -1 on error.
- *
- * @locality broker thread
- */
-int rd_kafka_sasl_plain_client_new (rd_kafka_transport_t *rktrans,
-                                    const char *hostname,
-                                    char *errstr, size_t errstr_size) {
-        rd_kafka_broker_t *rkb = rktrans->rktrans_rkb;
-        rd_kafka_t *rk = rkb->rkb_rk;
-        /* [authzid] UTF8NUL authcid UTF8NUL passwd */
-        char buf[255+1+255+1+255+1];
-        int of = 0;
-
-        /* authzid: none (empty) */
-        /* UTF8NUL */
-        buf[of++] = 0;
-        /* authcid */
-        if (rk->rk_conf.sasl.username) {
-                int r = (int)strlen(rk->rk_conf.sasl.username);
-                r = RD_MIN(255, r);
-                memcpy(&buf[of], rk->rk_conf.sasl.username, r);
-                of += r;
-        }
-        /* UTF8NUL */
-        buf[of++] = 0;
-        /* passwd */
-        if (rk->rk_conf.sasl.password) {
-                int r = (int)strlen(rk->rk_conf.sasl.password);
-                r = RD_MIN(255, r);
-                memcpy(&buf[of], rk->rk_conf.sasl.password, r);
-                of += r;
-        }
-
-        rd_rkb_dbg(rkb, SECURITY, "SASLPLAIN",
-                   "Sending SASL PLAIN (builtin) authentication token");
-
-        if (rd_kafka_sasl_send(rktrans, buf, of,
-                               errstr, errstr_size))
-                return -1;
-
-        /* PLAIN is apparently done here, but we still need to make sure
-         * the PLAIN frame is sent and we get a response back (empty) */
-        rktrans->rktrans_sasl.complete = 1;
-        return 0;
-}
-
-
-/**
- * @brief Validate PLAIN config
- */
-static int rd_kafka_sasl_plain_conf_validate (rd_kafka_t *rk,
-                                              char *errstr,
-                                              size_t errstr_size) {
-        if (!rk->rk_conf.sasl.username || !rk->rk_conf.sasl.password) {
-                rd_snprintf(errstr, errstr_size,
-                            "sasl.username and sasl.password must be set");
-                return -1;
-        }
-
-        return 0;
-}
-
-
-const struct rd_kafka_sasl_provider rd_kafka_sasl_plain_provider = {
-        .name          = "PLAIN (builtin)",
-        .client_new    = rd_kafka_sasl_plain_client_new,
-        .recv          = rd_kafka_sasl_plain_recv,
-        .conf_validate = rd_kafka_sasl_plain_conf_validate
-};
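
The builtin PLAIN client above packs its initial response as
[authzid] NUL authcid NUL passwd, with the authzid left empty. A small
stand-alone sketch of that layout (buffer size and names are illustrative):

#include <stdio.h>

/* Build a SASL PLAIN initial response: [authzid] NUL authcid NUL passwd.
 * The authzid is left empty, as in the builtin client above. Returns the
 * number of bytes written; the token is binary, not a C string. */
static size_t build_plain_token (char *buf, size_t bufsize,
                                 const char *user, const char *pass) {
        size_t of = 0;

        buf[of++] = '\0';                               /* empty authzid */
        of += snprintf(buf + of, bufsize - of, "%s", user);
        buf[of++] = '\0';                               /* separator */
        of += snprintf(buf + of, bufsize - of, "%s", pass);
        return of;
}

int main (void) {
        char buf[3 * 255 + 2];
        size_t len = build_plain_token(buf, sizeof(buf), "alice", "secret");
        printf("token is %zu bytes\n", len);            /* 1 + 5 + 1 + 6 = 13 */
        return 0;
}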

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdkafka_sasl_scram.c
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdkafka_sasl_scram.c b/thirdparty/librdkafka-0.11.1/src/rdkafka_sasl_scram.c
deleted file mode 100644
index 968d879..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdkafka_sasl_scram.c
+++ /dev/null
@@ -1,901 +0,0 @@
-/*
- * librdkafka - The Apache Kafka C/C++ library
- *
- * Copyright (c) 2017 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-/**
- * Builtin SASL SCRAM support when Cyrus SASL is not available
- */
-#include "rdkafka_int.h"
-#include "rdkafka_transport.h"
-#include "rdkafka_transport_int.h"
-#include "rdkafka_sasl.h"
-#include "rdkafka_sasl_int.h"
-#include "rdrand.h"
-
-#if WITH_SSL
-#include <openssl/hmac.h>
-#include <openssl/evp.h>
-#include <openssl/sha.h>
-#else
-#error "WITH_SSL (OpenSSL) is required for SASL SCRAM"
-#endif
-
-
-/**
- * @brief Per-connection state
- */
-struct rd_kafka_sasl_scram_state {
-        enum {
-                RD_KAFKA_SASL_SCRAM_STATE_CLIENT_FIRST_MESSAGE,
-                RD_KAFKA_SASL_SCRAM_STATE_SERVER_FIRST_MESSAGE,
-                RD_KAFKA_SASL_SCRAM_STATE_CLIENT_FINAL_MESSAGE,
-        } state;
-        rd_chariov_t cnonce;         /* client c-nonce */
-        rd_chariov_t first_msg_bare; /* client-first-message-bare */
-        char *ServerSignatureB64;    /* ServerSignature in Base64 */
-        const EVP_MD *evp;  /* Hash function pointer */
-};
-
-
-/**
- * @brief Close and free authentication state
- */
-static void rd_kafka_sasl_scram_close (rd_kafka_transport_t *rktrans) {
-        struct rd_kafka_sasl_scram_state *state = rktrans->rktrans_sasl.state;
-
-        if (!state)
-                return;
-
-        RD_IF_FREE(state->cnonce.ptr, rd_free);
-        RD_IF_FREE(state->first_msg_bare.ptr, rd_free);
-        RD_IF_FREE(state->ServerSignatureB64, rd_free);
-        rd_free(state);
-}
-
-
-
-/**
- * @brief Generates a nonce string (a random printable string)
- * @remark dst->ptr will be allocated and must be freed.
- */
-static void rd_kafka_sasl_scram_generate_nonce (rd_chariov_t *dst) {
-        int i;
-        dst->size = 32;
-        dst->ptr = rd_malloc(dst->size+1);
-        for (i = 0 ; i < (int)dst->size ; i++)
-                dst->ptr[i] = 'a'; // (char)rd_jitter(0x2d/*-*/, 0x7e/*~*/);
-        dst->ptr[i] = 0;
-}
-
-
-/**
- * @brief Parses inbuf for SCRAM attribute \p attr (e.g., 's')
- * @returns a newly allocated copy of the value, or NULL
- *          on failure in which case an error is written to \p errstr
- *          prefixed by \p description.
- */
-static char *rd_kafka_sasl_scram_get_attr (const rd_chariov_t *inbuf, char attr,
-                                           const char *description,
-                                           char *errstr, size_t errstr_size) {
-        size_t of = 0;
-
-        for (of = 0 ; of < inbuf->size ; ) {
-                const char *td;
-                size_t len;
-
-                /* Find next delimiter , (if any) */
-                td = memchr(&inbuf->ptr[of], ',', inbuf->size - of);
-                if (td)
-                        len = (size_t)(td - &inbuf->ptr[of]);
-                else
-                        len = inbuf->size - of;
-
-                /* Check if attr "x=" matches */
-                if (inbuf->ptr[of] == attr && inbuf->size > of+1 &&
-                    inbuf->ptr[of+1] == '=') {
-                        char *ret;
-                        of += 2; /* past = */
-                        ret = rd_malloc(len - 2 + 1);
-                        memcpy(ret, &inbuf->ptr[of], len - 2);
-                        ret[len-2] = '\0';
-                        return ret;
-                }
-
-                /* Not the attr we are looking for, skip
-                 * past the next delimiter and continue looking. */
-                of += len+1;
-        }
-
-        rd_snprintf(errstr, errstr_size,
-                    "%s: could not find attribute (%c)",
-                    description, attr);
-        return NULL;
-}
-
-
-/**
- * @brief Base64 encode binary input \p in
- * @returns a newly allocated base64 string
- */
-static char *rd_base64_encode (const rd_chariov_t *in) {
-        BIO *buf, *b64f;
-        BUF_MEM *ptr;
-        char *out;
-
-        b64f = BIO_new(BIO_f_base64());
-        buf = BIO_new(BIO_s_mem());
-        buf = BIO_push(b64f, buf);
-
-        BIO_set_flags(buf, BIO_FLAGS_BASE64_NO_NL);
-        BIO_set_close(buf, BIO_CLOSE);
-        BIO_write(buf, in->ptr, (int)in->size);
-        BIO_flush(buf);
-
-        BIO_get_mem_ptr(buf, &ptr);
-        out = malloc(ptr->length + 1);
-        memcpy(out, ptr->data, ptr->length);
-        out[ptr->length] = '\0';
-
-        BIO_free_all(buf);
-
-        return out;
-}
-
-/**
- * @brief Base64 decode input string \p in of size \p insize.
- * @returns -1 on invalid Base64, or 0 on success, in which case a
- *         newly allocated binary string is set in out (and size).
- */
-static int rd_base64_decode (const rd_chariov_t *in, rd_chariov_t *out) {
-        size_t asize;
-        BIO *b64, *bmem;
-
-        if (in->size == 0 || (in->size % 4) != 0)
-                return -1;
-
-        asize = (in->size * 3) / 4; /* allocation size */
-        out->ptr = rd_malloc(asize+1);
-
-        b64 = BIO_new(BIO_f_base64());
-        BIO_set_flags(b64, BIO_FLAGS_BASE64_NO_NL);
-
-        bmem = BIO_new_mem_buf(in->ptr, (int)in->size);
-        bmem = BIO_push(b64, bmem);
-
-        out->size = BIO_read(bmem, out->ptr, (int)asize+1);
-        assert(out->size <= asize);
-        BIO_free_all(bmem);
-
-#if ENABLE_DEVEL
-        /* Verify that decode==encode */
-        {
-                char *encoded = rd_base64_encode(out);
-                assert(strlen(encoded) == in->size);
-                assert(!strncmp(encoded, in->ptr, in->size));
-                rd_free(encoded);
-        }
-#endif
-
-        return 0;
-}
-
-
-/**
- * @brief Perform H(str) hash function and stores the result in \p out
- *        which must be at least EVP_MAX_MD_SIZE.
- * @returns 0 on success, else -1
- */
-static int
-rd_kafka_sasl_scram_H (rd_kafka_transport_t *rktrans,
-                       const rd_chariov_t *str,
-                       rd_chariov_t *out) {
-
-        rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.scram_H(
-                (const unsigned char *)str->ptr, str->size,
-                (unsigned char *)out->ptr);
-
-        out->size = rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.scram_H_size;
-        return 0;
-}
-
-/**
- * @brief Perform HMAC(key,str) and stores the result in \p out
- *        which must be at least EVP_MAX_MD_SIZE.
- * @returns 0 on success, else -1
- */
-static int
-rd_kafka_sasl_scram_HMAC (rd_kafka_transport_t *rktrans,
-                          const rd_chariov_t *key,
-                          const rd_chariov_t *str,
-                          rd_chariov_t *out) {
-        const EVP_MD *evp =
-                rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.scram_evp;
-        unsigned int outsize;
-
-        //printf("HMAC KEY: %s\n", rd_base64_encode(key));
-        //printf("HMAC STR: %s\n", rd_base64_encode(str));
-
-        if (!HMAC(evp,
-                  (const unsigned char *)key->ptr, (int)key->size,
-                  (const unsigned char *)str->ptr, (int)str->size,
-                  (unsigned char *)out->ptr, &outsize)) {
-                rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "SCRAM",
-                           "HMAC failed");
-                return -1;
-        }
-
-        out->size = outsize;
-        //printf("HMAC OUT: %s\n", rd_base64_encode(out));
-
-        return 0;
-}
-
-
-
-/**
- * @brief Perform \p itcnt iterations of HMAC() on the given buffer \p in
- *        using \p salt, writing the output into \p out which must be
- *        at least EVP_MAX_MD_SIZE. Actual size is updated in \p *outsize.
- * @returns 0 on success, else -1
- */
-static int
-rd_kafka_sasl_scram_Hi (rd_kafka_transport_t *rktrans,
-                        const rd_chariov_t *in,
-                        const rd_chariov_t *salt,
-                        int itcnt, rd_chariov_t *out) {
-        const EVP_MD *evp =
-                rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.scram_evp;
-        unsigned int  ressize = 0;
-        unsigned char tempres[EVP_MAX_MD_SIZE];
-        unsigned char *saltplus;
-        int i;
-
-        /* U1   := HMAC(str, salt + INT(1)) */
-        saltplus = rd_alloca(salt->size + 4);
-        memcpy(saltplus, salt->ptr, salt->size);
-        saltplus[salt->size]   = 0;
-        saltplus[salt->size+1] = 0;
-        saltplus[salt->size+2] = 0;
-        saltplus[salt->size+3] = 1;
-
-        /* U1   := HMAC(str, salt + INT(1)) */
-        if (!HMAC(evp,
-                  (const unsigned char *)in->ptr, (int)in->size,
-                  saltplus, salt->size+4,
-                  tempres, &ressize)) {
-                rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "SCRAM",
-                           "HMAC priming failed");
-                return -1;
-        }
-
-        memcpy(out->ptr, tempres, ressize);
-
-        /* Ui-1 := HMAC(str, Ui-2) ..  */
-        for (i = 1 ; i < itcnt ; i++) {
-                unsigned char tempdest[EVP_MAX_MD_SIZE];
-                int j;
-
-                if (unlikely(!HMAC(evp,
-                                   (const unsigned char *)in->ptr, (int)in->size,
-                                   tempres, ressize,
-                                   tempdest, NULL))) {
-                        rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "SCRAM",
-                                   "Hi() HMAC #%d/%d failed", i, itcnt);
-                        return -1;
-                }
-
-                /* U1 XOR U2 .. */
-                for (j = 0 ; j < (int)ressize ; j++) {
-                        out->ptr[j] ^= tempdest[j];
-                        tempres[j] = tempdest[j];
-                }
-        }
-
-        out->size = ressize;
-
-        return 0;
-}
-
-
-/**
- * @returns a SASL value-safe-char encoded string, replacing "," and "="
- *          with their escaped counterparts in a newly allocated string.
- */
-static char *rd_kafka_sasl_safe_string (const char *str) {
-        char *safe = NULL, *d = NULL/*avoid warning*/;
-        int pass;
-        size_t len = 0;
-
-        /* Pass #1: scan for needed length and allocate.
-         * Pass #2: encode string */
-        for (pass = 0 ; pass < 2 ; pass++) {
-                const char *s;
-                for (s = str ; *s ; s++) {
-                        if (pass == 0) {
-                                len += 1 + (*s == ',' || *s == '=');
-                                continue;
-                        }
-
-                        if (*s == ',') {
-                                *(d++) = '=';
-                                *(d++) = '2';
-                                *(d++) = 'C';
-                        } else if (*s == '=') {
-                                *(d++) = '=';
-                                *(d++) = '3';
-                                *(d++) = 'D';
-                        } else
-                                *(d++) = *s;
-                }
-
-                if (pass == 0)
-                        d = safe = rd_malloc(len+1);
-        }
-
-        rd_assert(d == safe + (int)len);
-        *d = '\0';
-
-        return safe;
-}
-
-
-/**
- * @brief Build client-final-message-without-proof
- * @remark out->ptr will be allocated and must be freed.
- */
-static void
-rd_kafka_sasl_scram_build_client_final_message_wo_proof (
-        struct rd_kafka_sasl_scram_state *state,
-        const char *snonce,
-        rd_chariov_t *out) {
-        const char *attr_c = "biws"; /* base64 encode of "n,," */
-
-        /*
-         * client-final-message-without-proof =
-         *            channel-binding "," nonce [","
-         *            extensions]
-         */
-        out->size = strlen("c=,r=") + strlen(attr_c) +
-                state->cnonce.size + strlen(snonce);
-        out->ptr = rd_malloc(out->size+1);
-        rd_snprintf(out->ptr, out->size+1, "c=%s,r=%.*s%s",
-                    attr_c, (int)state->cnonce.size, state->cnonce.ptr, snonce);
-}
-
-
-/**
- * @brief Build client-final-message
- * @returns -1 on error.
- */
-static int
-rd_kafka_sasl_scram_build_client_final_message (
-        rd_kafka_transport_t *rktrans,
-        const rd_chariov_t *salt,
-        const char *server_nonce,
-        const rd_chariov_t *server_first_msg,
-        int itcnt, rd_chariov_t *out) {
-        struct rd_kafka_sasl_scram_state *state = rktrans->rktrans_sasl.state;
-        const rd_kafka_conf_t *conf = &rktrans->rktrans_rkb->rkb_rk->rk_conf;
-        rd_chariov_t SaslPassword =
-                { .ptr = conf->sasl.password,
-                  .size = strlen(conf->sasl.password) };
-        rd_chariov_t SaltedPassword =
-                { .ptr = rd_alloca(EVP_MAX_MD_SIZE) };
-        rd_chariov_t ClientKey =
-                { .ptr = rd_alloca(EVP_MAX_MD_SIZE) };
-        rd_chariov_t ServerKey =
-                { .ptr = rd_alloca(EVP_MAX_MD_SIZE) };
-        rd_chariov_t StoredKey =
-                { .ptr = rd_alloca(EVP_MAX_MD_SIZE) };
-        rd_chariov_t AuthMessage = RD_ZERO_INIT;
-        rd_chariov_t ClientSignature =
-                { .ptr = rd_alloca(EVP_MAX_MD_SIZE) };
-        rd_chariov_t ServerSignature =
-                { .ptr = rd_alloca(EVP_MAX_MD_SIZE) };
-        const rd_chariov_t ClientKeyVerbatim =
-                { .ptr = "Client Key", .size = 10 };
-        const rd_chariov_t ServerKeyVerbatim =
-                { .ptr = "Server Key", .size = 10 };
-        rd_chariov_t ClientProof =
-                { .ptr = rd_alloca(EVP_MAX_MD_SIZE) };
-        rd_chariov_t client_final_msg_wo_proof;
-        char *ClientProofB64;
-        int i;
-
-        /* Constructing the ClientProof attribute (p):
-         *
-         * p = Base64-encoded ClientProof
-         * SaltedPassword  := Hi(Normalize(password), salt, i)
-         * ClientKey       := HMAC(SaltedPassword, "Client Key")
-         * StoredKey       := H(ClientKey)
-         * AuthMessage     := client-first-message-bare + "," +
-         *                    server-first-message + "," +
-         *                    client-final-message-without-proof
-         * ClientSignature := HMAC(StoredKey, AuthMessage)
-         * ClientProof     := ClientKey XOR ClientSignature
-         * ServerKey       := HMAC(SaltedPassword, "Server Key")
-         * ServerSignature := HMAC(ServerKey, AuthMessage)
-         */
-
-        /* SaltedPassword  := Hi(Normalize(password), salt, i) */
-        if (rd_kafka_sasl_scram_Hi(
-                    rktrans, &SaslPassword, salt,
-                    itcnt, &SaltedPassword) == -1)
-                return -1;
-
-        /* ClientKey       := HMAC(SaltedPassword, "Client Key") */
-        if (rd_kafka_sasl_scram_HMAC(
-                    rktrans, &SaltedPassword, &ClientKeyVerbatim,
-                    &ClientKey) == -1)
-                return -1;
-
-        /* StoredKey       := H(ClientKey) */
-        if (rd_kafka_sasl_scram_H(rktrans, &ClientKey, &StoredKey) == -1)
-                return -1;
-
-        /* client-final-message-without-proof */
-        rd_kafka_sasl_scram_build_client_final_message_wo_proof(
-                state, server_nonce, &client_final_msg_wo_proof);
-
-        /* AuthMessage     := client-first-message-bare + "," +
-         *                    server-first-message + "," +
-         *                    client-final-message-without-proof */
-        AuthMessage.size =
-                state->first_msg_bare.size + 1 +
-                server_first_msg->size + 1 +
-                client_final_msg_wo_proof.size;
-        AuthMessage.ptr = rd_alloca(AuthMessage.size+1);
-        rd_snprintf(AuthMessage.ptr, AuthMessage.size+1,
-                    "%.*s,%.*s,%.*s",
-                    (int)state->first_msg_bare.size, state->first_msg_bare.ptr,
-                    (int)server_first_msg->size, server_first_msg->ptr,
-                    (int)client_final_msg_wo_proof.size,
-                    client_final_msg_wo_proof.ptr);
-
-        /*
-         * Calculate ServerSignature for later verification when
-         * server-final-message is received.
-         */
-
-        /* ServerKey       := HMAC(SaltedPassword, "Server Key") */
-        if (rd_kafka_sasl_scram_HMAC(
-                    rktrans, &SaltedPassword, &ServerKeyVerbatim,
-                    &ServerKey) == -1) {
-                rd_free(client_final_msg_wo_proof.ptr);
-                return -1;
-        }
-
-        /* ServerSignature := HMAC(ServerKey, AuthMessage) */
-        if (rd_kafka_sasl_scram_HMAC(rktrans, &ServerKey,
-                                     &AuthMessage, &ServerSignature) == -1) {
-                rd_free(client_final_msg_wo_proof.ptr);
-                return -1;
-        }
-
-        /* Store the Base64 encoded ServerSignature for quick comparison */
-        state->ServerSignatureB64 = rd_base64_encode(&ServerSignature);
-
-
-        /*
-         * Continue with client-final-message
-         */
-
-        /* ClientSignature := HMAC(StoredKey, AuthMessage) */
-        if (rd_kafka_sasl_scram_HMAC(rktrans, &StoredKey,
-                                     &AuthMessage, &ClientSignature) == -1) {
-                rd_free(client_final_msg_wo_proof.ptr);
-                return -1;
-        }
-
-        /* ClientProof     := ClientKey XOR ClientSignature */
-        assert(ClientKey.size == ClientSignature.size);
-        for (i = 0 ; i < (int)ClientKey.size ; i++)
-                ClientProof.ptr[i] = ClientKey.ptr[i] ^ ClientSignature.ptr[i];
-        ClientProof.size = ClientKey.size;
-
-
-        /* Base64 encoded ClientProof */
-        ClientProofB64 = rd_base64_encode(&ClientProof);
-
-        /* Construct client-final-message */
-        out->size = client_final_msg_wo_proof.size +
-                strlen(",p=") + strlen(ClientProofB64);
-        out->ptr = rd_malloc(out->size + 1);
-
-        rd_snprintf(out->ptr, out->size+1,
-                    "%.*s,p=%s",
-                    (int)client_final_msg_wo_proof.size,
-                    client_final_msg_wo_proof.ptr,
-                    ClientProofB64);
-        rd_free(ClientProofB64);
-        rd_free(client_final_msg_wo_proof.ptr);
-
-        return 0;
-}
-
-
-/**
- * @brief Handle first message from server
- *
- * Parse server response which looks something like:
- * "r=fyko+d2lbbFgONR....,s=QSXCR+Q6sek8bf92,i=4096"
- *
- * @returns -1 on error.
- */
-static int
-rd_kafka_sasl_scram_handle_server_first_message (rd_kafka_transport_t *rktrans,
-                                                 const rd_chariov_t *in,
-                                                 rd_chariov_t *out,
-                                                 char *errstr,
-                                                 size_t errstr_size) {
-        struct rd_kafka_sasl_scram_state *state = rktrans->rktrans_sasl.state;
-        char *server_nonce;
-        rd_chariov_t salt_b64, salt;
-        char *itcntstr;
-        const char *endptr;
-        int itcnt;
-        char *attr_m;
-
-        /* Mandatory future extension check */
-        if ((attr_m = rd_kafka_sasl_scram_get_attr(
-                     in, 'm', NULL, NULL, 0))) {
-                rd_snprintf(errstr, errstr_size,
-                            "Unsupported mandatory SCRAM extension");
-                rd_free(attr_m);
-                return -1;
-        }
-
-        /* Server nonce */
-        if (!(server_nonce = rd_kafka_sasl_scram_get_attr(
-                      in, 'r',
-                      "Server nonce in server-first-message",
-                      errstr, errstr_size)))
-                return -1;
-
-        if (strlen(server_nonce) <= state->cnonce.size ||
-            strncmp(state->cnonce.ptr, server_nonce, state->cnonce.size)) {
-                rd_snprintf(errstr, errstr_size,
-                            "Server/client nonce mismatch in "
-                            "server-first-message");
-                rd_free(server_nonce);
-                return -1;
-        }
-
-        /* Salt (Base64) */
-        if (!(salt_b64.ptr = rd_kafka_sasl_scram_get_attr(
-                      in, 's',
-                      "Salt in server-first-message",
-                      errstr, errstr_size))) {
-                rd_free(server_nonce);
-                return -1;
-        }
-        salt_b64.size = strlen(salt_b64.ptr);
-
-        /* Convert Salt to binary */
-        if (rd_base64_decode(&salt_b64, &salt) == -1) {
-                rd_snprintf(errstr, errstr_size,
-                            "Invalid Base64 Salt in server-first-message");
-                rd_free(server_nonce);
-                rd_free(salt_b64.ptr);
-                return -1;
-        }
-        rd_free(salt_b64.ptr);
-
-        /* Iteration count (as string) */
-        if (!(itcntstr = rd_kafka_sasl_scram_get_attr(
-                      in, 'i',
-                      "Iteration count in server-first-message",
-                      errstr, errstr_size))) {
-                rd_free(server_nonce);
-                rd_free(salt.ptr);
-                return -1;
-        }
-
-        /* Iteration count (as int) */
-        errno = 0;
-        itcnt = (int)strtoul(itcntstr, (char **)&endptr, 10);
-        if (itcntstr == endptr || *endptr != '\0' || errno != 0 ||
-            itcnt > 1000000) {
-                rd_snprintf(errstr, errstr_size,
-                            "Invalid value (not integer or too large) "
-                            "for Iteration count in server-first-message");
-                rd_free(server_nonce);
-                rd_free(salt.ptr);
-                rd_free(itcntstr);
-                return -1;
-        }
-        rd_free(itcntstr);
-
-        /* Build client-final-message */
-        if (rd_kafka_sasl_scram_build_client_final_message(
-                    rktrans, &salt, server_nonce, in, itcnt, out) == -1) {
-                rd_snprintf(errstr, errstr_size,
-                            "Failed to build SCRAM client-final-message");
-                rd_free(salt.ptr);
-                rd_free(server_nonce);
-                return -1;
-        }
-
-        rd_free(server_nonce);
-        rd_free(salt.ptr);
-
-        return 0;
-}
-
-/**
- * @brief Handle server-final-message
- * 
- *        This is the end of authentication and the SCRAM state
- *        will be freed at the end of this function regardless of
- *        authentication outcome.
- *
- * @returns -1 on failure
- */
-static int
-rd_kafka_sasl_scram_handle_server_final_message (
-        rd_kafka_transport_t *rktrans,
-        const rd_chariov_t *in,
-        char *errstr, size_t errstr_size) {
-        struct rd_kafka_sasl_scram_state *state = rktrans->rktrans_sasl.state;
-        char *attr_v, *attr_e;
-
-        if ((attr_e = rd_kafka_sasl_scram_get_attr(
-                            in, 'e', "server-error in server-final-message",
-                            errstr, errstr_size))) {
-                /* Authentication failed */
-
-                rd_snprintf(errstr, errstr_size,
-                            "SASL SCRAM authentication failed: "
-                            "broker responded with %s",
-                            attr_e);
-                rd_free(attr_e);
-                return -1;
-
-        } else if ((attr_v = rd_kafka_sasl_scram_get_attr(
-                     in, 'v', "verifier in server-final-message",
-                     errstr, errstr_size))) {
-                const rd_kafka_conf_t *conf;
-
-                /* Authentication successful on server,
-                 * but we need to verify the ServerSignature too. */
-                rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY | RD_KAFKA_DBG_BROKER,
-                           "SCRAMAUTH",
-                           "SASL SCRAM authentication successful on server: "
-                           "verifying ServerSignature");
-
-                if (strcmp(attr_v, state->ServerSignatureB64)) {
-                        rd_snprintf(errstr, errstr_size,
-                                    "SASL SCRAM authentication failed: "
-                                    "ServerSignature mismatch "
-                                    "(server's %s != ours %s)",
-                                    attr_v, state->ServerSignatureB64);
-                        rd_free(attr_v);
-                        return -1;
-                }
-                rd_free(attr_v);
-
-                conf = &rktrans->rktrans_rkb->rkb_rk->rk_conf;
-
-                rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY | RD_KAFKA_DBG_BROKER,
-                           "SCRAMAUTH",
-                           "Authenticated as %s using %s",
-                           conf->sasl.username,
-                           conf->sasl.mechanisms);
-
-                rd_kafka_sasl_auth_done(rktrans);
-                return 0;
-
-        } else {
-                rd_snprintf(errstr, errstr_size,
-                            "SASL SCRAM authentication failed: "
-                            "no verifier or server-error returned from broker");
-                return -1;
-        }
-}
-
-
-
-/**
- * @brief Build client-first-message
- */
-static void
-rd_kafka_sasl_scram_build_client_first_message (
-        rd_kafka_transport_t *rktrans,
-        rd_chariov_t *out) {
-        char *sasl_username;
-        struct rd_kafka_sasl_scram_state *state = rktrans->rktrans_sasl.state;
-        const rd_kafka_conf_t *conf = &rktrans->rktrans_rkb->rkb_rk->rk_conf;
-
-        rd_kafka_sasl_scram_generate_nonce(&state->cnonce);
-
-        sasl_username = rd_kafka_sasl_safe_string(conf->sasl.username);
-
-        out->size = strlen("n,,n=,r=") + strlen(sasl_username) +
-                state->cnonce.size;
-        out->ptr = rd_malloc(out->size+1);
-
-        rd_snprintf(out->ptr, out->size+1,
-                    "n,,n=%s,r=%.*s",
-                    sasl_username,
-                    (int)state->cnonce.size, state->cnonce.ptr);
-        rd_free(sasl_username);
-
-        /* Save client-first-message-bare (skip gs2-header) */
-        state->first_msg_bare.size = out->size-3;
-        state->first_msg_bare.ptr  = rd_memdup(out->ptr+3,
-                                               state->first_msg_bare.size);
-}
-
-
-
-/**
- * @brief SASL SCRAM client state machine
- * @returns -1 on failure (errstr set), else 0.
- */
-static int rd_kafka_sasl_scram_fsm (rd_kafka_transport_t *rktrans,
-                                    const rd_chariov_t *in,
-                                    char *errstr, size_t errstr_size) {
-        static const char *state_names[] = {
-                "client-first-message",
-                "server-first-message",
-                "client-final-message",
-        };
-        struct rd_kafka_sasl_scram_state *state = rktrans->rktrans_sasl.state;
-        rd_chariov_t out = RD_ZERO_INIT;
-        int r = -1;
-        rd_ts_t ts_start = rd_clock();
-        int prev_state = state->state;
-
-        rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "SASLSCRAM",
-                   "SASL SCRAM client in state %s",
-                   state_names[state->state]);
-
-        switch (state->state)
-        {
-        case RD_KAFKA_SASL_SCRAM_STATE_CLIENT_FIRST_MESSAGE:
-                rd_dassert(!in); /* Not expecting any server-input */
-
-                rd_kafka_sasl_scram_build_client_first_message(rktrans, &out);
-                state->state = RD_KAFKA_SASL_SCRAM_STATE_SERVER_FIRST_MESSAGE;
-                break;
-
-
-        case RD_KAFKA_SASL_SCRAM_STATE_SERVER_FIRST_MESSAGE:
-                rd_dassert(in); /* Requires server-input */
-
-                if (rd_kafka_sasl_scram_handle_server_first_message(
-                             rktrans, in, &out, errstr, errstr_size) == -1)
-                        return -1;
-
-                state->state = RD_KAFKA_SASL_SCRAM_STATE_CLIENT_FINAL_MESSAGE;
-                break;
-
-        case RD_KAFKA_SASL_SCRAM_STATE_CLIENT_FINAL_MESSAGE:
-                rd_dassert(in);  /* Requires server-input */
-
-                r = rd_kafka_sasl_scram_handle_server_final_message(
-                        rktrans, in, errstr, errstr_size);
-                break;
-        }
-
-        if (out.ptr) {
-                r = rd_kafka_sasl_send(rktrans, out.ptr, (int)out.size,
-                                       errstr, errstr_size);
-                rd_free(out.ptr);
-        }
-
-        ts_start = (rd_clock() - ts_start) / 1000;
-        if (ts_start >= 100)
-                rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "SCRAM",
-                           "SASL SCRAM state %s handled in %"PRId64"ms",
-                           state_names[prev_state], ts_start);
-
-
-        return r;
-}
-
-
-/**
- * @brief Handle received frame from broker.
- */
-static int rd_kafka_sasl_scram_recv (rd_kafka_transport_t *rktrans,
-                                     const void *buf, size_t size,
-                                     char *errstr, size_t errstr_size) {
-        const rd_chariov_t in = { .ptr = (char *)buf, .size = size };
-        return rd_kafka_sasl_scram_fsm(rktrans, &in, errstr, errstr_size);
-}
-
-
-/**
- * @brief Initialize and start SASL SCRAM (builtin) authentication.
- *
- * Returns 0 on successful init and -1 on error.
- *
- * @locality broker thread
- */
-static int rd_kafka_sasl_scram_client_new (rd_kafka_transport_t *rktrans,
-                                    const char *hostname,
-                                    char *errstr, size_t errstr_size) {
-        struct rd_kafka_sasl_scram_state *state;
-
-        state = rd_calloc(1, sizeof(*state));
-        state->state = RD_KAFKA_SASL_SCRAM_STATE_CLIENT_FIRST_MESSAGE;
-        rktrans->rktrans_sasl.state = state;
-
-        /* Kick off the FSM */
-        return rd_kafka_sasl_scram_fsm(rktrans, NULL, errstr, errstr_size);
-}
-
-
-
-/**
- * @brief Validate SCRAM config and look up the hash function
- */
-static int rd_kafka_sasl_scram_conf_validate (rd_kafka_t *rk,
-                                              char *errstr,
-                                              size_t errstr_size) {
-        const char *mech = rk->rk_conf.sasl.mechanisms;
-
-        if (!rk->rk_conf.sasl.username || !rk->rk_conf.sasl.password) {
-                rd_snprintf(errstr, errstr_size,
-                            "sasl.username and sasl.password must be set");
-                return -1;
-        }
-
-        if (!strcmp(mech, "SCRAM-SHA-1")) {
-                rk->rk_conf.sasl.scram_evp = EVP_sha1();
-                rk->rk_conf.sasl.scram_H = SHA1;
-                rk->rk_conf.sasl.scram_H_size = SHA_DIGEST_LENGTH;
-        } else if (!strcmp(mech, "SCRAM-SHA-256")) {
-                rk->rk_conf.sasl.scram_evp = EVP_sha256();
-                rk->rk_conf.sasl.scram_H = SHA256;
-                rk->rk_conf.sasl.scram_H_size = SHA256_DIGEST_LENGTH;
-        } else if (!strcmp(mech, "SCRAM-SHA-512")) {
-                rk->rk_conf.sasl.scram_evp = EVP_sha512();
-                rk->rk_conf.sasl.scram_H = SHA512;
-                rk->rk_conf.sasl.scram_H_size = SHA512_DIGEST_LENGTH;
-        } else {
-                rd_snprintf(errstr, errstr_size,
-                            "Unsupported hash function: %s "
-                            "(try SCRAM-SHA-512)",
-                            mech);
-                return -1;
-        }
-
-        return 0;
-}
-
-
-
-
-const struct rd_kafka_sasl_provider rd_kafka_sasl_scram_provider = {
-        .name          = "SCRAM (builtin)",
-        .client_new    = rd_kafka_sasl_scram_client_new,
-        .recv          = rd_kafka_sasl_scram_recv,
-        .close         = rd_kafka_sasl_scram_close,
-        .conf_validate = rd_kafka_sasl_scram_conf_validate,
-};
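
For context, a minimal sketch of how an application would select this builtin SCRAM provider through configuration. The broker security settings and credentials below are placeholders; the property names are standard librdkafka configuration keys checked by rd_kafka_sasl_scram_conf_validate() above.

    #include <iostream>
    #include <string>
    #include "rdkafkacpp.h"

    // Sketch: build a global Conf that selects SASL/SCRAM authentication.
    static RdKafka::Conf *make_scram_conf() {
      std::string errstr;
      RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
      // SCRAM-SHA-1, SCRAM-SHA-256 and SCRAM-SHA-512 are the mechanisms
      // accepted by the conf_validate() hook above.
      if (conf->set("security.protocol", "sasl_ssl", errstr) != RdKafka::Conf::CONF_OK ||
          conf->set("sasl.mechanisms", "SCRAM-SHA-256", errstr) != RdKafka::Conf::CONF_OK ||
          conf->set("sasl.username", "alice", errstr) != RdKafka::Conf::CONF_OK ||        /* placeholder */
          conf->set("sasl.password", "alice-secret", errstr) != RdKafka::Conf::CONF_OK) { /* placeholder */
        std::cerr << "SCRAM configuration failed: " << errstr << std::endl;
        delete conf;
        return NULL;
      }
      return conf;
    }
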


[02/51] [partial] nifi-minifi-cpp git commit: MINIFICPP-512 - upgrade to librdkafka 0.11.4

Posted by ph...@apache.org.
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/src-cpp/rdkafkacpp.h
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/src-cpp/rdkafkacpp.h b/thirdparty/librdkafka-0.11.4/src-cpp/rdkafkacpp.h
new file mode 100644
index 0000000..9f8196b
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/src-cpp/rdkafkacpp.h
@@ -0,0 +1,2284 @@
+/*
+ * librdkafka - Apache Kafka C/C++ library
+ *
+ * Copyright (c) 2014 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RDKAFKACPP_H_
+#define _RDKAFKACPP_H_
+
+/**
+ * @file rdkafkacpp.h
+ * @brief Apache Kafka C/C++ consumer and producer client library.
+ *
+ * rdkafkacpp.h contains the public C++ API for librdkafka.
+ * The API is documented in this file as comments prefixing the class,
+ * function, type, enum, define, etc.
+ * For more information, see the C interface in rdkafka.h and read the
+ * manual in INTRODUCTION.md.
+ * The C++ interface is STD C++ '03 compliant and adheres to the
+ * Google C++ Style Guide.
+
+ * @sa For the C interface see rdkafka.h
+ *
+ * @tableofcontents
+ */
+
+/**@cond NO_DOC*/
+#include <string>
+#include <list>
+#include <vector>
+#include <stdint.h>
+
+
+#ifdef _MSC_VER
+#undef RD_EXPORT
+#ifdef LIBRDKAFKA_STATICLIB
+#define RD_EXPORT
+#else
+#ifdef LIBRDKAFKACPP_EXPORTS
+#define RD_EXPORT __declspec(dllexport)
+#else
+#define RD_EXPORT __declspec(dllimport)
+#endif
+#endif
+#else
+#define RD_EXPORT
+#endif
+
+/**@endcond*/
+
+extern "C" {
+        /* Forward declarations */
+        struct rd_kafka_s;
+        struct rd_kafka_topic_s;
+        struct rd_kafka_message_s;
+};
+
+namespace RdKafka {
+
+
+/**
+ * @name Miscellaneous APIs
+ * @{
+ */
+
+/**
+ * @brief librdkafka version
+ *
+ * Interpreted as hex \c MM.mm.rr.xx:
+ *  - MM = Major
+ *  - mm = minor
+ *  - rr = revision
+ *  - xx = pre-release id (0xff is the final release)
+ *
+ * E.g.: \c 0x000801ff = 0.8.1
+ *
+ * @remark This value should only be used during compile time,
+ *         for runtime checks of version use RdKafka::version()
+ */
+#define RD_KAFKA_VERSION  0x000b04ff
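
As an illustration of the hex layout described above, a small sketch (assuming the program links against librdkafka) that decodes the runtime version returned by RdKafka::version(), declared just below:

    #include <cstdio>
    #include "rdkafkacpp.h"

    int main() {
      int v = RdKafka::version();  /* e.g. 0x000b04ff for release 0.11.4 */
      std::printf("librdkafka %s (major %d, minor %d, revision %d)\n",
                  RdKafka::version_str().c_str(),
                  (v >> 24) & 0xff, (v >> 16) & 0xff, (v >> 8) & 0xff);
      return 0;
    }
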
+
+/**
+ * @brief Returns the librdkafka version as integer.
+ *
+ * @sa See RD_KAFKA_VERSION for how to parse the integer format.
+ */
+RD_EXPORT
+int          version ();
+
+/**
+ * @brief Returns the librdkafka version as string.
+ */
+RD_EXPORT
+std::string  version_str();
+
+/**
+ * @brief Returns a CSV list of the supported debug contexts
+ *        for use with Conf::Set("debug", ..).
+ */
+RD_EXPORT
+std::string get_debug_contexts();
+
+/**
+ * @brief Wait for all rd_kafka_t objects to be destroyed.
+ *
+ * @returns 0 if all kafka objects are now destroyed, or -1 if the
+ * timeout was reached.
+ * Since RdKafka handle deletion is an asynchronous operation, the
+ * \p wait_destroyed() function can be used for applications where
+ * a clean shutdown is required.
+ */
+RD_EXPORT
+int          wait_destroyed(int timeout_ms);
+
+
+/**@}*/
+
+
+
+/**
+ * @name Constants, errors, types
+ * @{
+ *
+ *
+ */
+
+/**
+ * @brief Error codes.
+ *
+ * The negative error codes delimited by two underscores
+ * (\c _ERR__..) denote errors internal to librdkafka and are
+ * displayed as \c \"Local: \<error string..\>\", while the error codes
+ * delimited by a single underscore (\c ERR_..) denote broker
+ * errors and are displayed as \c \"Broker: \<error string..\>\".
+ *
+ * @sa Use RdKafka::err2str() to translate an error code into a human readable string
+ */
+enum ErrorCode {
+	/* Internal errors to rdkafka: */
+	/** Begin internal error codes */
+	ERR__BEGIN = -200,
+	/** Received message is incorrect */
+	ERR__BAD_MSG = -199,
+	/** Bad/unknown compression */
+	ERR__BAD_COMPRESSION = -198,
+	/** Broker is going away */
+	ERR__DESTROY = -197,
+	/** Generic failure */
+	ERR__FAIL = -196,
+	/** Broker transport failure */
+	ERR__TRANSPORT = -195,
+	/** Critical system resource */
+	ERR__CRIT_SYS_RESOURCE = -194,
+	/** Failed to resolve broker */
+	ERR__RESOLVE = -193,
+	/** Produced message timed out*/
+	ERR__MSG_TIMED_OUT = -192,
+	/** Reached the end of the topic+partition queue on
+	 * the broker. Not really an error. */
+	ERR__PARTITION_EOF = -191,
+	/** Permanent: Partition does not exist in cluster. */
+	ERR__UNKNOWN_PARTITION = -190,
+	/** File or filesystem error */
+	ERR__FS = -189,
+	 /** Permanent: Topic does not exist in cluster. */
+	ERR__UNKNOWN_TOPIC = -188,
+	/** All broker connections are down. */
+	ERR__ALL_BROKERS_DOWN = -187,
+	/** Invalid argument, or invalid configuration */
+	ERR__INVALID_ARG = -186,
+	/** Operation timed out */
+	ERR__TIMED_OUT = -185,
+	/** Queue is full */
+	ERR__QUEUE_FULL = -184,
+	/** ISR count < required.acks */
+        ERR__ISR_INSUFF = -183,
+	/** Broker node update */
+        ERR__NODE_UPDATE = -182,
+	/** SSL error */
+	ERR__SSL = -181,
+	/** Waiting for coordinator to become available. */
+        ERR__WAIT_COORD = -180,
+	/** Unknown client group */
+        ERR__UNKNOWN_GROUP = -179,
+	/** Operation in progress */
+        ERR__IN_PROGRESS = -178,
+	 /** Previous operation in progress, wait for it to finish. */
+        ERR__PREV_IN_PROGRESS = -177,
+	 /** This operation would interfere with an existing subscription */
+        ERR__EXISTING_SUBSCRIPTION = -176,
+	/** Assigned partitions (rebalance_cb) */
+        ERR__ASSIGN_PARTITIONS = -175,
+	/** Revoked partitions (rebalance_cb) */
+        ERR__REVOKE_PARTITIONS = -174,
+	/** Conflicting use */
+        ERR__CONFLICT = -173,
+	/** Wrong state */
+        ERR__STATE = -172,
+	/** Unknown protocol */
+        ERR__UNKNOWN_PROTOCOL = -171,
+	/** Not implemented */
+        ERR__NOT_IMPLEMENTED = -170,
+	/** Authentication failure*/
+	ERR__AUTHENTICATION = -169,
+	/** No stored offset */
+	ERR__NO_OFFSET = -168,
+	/** Outdated */
+	ERR__OUTDATED = -167,
+	/** Timed out in queue */
+	ERR__TIMED_OUT_QUEUE = -166,
+        /** Feature not supported by broker */
+        ERR__UNSUPPORTED_FEATURE = -165,
+        /** Awaiting cache update */
+        ERR__WAIT_CACHE = -164,
+        /** Operation interrupted */
+        ERR__INTR = -163,
+        /** Key serialization error */
+        ERR__KEY_SERIALIZATION = -162,
+        /** Value serialization error */
+        ERR__VALUE_SERIALIZATION = -161,
+        /** Key deserialization error */
+        ERR__KEY_DESERIALIZATION = -160,
+        /** Value deserialization error */
+        ERR__VALUE_DESERIALIZATION = -159,
+        /** Partial response */
+        ERR__PARTIAL = -158,
+        /** Modification attempted on read-only object */
+        ERR__READ_ONLY = -157,
+        /** No such entry / item not found */
+        ERR__NOENT = -156,
+        /** Read underflow */
+        ERR__UNDERFLOW = -155,
+
+        /** End internal error codes */
+	ERR__END = -100,
+
+	/* Kafka broker errors: */
+	/** Unknown broker error */
+	ERR_UNKNOWN = -1,
+	/** Success */
+	ERR_NO_ERROR = 0,
+	/** Offset out of range */
+	ERR_OFFSET_OUT_OF_RANGE = 1,
+	/** Invalid message */
+	ERR_INVALID_MSG = 2,
+	/** Unknown topic or partition */
+	ERR_UNKNOWN_TOPIC_OR_PART = 3,
+	/** Invalid message size */
+	ERR_INVALID_MSG_SIZE = 4,
+	/** Leader not available */
+	ERR_LEADER_NOT_AVAILABLE = 5,
+	/** Not leader for partition */
+	ERR_NOT_LEADER_FOR_PARTITION = 6,
+	/** Request timed out */
+	ERR_REQUEST_TIMED_OUT = 7,
+	/** Broker not available */
+	ERR_BROKER_NOT_AVAILABLE = 8,
+	/** Replica not available */
+	ERR_REPLICA_NOT_AVAILABLE = 9,
+	/** Message size too large */
+	ERR_MSG_SIZE_TOO_LARGE = 10,
+	/** StaleControllerEpochCode */
+	ERR_STALE_CTRL_EPOCH = 11,
+	/** Offset metadata string too large */
+	ERR_OFFSET_METADATA_TOO_LARGE = 12,
+	/** Broker disconnected before response received */
+	ERR_NETWORK_EXCEPTION = 13,
+	/** Group coordinator load in progress */
+        ERR_GROUP_LOAD_IN_PROGRESS = 14,
+	 /** Group coordinator not available */
+        ERR_GROUP_COORDINATOR_NOT_AVAILABLE = 15,
+	/** Not coordinator for group */
+        ERR_NOT_COORDINATOR_FOR_GROUP = 16,
+	/** Invalid topic */
+        ERR_TOPIC_EXCEPTION = 17,
+	/** Message batch larger than configured server segment size */
+        ERR_RECORD_LIST_TOO_LARGE = 18,
+	/** Not enough in-sync replicas */
+        ERR_NOT_ENOUGH_REPLICAS = 19,
+	/** Message(s) written to insufficient number of in-sync replicas */
+        ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND = 20,
+	/** Invalid required acks value */
+        ERR_INVALID_REQUIRED_ACKS = 21,
+	/** Specified group generation id is not valid */
+        ERR_ILLEGAL_GENERATION = 22,
+	/** Inconsistent group protocol */
+        ERR_INCONSISTENT_GROUP_PROTOCOL = 23,
+	/** Invalid group.id */
+	ERR_INVALID_GROUP_ID = 24,
+	/** Unknown member */
+        ERR_UNKNOWN_MEMBER_ID = 25,
+	/** Invalid session timeout */
+        ERR_INVALID_SESSION_TIMEOUT = 26,
+	/** Group rebalance in progress */
+	ERR_REBALANCE_IN_PROGRESS = 27,
+	/** Commit offset data size is not valid */
+        ERR_INVALID_COMMIT_OFFSET_SIZE = 28,
+	/** Topic authorization failed */
+        ERR_TOPIC_AUTHORIZATION_FAILED = 29,
+	/** Group authorization failed */
+	ERR_GROUP_AUTHORIZATION_FAILED = 30,
+	/** Cluster authorization failed */
+	ERR_CLUSTER_AUTHORIZATION_FAILED = 31,
+        /** Invalid timestamp */
+        ERR_INVALID_TIMESTAMP = 32,
+        /** Unsupported SASL mechanism */
+        ERR_UNSUPPORTED_SASL_MECHANISM = 33,
+        /** Illegal SASL state */
+        ERR_ILLEGAL_SASL_STATE = 34,
+        /** Unsupported version */
+        ERR_UNSUPPORTED_VERSION = 35,
+        /** Topic already exists */
+        ERR_TOPIC_ALREADY_EXISTS = 36,
+        /** Invalid number of partitions */
+        ERR_INVALID_PARTITIONS = 37,
+        /** Invalid replication factor */
+        ERR_INVALID_REPLICATION_FACTOR = 38,
+        /** Invalid replica assignment */
+        ERR_INVALID_REPLICA_ASSIGNMENT = 39,
+        /** Invalid config */
+        ERR_INVALID_CONFIG = 40,
+        /** Not controller for cluster */
+        ERR_NOT_CONTROLLER = 41,
+        /** Invalid request */
+        ERR_INVALID_REQUEST = 42,
+        /** Message format on broker does not support request */
+        ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT = 43,
+        /** Isolation policy violation */
+        ERR_POLICY_VIOLATION = 44,
+        /** Broker received an out of order sequence number */
+        ERR_OUT_OF_ORDER_SEQUENCE_NUMBER = 45,
+        /** Broker received a duplicate sequence number */
+        ERR_DUPLICATE_SEQUENCE_NUMBER = 46,
+        /** Producer attempted an operation with an old epoch */
+        ERR_INVALID_PRODUCER_EPOCH = 47,
+        /** Producer attempted a transactional operation in an invalid state */
+        ERR_INVALID_TXN_STATE = 48,
+        /** Producer attempted to use a producer id which is not
+         *  currently assigned to its transactional id */
+        ERR_INVALID_PRODUCER_ID_MAPPING = 49,
+        /** Transaction timeout is larger than the maximum
+         *  value allowed by the broker's max.transaction.timeout.ms */
+        ERR_INVALID_TRANSACTION_TIMEOUT = 50,
+        /** Producer attempted to update a transaction while another
+         *  concurrent operation on the same transaction was ongoing */
+        ERR_CONCURRENT_TRANSACTIONS = 51,
+        /** Indicates that the transaction coordinator sending a
+         *  WriteTxnMarker is no longer the current coordinator for a
+         *  given producer */
+        ERR_TRANSACTION_COORDINATOR_FENCED = 52,
+        /** Transactional Id authorization failed */
+        ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED = 53,
+        /** Security features are disabled */
+        ERR_SECURITY_DISABLED = 54,
+        /** Operation not attempted */
+        ERR_OPERATION_NOT_ATTEMPTED = 55
+};
+
+
+/**
+ * @brief Returns a human readable representation of a kafka error.
+ */
+RD_EXPORT
+std::string  err2str(RdKafka::ErrorCode err);
+
+
+/**@} */
+
+
+
+/**@cond NO_DOC*/
+/* Forward declarations */
+class Producer;
+class Message;
+class Queue;
+class Event;
+class Topic;
+class TopicPartition;
+class Metadata;
+class KafkaConsumer;
+/**@endcond*/
+
+
+/**
+ * @name Callback classes
+ * @{
+ *
+ *
+ * librdkafka uses (optional) callbacks to propagate information and
+ * delegate decisions to the application logic.
+ *
+ * An application must call RdKafka::poll() at regular intervals to
+ * serve queued callbacks.
+ */
+
+
+/**
+ * @brief Delivery Report callback class
+ *
+ * The delivery report callback will be called once for each message
+ * accepted by RdKafka::Producer::produce() (et al.) with
+ * RdKafka::Message::err() set to indicate the result of the produce request.
+ *
+ * The callback is called when a message is successfully produced or
+ * if librdkafka encountered a permanent failure, or the retry counter for
+ * temporary errors has been exhausted.
+ *
+ * An application must call RdKafka::poll() at regular intervals to
+ * serve queued delivery report callbacks.
+
+ */
+class RD_EXPORT DeliveryReportCb {
+ public:
+  /**
+   * @brief Delivery report callback.
+   */
+  virtual void dr_cb (Message &message) = 0;
+
+  virtual ~DeliveryReportCb() { }
+};
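
A minimal sketch of a delivery report callback; it would be registered with Conf::set("dr_cb", ...) further down and served from RdKafka::Producer::poll():

    #include <iostream>
    #include "rdkafkacpp.h"

    class ExampleDrCb : public RdKafka::DeliveryReportCb {
     public:
      void dr_cb(RdKafka::Message &message) {
        if (message.err())
          std::cerr << "delivery failed: " << message.errstr() << std::endl;
        else
          std::cerr << "delivered to " << message.topic_name()
                    << " [" << message.partition() << "] @ "
                    << message.offset() << std::endl;
      }
    };
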
+
+
+/**
+ * @brief Partitioner callback class
+ *
+ * Generic partitioner callback class for implementing custom partitioners.
+ *
+ * @sa RdKafka::Conf::set() \c "partitioner_cb"
+ */
+class RD_EXPORT PartitionerCb {
+ public:
+  /**
+   * @brief Partitioner callback
+   *
+   * Return the partition to use for \p key in \p topic.
+   *
+   * The \p msg_opaque is the same \p msg_opaque provided in the
+   * RdKafka::Producer::produce() call.
+   *
+   * @remark \p key may be NULL or empty.
+   *
+   * @returns Must return a value between 0 and \p partition_cnt (non-inclusive).
+   *          May return RD_KAFKA_PARTITION_UA (-1) if partitioning failed.
+   *
+   * @sa The callback may use RdKafka::Topic::partition_available() to check
+   *     if a partition has an active leader broker.
+   */
+  virtual int32_t partitioner_cb (const Topic *topic,
+                                  const std::string *key,
+                                  int32_t partition_cnt,
+                                  void *msg_opaque) = 0;
+
+  virtual ~PartitionerCb() { }
+};
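
A sketch of a custom partitioner (assuming rdkafkacpp.h is included): keyless messages arbitrarily go to partition 0 in this example, and the simple byte hash keeps it C++03-compatible:

    class ExamplePartitionerCb : public RdKafka::PartitionerCb {
     public:
      int32_t partitioner_cb(const RdKafka::Topic *topic, const std::string *key,
                             int32_t partition_cnt, void *msg_opaque) {
        if (!key || key->empty() || partition_cnt <= 0)
          return 0;  /* arbitrary choice for keyless messages in this sketch */
        unsigned int h = 0;
        for (std::string::size_type i = 0; i < key->size(); i++)
          h = 31u * h + static_cast<unsigned char>((*key)[i]);
        return static_cast<int32_t>(h % static_cast<unsigned int>(partition_cnt));
      }
    };
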
+
+/**
+ * @brief  Variant partitioner with key pointer
+ *
+ */
+class PartitionerKeyPointerCb {
+ public:
+  /**
+   * @brief Variant partitioner callback that gets \p key as pointer and length
+   *        instead of as a const std::string *.
+   *
+   * @remark \p key may be NULL or have \p key_len 0.
+   *
+   * @sa See RdKafka::PartitionerCb::partitioner_cb() for exact semantics
+   */
+  virtual int32_t partitioner_cb (const Topic *topic,
+                                  const void *key,
+                                  size_t key_len,
+                                  int32_t partition_cnt,
+                                  void *msg_opaque) = 0;
+
+  virtual ~PartitionerKeyPointerCb() { }
+};
+
+
+
+/**
+ * @brief Event callback class
+ *
+ * Events are a generic interface for propagating errors, statistics, logs, etc
+ * from librdkafka to the application.
+ *
+ * @sa RdKafka::Event
+ */
+class RD_EXPORT EventCb {
+ public:
+  /**
+   * @brief Event callback
+   *
+   * @sa RdKafka::Event
+   */
+  virtual void event_cb (Event &event) = 0;
+
+  virtual ~EventCb() { }
+};
+
+
+/**
+ * @brief Event object class as passed to the EventCb callback.
+ */
+class RD_EXPORT Event {
+ public:
+  /** @brief Event type */
+  enum Type {
+    EVENT_ERROR,     /**< Event is an error condition */
+    EVENT_STATS,     /**< Event is a statistics JSON document */
+    EVENT_LOG,       /**< Event is a log message */
+    EVENT_THROTTLE   /**< Event is a throttle level signaling from the broker */
+  };
+
+  /** @brief EVENT_LOG severities (conforms to syslog(3) severities) */
+  enum Severity {
+    EVENT_SEVERITY_EMERG = 0,
+    EVENT_SEVERITY_ALERT = 1,
+    EVENT_SEVERITY_CRITICAL = 2,
+    EVENT_SEVERITY_ERROR = 3,
+    EVENT_SEVERITY_WARNING = 4,
+    EVENT_SEVERITY_NOTICE = 5,
+    EVENT_SEVERITY_INFO = 6,
+    EVENT_SEVERITY_DEBUG = 7
+  };
+
+  virtual ~Event () { }
+
+  /*
+   * Event Accessor methods
+   */
+
+  /**
+   * @returns The event type
+   * @remark Applies to all event types
+   */
+  virtual Type        type () const = 0;
+
+  /**
+   * @returns Event error, if any.
+   * @remark Applies to all event types except THROTTLE
+   */
+  virtual ErrorCode   err () const = 0;
+
+  /**
+   * @returns Log severity level.
+   * @remark Applies to LOG event type.
+   */
+  virtual Severity    severity () const = 0;
+
+  /**
+   * @returns Log facility string.
+   * @remark Applies to LOG event type.
+   */
+  virtual std::string fac () const = 0;
+
+  /**
+   * @returns Log message string.
+   *
+   * \c EVENT_LOG: Log message string.
+   * \c EVENT_STATS: JSON object (as string).
+   *
+   * @remark Applies to LOG event type.
+   */
+  virtual std::string str () const = 0;
+
+  /**
+   * @returns Throttle time in milliseconds.
+   * @remark Applies to THROTTLE event type.
+   */
+  virtual int         throttle_time () const = 0;
+
+  /**
+   * @returns Throttling broker's name.
+   * @remark Applies to THROTTLE event type.
+   */
+  virtual std::string broker_name () const = 0;
+
+  /**
+   * @returns Throttling broker's id.
+   * @remark Applies to THROTTLE event type.
+   */
+  virtual int         broker_id () const = 0;
+};
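
A sketch of an event callback that dispatches on the Event accessors defined above; it would be registered later with Conf::set("event_cb", ...):

    #include <iostream>
    #include "rdkafkacpp.h"

    class ExampleEventCb : public RdKafka::EventCb {
     public:
      void event_cb(RdKafka::Event &event) {
        switch (event.type()) {
        case RdKafka::Event::EVENT_ERROR:
          std::cerr << "ERROR (" << RdKafka::err2str(event.err()) << "): "
                    << event.str() << std::endl;
          break;
        case RdKafka::Event::EVENT_LOG:
          std::cerr << "LOG-" << event.severity() << "-" << event.fac() << ": "
                    << event.str() << std::endl;
          break;
        case RdKafka::Event::EVENT_STATS:
          std::cerr << "STATS: " << event.str() << std::endl;  /* JSON document */
          break;
        case RdKafka::Event::EVENT_THROTTLE:
          std::cerr << "THROTTLE: " << event.throttle_time() << "ms from broker "
                    << event.broker_name() << std::endl;
          break;
        default:
          break;
        }
      }
    };
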
+
+
+
+/**
+ * @brief Consume callback class
+ */
+class RD_EXPORT ConsumeCb {
+ public:
+  /**
+   * @brief The consume callback is used with
+   *        RdKafka::Consumer::consume_callback()
+   *        methods and will be called for each consumed \p message.
+   *
+   * The callback interface is optional but provides increased performance.
+   */
+  virtual void consume_cb (Message &message, void *opaque) = 0;
+
+  virtual ~ConsumeCb() { }
+};
+
+
+/**
+ * @brief \b KafkaConsumer: Rebalance callback class
+ */
+class RD_EXPORT RebalanceCb {
+public:
+  /**
+   * @brief Group rebalance callback for use with RdKafka::KafkaConsumer
+   *
+   * Registering a \p rebalance_cb turns off librdkafka's automatic
+   * partition assignment/revocation and instead delegates that responsibility
+   * to the application's \p rebalance_cb.
+   *
+   * The rebalance callback is responsible for updating librdkafka's
+   * assignment set based on the two events: RdKafka::ERR__ASSIGN_PARTITIONS
+   * and RdKafka::ERR__REVOKE_PARTITIONS but should also be able to handle
+   * arbitrary rebalancing failures where \p err is neither of those.
+   * @remark In this latter case (arbitrary error), the application must
+   *         call unassign() to synchronize state.
+
+   *
+   * Without a rebalance callback this is done automatically by librdkafka
+   * but registering a rebalance callback gives the application flexibility
+   * in performing other operations along with the assignment/revocation,
+   * such as fetching offsets from an alternate location (on assign)
+   * or manually committing offsets (on revoke).
+   *
+   * The following example shows the application's responsibilities:
+   * @code
+   *    class MyRebalanceCb : public RdKafka::RebalanceCb {
+   *     public:
+   *      void rebalance_cb (RdKafka::KafkaConsumer *consumer,
+   *     	      RdKafka::ErrorCode err,
+   *                  std::vector<RdKafka::TopicPartition*> &partitions) {
+   *         if (err == RdKafka::ERR__ASSIGN_PARTITIONS) {
+   *           // application may load offsets from arbitrary external
+   *           // storage here and update \p partitions
+   *
+   *           consumer->assign(partitions);
+   *
+   *         } else if (err == RdKafka::ERR__REVOKE_PARTITIONS) {
+   *           // Application may commit offsets manually here
+   *           // if auto.commit.enable=false
+   *
+   *           consumer->unassign();
+   *
+   *         } else {
+   *           std::cerr << "Rebalancing error: " <<
+   *                        RdKafka::err2str(err) << std::endl;
+   *           consumer->unassign();
+   *         }
+   *     }
+   *  }
+   * @endcode
+   */
+ virtual void rebalance_cb (RdKafka::KafkaConsumer *consumer,
+			    RdKafka::ErrorCode err,
+                            std::vector<TopicPartition*>&partitions) = 0;
+
+ virtual ~RebalanceCb() { }
+};
+
+
+/**
+ * @brief Offset Commit callback class
+ */
+class RD_EXPORT OffsetCommitCb {
+public:
+  /**
+   * @brief Set offset commit callback for use with consumer groups
+   *
+   * The results of automatic or manual offset commits will be scheduled
+   * for this callback and served by RdKafka::KafkaConsumer::consume().
+   *
+   * If no partitions had valid offsets to commit this callback will be called
+   * with \p err == ERR__NO_OFFSET which is not to be considered an error.
+   *
+   * The \p offsets list contains per-partition information:
+   *   - \c topic      The topic committed
+   *   - \c partition  The partition committed
+   *   - \c offset:    Committed offset (attempted)
+   *   - \c err:       Commit error
+   */
+  virtual void offset_commit_cb(RdKafka::ErrorCode err,
+                                std::vector<TopicPartition*>&offsets) = 0;
+
+  virtual ~OffsetCommitCb() { }
+};
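
A sketch of an offset commit callback that logs per-partition results, treating ERR__NO_OFFSET as the benign case described above (assuming <iostream> and <vector> are included along with rdkafkacpp.h):

    class ExampleOffsetCommitCb : public RdKafka::OffsetCommitCb {
     public:
      void offset_commit_cb(RdKafka::ErrorCode err,
                            std::vector<RdKafka::TopicPartition*> &offsets) {
        if (err == RdKafka::ERR__NO_OFFSET)
          return;  /* nothing was committed; not an error */
        for (std::vector<RdKafka::TopicPartition*>::size_type i = 0; i < offsets.size(); i++)
          std::cerr << offsets[i]->topic() << " [" << offsets[i]->partition()
                    << "] @ " << offsets[i]->offset() << ": "
                    << RdKafka::err2str(offsets[i]->err()) << std::endl;
      }
    };
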
+
+
+
+/**
+ * @brief \b Portability: SocketCb callback class
+ *
+ */
+class RD_EXPORT SocketCb {
+ public:
+  /**
+   * @brief Socket callback
+   *
+   * The socket callback is responsible for opening a socket
+   * according to the supplied \p domain, \p type and \p protocol.
+   * The socket shall be created with \c CLOEXEC set in a race-free fashion, if
+   * possible.
+   *
+   * It is typically not required to register an alternative socket
+   * implementation
+   *
+   * @returns The socket file descriptor or -1 on error (\c errno must be set)
+   */
+  virtual int socket_cb (int domain, int type, int protocol) = 0;
+
+  virtual ~SocketCb() { }
+};
+
+
+/**
+ * @brief \b Portability: OpenCb callback class
+ *
+ */
+class RD_EXPORT OpenCb {
+ public:
+  /**
+   * @brief Open callback
+   * The open callback is responsible for opening the file specified by
+   * \p pathname, using \p flags and \p mode.
+   * The file shall be opened with \c CLOEXEC set in a race-free fashion, if
+   * possible.
+   *
+   * It is typically not required to register an alternative open implementation
+   *
+   * @remark Not currently available on native Win32
+   */
+  virtual int open_cb (const std::string &path, int flags, int mode) = 0;
+
+  virtual ~OpenCb() { }
+};
+
+
+/**@}*/
+
+
+
+
+/**
+ * @name Configuration interface
+ * @{
+ *
+ */
+
+/**
+ * @brief Configuration interface
+ *
+ * Holds either global or topic configuration that are passed to
+ * RdKafka::Consumer::create(), RdKafka::Producer::create(),
+ * RdKafka::KafkaConsumer::create(), etc.
+ *
+ * @sa CONFIGURATION.md for the full list of supported properties.
+ */
+class RD_EXPORT Conf {
+ public:
+  /**
+   * @brief Configuration object type
+   */
+  enum ConfType {
+    CONF_GLOBAL, /**< Global configuration */
+    CONF_TOPIC   /**< Topic specific configuration */
+  };
+
+  /**
+   * @brief RdKafka::Conf::Set() result code
+   */
+  enum ConfResult {
+    CONF_UNKNOWN = -2,  /**< Unknown configuration property */
+    CONF_INVALID = -1,  /**< Invalid configuration value */
+    CONF_OK = 0         /**< Configuration property was successfully set */
+  };
+
+
+  /**
+   * @brief Create configuration object
+   */
+  static Conf *create (ConfType type);
+
+  virtual ~Conf () { }
+
+  /**
+   * @brief Set configuration property \p name to value \p value.
+   *
+   * Fallthrough:
+   * Topic-level configuration properties may be set using this interface
+   * in which case they are applied on the \c default_topic_conf.
+   * If no \c default_topic_conf has been set one will be created.
+   * Any subsequent set("default_topic_conf", ..) calls will
+   * replace the current default topic configuration.
+
+   * @returns CONF_OK on success, else writes a human readable error
+   *          description to \p errstr on error.
+   */
+  virtual Conf::ConfResult set (const std::string &name,
+                                const std::string &value,
+                                std::string &errstr) = 0;
+
+  /** @brief Use with \p name = \c \"dr_cb\" */
+  virtual Conf::ConfResult set (const std::string &name,
+                                DeliveryReportCb *dr_cb,
+                                std::string &errstr) = 0;
+
+  /** @brief Use with \p name = \c \"event_cb\" */
+  virtual Conf::ConfResult set (const std::string &name,
+                                EventCb *event_cb,
+                                std::string &errstr) = 0;
+
+  /** @brief Use with \p name = \c \"default_topic_conf\"
+   *
+   * Sets the default topic configuration to use for automatically
+   * subscribed topics.
+   *
+   * @sa RdKafka::KafkaConsumer::subscribe()
+   */
+  virtual Conf::ConfResult set (const std::string &name,
+                                const Conf *topic_conf,
+                                std::string &errstr) = 0;
+
+  /** @brief Use with \p name = \c \"partitioner_cb\" */
+  virtual Conf::ConfResult set (const std::string &name,
+                                PartitionerCb *partitioner_cb,
+                                std::string &errstr) = 0;
+
+  /** @brief Use with \p name = \c \"partitioner_key_pointer_cb\" */
+  virtual Conf::ConfResult set (const std::string &name,
+                                PartitionerKeyPointerCb *partitioner_kp_cb,
+                                std::string &errstr) = 0;
+
+  /** @brief Use with \p name = \c \"socket_cb\" */
+  virtual Conf::ConfResult set (const std::string &name, SocketCb *socket_cb,
+                                std::string &errstr) = 0;
+
+  /** @brief Use with \p name = \c \"open_cb\" */
+  virtual Conf::ConfResult set (const std::string &name, OpenCb *open_cb,
+                                std::string &errstr) = 0;
+
+  /** @brief Use with \p name = \c \"rebalance_cb\" */
+  virtual Conf::ConfResult set (const std::string &name,
+                                RebalanceCb *rebalance_cb,
+                                std::string &errstr) = 0;
+
+  /** @brief Use with \p name = \c \"offset_commit_cb\" */
+  virtual Conf::ConfResult set (const std::string &name,
+                                OffsetCommitCb *offset_commit_cb,
+                                std::string &errstr) = 0;
+
+  /** @brief Query single configuration value
+   *
+   * Do not use this method to get callbacks registered by the configuration file.
+   * Instead use the specific get() methods with the specific callback parameter in the signature.
+   *
+   * Fallthrough:
+   * Topic-level configuration properties from the \c default_topic_conf
+   * may be retrieved using this interface.
+   *
+   *  @returns CONF_OK if the property was previously set and
+   *           returns the value in \p value. */
+  virtual Conf::ConfResult get(const std::string &name,
+	  std::string &value) const = 0;
+
+  /** @brief Query single configuration value
+   *  @returns CONF_OK if the property was previously set and
+   *           returns the value in \p dr_cb. */
+  virtual Conf::ConfResult get(DeliveryReportCb *&dr_cb) const = 0;
+
+  /** @brief Query single configuration value
+   *  @returns CONF_OK if the property was previously set and
+   *           returns the value in \p event_cb. */
+  virtual Conf::ConfResult get(EventCb *&event_cb) const = 0;
+
+  /** @brief Query single configuration value
+   *  @returns CONF_OK if the property was previously set and
+   *           returns the value in \p partitioner_cb. */
+  virtual Conf::ConfResult get(PartitionerCb *&partitioner_cb) const = 0;
+
+  /** @brief Query single configuration value
+   *  @returns CONF_OK if the property was previously set and
+   *           returns the value in \p partitioner_kp_cb. */
+  virtual Conf::ConfResult get(PartitionerKeyPointerCb *&partitioner_kp_cb) const = 0;
+
+  /** @brief Query single configuration value
+   *  @returns CONF_OK if the property was previously set and
+   *           returns the value in \p socket_cb. */
+  virtual Conf::ConfResult get(SocketCb *&socket_cb) const = 0;
+
+  /** @brief Query single configuration value
+   *  @returns CONF_OK if the property was previously set and
+   *           returns the value in \p open_cb. */
+  virtual Conf::ConfResult get(OpenCb *&open_cb) const = 0;
+
+  /** @brief Query single configuration value
+   *  @returns CONF_OK if the property was previously set and
+   *           returns the value in \p rebalance_cb. */
+  virtual Conf::ConfResult get(RebalanceCb *&rebalance_cb) const = 0;
+
+  /** @brief Query single configuration value
+   *  @returns CONF_OK if the property was previously set and
+   *           returns the value in \p offset_commit_cb. */
+  virtual Conf::ConfResult get(OffsetCommitCb *&offset_commit_cb) const = 0;
+
+  /** @brief Dump configuration names and values to list containing
+   *         name,value tuples */
+  virtual std::list<std::string> *dump () = 0;
+
+  /** @brief Use with \p name = \c \"consume_cb\" */
+  virtual Conf::ConfResult set (const std::string &name, ConsumeCb *consume_cb,
+				std::string &errstr) = 0;
+};
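
A short sketch of typical Conf usage: a plain string property, the topic-level fallthrough described above, and dump(). The broker address is a placeholder:

    #include <iostream>
    #include <list>
    #include <string>
    #include "rdkafkacpp.h"

    void configure_example() {
      std::string errstr;
      RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);

      if (conf->set("bootstrap.servers", "broker1:9092", errstr) != RdKafka::Conf::CONF_OK)
        std::cerr << errstr << std::endl;

      /* Topic-level property: applied to the default_topic_conf (fallthrough). */
      if (conf->set("auto.offset.reset", "earliest", errstr) != RdKafka::Conf::CONF_OK)
        std::cerr << errstr << std::endl;

      /* Dump the effective configuration as alternating name,value entries. */
      std::list<std::string> *d = conf->dump();
      for (std::list<std::string>::const_iterator it = d->begin(); it != d->end(); ) {
        std::string name = *it++;
        std::cout << name << " = " << *it++ << std::endl;
      }
      delete d;
      delete conf;
    }
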
+
+/**@}*/
+
+
+/**
+ * @name Kafka base client handle
+ * @{
+ *
+ */
+
+/**
+ * @brief Base handle, super class for specific clients.
+ */
+class RD_EXPORT Handle {
+ public:
+  virtual ~Handle() { }
+
+  /** @returns the name of the handle */
+  virtual const std::string name () const = 0;
+
+  /**
+   * @brief Returns the client's broker-assigned group member id
+   *
+   * @remark This currently requires the high-level KafkaConsumer
+   *
+   * @returns Last assigned member id, or empty string if not currently
+   *          a group member.
+   */
+  virtual const std::string memberid () const = 0;
+
+
+  /**
+   * @brief Polls the provided kafka handle for events.
+   *
+   * Events will trigger application provided callbacks to be called.
+   *
+   * The \p timeout_ms argument specifies the maximum amount of time
+   * (in milliseconds) that the call will block waiting for events.
+   * For non-blocking calls, provide 0 as \p timeout_ms.
+   * To wait indefinitely for events, provide -1.
+   *
+   * Events:
+   *   - delivery report callbacks (if an RdKafka::DeliveryReportCb is configured) [producer]
+   *   - event callbacks (if an RdKafka::EventCb is configured) [producer & consumer]
+   *
+   * @remark  An application should make sure to call poll() at regular
+   *          intervals to serve any queued callbacks waiting to be called.
+   *
+   * @warning This method MUST NOT be used with the RdKafka::KafkaConsumer,
+   *          use its RdKafka::KafkaConsumer::consume() instead.
+   *
+   * @returns the number of events served.
+   */
+  virtual int poll (int timeout_ms) = 0;
+
+  /**
+   * @brief  Returns the current out queue length
+   *
+   * The out queue contains messages and requests waiting to be sent to,
+   * or acknowledged by, the broker.
+   */
+  virtual int outq_len () = 0;
+
+  /**
+   * @brief Request Metadata from broker.
+   *
+   * Parameters:
+   *  \p all_topics  - if non-zero: request info about all topics in cluster,
+   *                   if zero: only request info about locally known topics.
+   *  \p only_rkt    - only request info about this topic
+   *  \p metadatap   - pointer to hold metadata result.
+   *                   The \p *metadatap pointer must be released with \c delete.
+   *  \p timeout_ms  - maximum response time before failing.
+   *
+   * @returns RdKafka::ERR_NO_ERROR on success (in which case \p *metadatap
+   * will be set), else RdKafka::ERR__TIMED_OUT on timeout or
+   * other error code on error.
+   */
+  virtual ErrorCode metadata (bool all_topics, const Topic *only_rkt,
+                              Metadata **metadatap, int timeout_ms) = 0;
+
+
+  /**
+   * @brief Pause producing or consumption for the provided list of partitions.
+   *
+   * Success or error is returned per-partition in the \p partitions list.
+   *
+   * @returns RdKafka::ERR_NO_ERROR
+   *
+   * @sa resume()
+   */
+  virtual ErrorCode pause (std::vector<TopicPartition*> &partitions) = 0;
+
+
+  /**
+   * @brief Resume producing or consumption for the provided list of partitions.
+   *
+   * Success or error is returned per-partition in the \p partitions list.
+   *
+   * @returns RdKafka::ERR_NO_ERROR
+   *
+   * @sa pause()
+   */
+  virtual ErrorCode resume (std::vector<TopicPartition*> &partitions) = 0;
+
+
+  /**
+   * @brief Query broker for low (oldest/beginning)
+   *        and high (newest/end) offsets for partition.
+   *
+   * Offsets are returned in \p *low and \p *high respectively.
+   *
+   * @returns RdKafka::ERR_NO_ERROR on success or an error code on failure.
+   */
+  virtual ErrorCode query_watermark_offsets (const std::string &topic,
+					     int32_t partition,
+					     int64_t *low, int64_t *high,
+					     int timeout_ms) = 0;
+
+  /**
+   * @brief Get last known low (oldest/beginning)
+   *        and high (newest/end) offsets for partition.
+   *
+   * The low offset is updated periodically (if statistics.interval.ms is set)
+   * while the high offset is updated on each fetched message set from the
+   * broker.
+   *
+   * If there is no cached offset (either low or high, or both) then
+   * OFFSET_INVALID will be returned for the respective offset.
+   *
+   * Offsets are returned in \p *low and \p *high respectively.
+   *
+   * @returns RdKafka::ERR_NO_ERROR on success or an error code on failure.
+   *
+   * @remark Shall only be used with an active consumer instance.
+   */
+  virtual ErrorCode get_watermark_offsets (const std::string &topic,
+					   int32_t partition,
+					   int64_t *low, int64_t *high) = 0;
+
+
+  /**
+   * @brief Look up the offsets for the given partitions by timestamp.
+   *
+   * The returned offset for each partition is the earliest offset whose
+   * timestamp is greater than or equal to the given timestamp in the
+   * corresponding partition.
+   *
+   * The timestamps to query are represented as \c offset in \p offsets
+   * on input, and \c offset() will return the closest earlier offset
+   * for the timestamp on output.
+   *
+   * The function will block for at most \p timeout_ms milliseconds.
+   *
+   * @remark Duplicate Topic+Partitions are not supported.
+   * @remark Errors are also returned per TopicPartition, see \c err()
+   *
+   * @returns an error code for general errors, else RdKafka::ERR_NO_ERROR
+   *          in which case per-partition errors might be set.
+   */
+  virtual ErrorCode offsetsForTimes (std::vector<TopicPartition*> &offsets,
+                                     int timeout_ms) = 0;
+
+
+  /**
+   * @brief Retrieve queue for a given partition.
+   *
+   * @returns The fetch queue for the given partition if successful. Else,
+   *          NULL is returned.
+   *          
+   * @remark This function only works on consumers.
+   */
+  virtual Queue *get_partition_queue (const TopicPartition *partition) = 0;
+
+  /**
+   * @brief Forward librdkafka logs (and debug) to the specified queue
+   *        for serving with one of the ..poll() calls.
+   *
+   *        This allows an application to serve log callbacks (\c log_cb)
+   *        in its thread of choice.
+   *
+   * @param queue Queue to forward logs to. If the value is NULL the logs
+   *        are forwarded to the main queue.
+   *
+   * @remark The configuration property \c log.queue MUST also be set to true.
+   *
+   * @remark librdkafka maintains its own reference to the provided queue.
+   *
+   * @returns ERR_NO_ERROR on success or an error code on error.
+   */
+  virtual ErrorCode set_log_queue (Queue *queue) = 0;
+
+  /**
+   * @brief Cancels the current callback dispatcher (Producer::poll(),
+   *        Consumer::poll(), KafkaConsumer::consume(), etc).
+   *
+   * A callback may use this to force an immediate return to the calling
+   * code (caller of e.g. ..::poll()) without processing any further
+   * events.
+   *
+   * @remark This function MUST ONLY be called from within a
+   *         librdkafka callback.
+   */
+  virtual void yield () = 0;
+
+  /**
+   * @brief Returns the ClusterId as reported in broker metadata.
+   *
+   * @param timeout_ms If there is no cached value from metadata retrieval
+   *                   then this specifies the maximum amount of time
+   *                   (in milliseconds) the call will block waiting
+   *                   for metadata to be retrieved.
+   *                   Use 0 for non-blocking calls.
+   *
+   * @remark Requires broker version >=0.10.0 and api.version.request=true.
+   *
+   * @returns Last cached ClusterId, or empty string if no ClusterId could be
+   *          retrieved in the allotted timespan.
+   */
+  virtual const std::string clusterid (int timeout_ms) = 0;
+
+  /**
+   * @brief Returns the underlying librdkafka C rd_kafka_t handle.
+   *
+   * @warning Calling the C API on this handle is not recommended and there
+   *          is no official support for it, but for cases where the C++
+   *          does not provide the proper functionality this C handle can be
+   *          used to interact directly with the core librdkafka API.
+   *
+   * @remark The lifetime of the returned pointer is the same as the Handle
+   *         object this method is called on.
+   *
+   * @remark Include <rdkafka/rdkafka.h> prior to including
+   *         <rdkafka/rdkafkacpp.h>
+   *
+   * @returns \c rd_kafka_t*
+   */
+  virtual struct rd_kafka_s *c_ptr () = 0;
+};
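
A sketch of one Handle method in use: querying watermark offsets through a previously created client instance (assuming <iostream> is included; `handle` stands in for any Handle subclass, and the topic name and partition are placeholders):

    void print_watermarks(RdKafka::Handle *handle) {
      int64_t low = 0, high = 0;
      RdKafka::ErrorCode err =
          handle->query_watermark_offsets("my_topic", 0, &low, &high, 5000 /* ms */);
      if (err == RdKafka::ERR_NO_ERROR)
        std::cout << "my_topic [0] offsets: low=" << low << " high=" << high << std::endl;
      else
        std::cerr << "query failed: " << RdKafka::err2str(err) << std::endl;
    }
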
+
+
+/**@}*/
+
+
+/**
+ * @name Topic and partition objects
+ * @{
+ *
+ */
+
+/**
+ * @brief Topic+Partition
+ *
+ * This is a generic type to hold a single partition and various
+ * information about it.
+ *
+ * Is typically used with std::vector<RdKafka::TopicPartition*> to provide
+ * a list of partitions for different operations.
+ */
+class RD_EXPORT TopicPartition {
+public:
+  /**
+   * Create topic+partition object for \p topic and \p partition
+   * and optionally \p offset.
+   *
+   * Use \c delete to deconstruct.
+   */
+  static TopicPartition *create (const std::string &topic, int partition);
+  static TopicPartition *create (const std::string &topic, int partition,
+                                 int64_t offset);
+
+  virtual ~TopicPartition() = 0;
+
+  /**
+   * @brief Destroy/delete the TopicPartitions in \p partitions
+   *        and clear the vector.
+   */
+  static void destroy (std::vector<TopicPartition*> &partitions);
+
+  /** @returns topic name */
+  virtual const std::string &topic () const = 0;
+
+  /** @returns partition id */
+  virtual int partition () const = 0;
+
+  /** @returns offset (if applicable) */
+  virtual int64_t offset () const = 0;
+
+  /** @brief Set offset */
+  virtual void set_offset (int64_t offset) = 0;
+
+  /** @returns error code (if applicable) */
+  virtual ErrorCode err () const = 0;
+};
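
A sketch of building and disposing of a partition list, as used by pause(), resume() and offsetsForTimes() above (assuming <vector> is included; the topic name and offsets are placeholders, and OFFSET_BEGINNING is declared on Topic just below):

    std::vector<RdKafka::TopicPartition*> parts;
    parts.push_back(RdKafka::TopicPartition::create("my_topic", 0,
                                                    RdKafka::Topic::OFFSET_BEGINNING));
    parts.push_back(RdKafka::TopicPartition::create("my_topic", 1, 1234));

    /* ... pass `parts` to pause()/resume()/offsetsForTimes() ... */

    RdKafka::TopicPartition::destroy(parts);  /* deletes the objects and clears the vector */
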
+
+
+
+/**
+ * @brief Topic handle
+ *
+ */
+class RD_EXPORT Topic {
+ public:
+  /**
+   * @brief Unassigned partition.
+   *
+   * The unassigned partition is used by the producer API for messages
+   * that should be partitioned using the configured or default partitioner.
+   */
+  static const int32_t PARTITION_UA;
+
+  /** @brief Special offsets */
+  static const int64_t OFFSET_BEGINNING; /**< Consume from beginning */
+  static const int64_t OFFSET_END; /**< Consume from end */
+  static const int64_t OFFSET_STORED; /**< Use offset storage */
+  static const int64_t OFFSET_INVALID; /**< Invalid offset */
+
+
+  /**
+   * @brief Creates a new topic handle for topic named \p topic_str
+   *
+   * \p conf is an optional configuration for the topic  that will be used
+   * instead of the default topic configuration.
+   * The \p conf object is reusable after this call.
+   *
+   * @returns the new topic handle or NULL on error (see \p errstr).
+   */
+  static Topic *create (Handle *base, const std::string &topic_str,
+                        Conf *conf, std::string &errstr);
+
+  virtual ~Topic () = 0;
+
+
+  /** @returns the topic name */
+  virtual const std::string name () const = 0;
+
+  /**
+   * @returns true if \p partition is available for the topic (has leader).
+   * @warning \b MUST \b ONLY be called from within a
+   *          RdKafka::PartitionerCb callback.
+   */
+  virtual bool partition_available (int32_t partition) const = 0;
+
+  /**
+   * @brief Store offset \p offset for topic partition \p partition.
+   * The offset will be committed (written) to the offset store according
+   * to \p auto.commit.interval.ms.
+   *
+   * @remark \c enable.auto.offset.store must be set to \c false when using this API.
+   *
+   * @returns RdKafka::ERR_NO_ERROR on success or an error code if none of the
+   *          offsets could be stored.
+   */
+  virtual ErrorCode offset_store (int32_t partition, int64_t offset) = 0;
+
+  /**
+   * @brief Returns the underlying librdkafka C rd_kafka_topic_t handle.
+   *
+   * @warning Calling the C API on this handle is not recommended and there
+   *          is no official support for it, but for cases where the C++ API
+   *          does not provide the underlying functionality this C handle can be
+   *          used to interact directly with the core librdkafka API.
+   *
+   * @remark The lifetime of the returned pointer is the same as the Topic
+   *         object this method is called on.
+   *
+   * @remark Include <rdkafka/rdkafka.h> prior to including
+   *         <rdkafka/rdkafkacpp.h>
+   *
+   * @returns \c rd_kafka_topic_t*
+   */
+  virtual struct rd_kafka_topic_s *c_ptr () = 0;
+};
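
A sketch of creating a topic handle from an existing client handle (assuming <iostream> is included; `handle` is a placeholder for a previously created producer or consumer, and NULL selects the default topic configuration):

    std::string errstr;
    RdKafka::Topic *topic =
        RdKafka::Topic::create(handle, "my_topic", NULL /* default topic conf */, errstr);
    if (!topic)
      std::cerr << "Topic::create failed: " << errstr << std::endl;
    /* ... produce to / consume from the topic ... */
    delete topic;
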
+
+
+/**@}*/
+
+
+/**
+ * @name Message object
+ * @{
+ *
+ */
+
+
+/**
+ * @brief Message timestamp object
+ *
+ * Represents the number of milliseconds since the epoch (UTC).
+ *
+ * The MessageTimestampType dictates the timestamp type or origin.
+ *
+ * @remark Requires Apache Kafka broker version >= 0.10.0
+ *
+ */
+
+class RD_EXPORT MessageTimestamp {
+public:
+  enum MessageTimestampType {
+    MSG_TIMESTAMP_NOT_AVAILABLE,   /**< Timestamp not available */
+    MSG_TIMESTAMP_CREATE_TIME,     /**< Message creation time (source) */
+    MSG_TIMESTAMP_LOG_APPEND_TIME  /**< Message log append time (broker) */
+  };
+
+  MessageTimestampType type;       /**< Timestamp type */
+  int64_t timestamp;               /**< Milliseconds since epoch (UTC). */
+};
+
+
+
+/**
+ * @brief Message object
+ *
+ * This object represents either a single consumed or produced message,
+ * or an event (\p err() is set).
+ *
+ * An application must check RdKafka::Message::err() to see if the
+ * object is a proper message (error is RdKafka::ERR_NO_ERROR) or
+ * an error event.
+ *
+ */
+class RD_EXPORT Message {
+ public:
+  /**
+   * @brief Accessor functions
+   * @remark Not all fields are present in all types of callbacks.
+   */
+
+  /** @returns The error string if object represent an error event,
+   *           else an empty string. */
+  virtual std::string         errstr() const = 0;
+
+  /** @returns The error code if object represents an error event, else 0. */
+  virtual ErrorCode           err () const = 0;
+
+  /** @returns the RdKafka::Topic object for a message (if applicable),
+   *            or NULL if a corresponding RdKafka::Topic object has not been
+   *            explicitly created with RdKafka::Topic::create().
+   *            In this case use topic_name() instead. */
+  virtual Topic              *topic () const = 0;
+
+  /** @returns Topic name (if applicable, else empty string) */
+  virtual std::string         topic_name () const = 0;
+
+  /** @returns Partition (if applicable) */
+  virtual int32_t             partition () const = 0;
+
+  /** @returns Message payload (if applicable) */
+  virtual void               *payload () const = 0 ;
+
+  /** @returns Message payload length (if applicable) */
+  virtual size_t              len () const = 0;
+
+  /** @returns Message key as string (if applicable) */
+  virtual const std::string  *key () const = 0;
+
+  /** @returns Message key as void pointer  (if applicable) */
+  virtual const void         *key_pointer () const = 0 ;
+
+  /** @returns Message key's binary length (if applicable) */
+  virtual size_t              key_len () const = 0;
+
+  /** @returns Message or error offset (if applicable) */
+  virtual int64_t             offset () const = 0;
+
+  /** @returns Message timestamp (if applicable) */
+  virtual MessageTimestamp    timestamp () const = 0;
+
+  /** @returns The \p msg_opaque as provided to RdKafka::Producer::produce() */
+  virtual void               *msg_opaque () const = 0;
+
+  virtual ~Message () = 0;
+
+  /** @returns the latency in microseconds for a produced message measured
+   *           from the produce() call, or -1 if latency is not available. */
+  virtual int64_t             latency () const = 0;
+
+  /**
+   * @brief Returns the underlying librdkafka C rd_kafka_message_t handle.
+   *
+   * @warning Calling the C API on this handle is not recommended and there
+   *          is no official support for it, but for cases where the C++ API
+   *          does not provide the underlying functionality this C handle can be
+   *          used to interact directly with the core librdkafka API.
+   *
+   * @remark The lifetime of the returned pointer is the same as the Message
+   *         object this method is called on.
+   *
+   * @remark Include <rdkafka/rdkafka.h> prior to including
+   *         <rdkafka/rdkafkacpp.h>
+   *
+   * @returns \c rd_kafka_message_t*
+   */
+  virtual struct rd_kafka_message_s *c_ptr () = 0;
+};
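+
+/*
+ * Example (illustrative sketch): distinguishing a proper message from an
+ * error event, as required by the Message contract above.  The \c msg
+ * pointer is assumed to come from a consume() call; handling is only
+ * demonstrational.
+ *
+ *   if (msg->err() == RdKafka::ERR_NO_ERROR) {
+ *     // Proper message: payload()/len() and key() are valid here.
+ *     std::cout << msg->topic_name() << " [" << msg->partition() << "] @ "
+ *               << msg->offset() << ": "
+ *               << std::string(static_cast<const char *>(msg->payload()),
+ *                              msg->len()) << std::endl;
+ *   } else if (msg->err() == RdKafka::ERR__TIMED_OUT) {
+ *     // No message within the timeout; not a fatal condition.
+ *   } else {
+ *     std::cerr << "Error event: " << msg->errstr() << std::endl;
+ *   }
+ *   delete msg;  // Messages returned by consume() are freed with delete.
+ */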
+
+/**@}*/
+
+
+/**
+ * @name Queue interface
+ * @{
+ *
+ */
+
+
+/**
+ * @brief Queue interface
+ *
+ * Create a new message queue.  Message queues allow the application
+ * to re-route consumed messages from multiple topic+partitions into
+ * one single queue point.  This queue point, containing messages from
+ * a number of topic+partitions, may then be served by a single
+ * consume() method, rather than one per topic+partition combination.
+ *
+ * See the RdKafka::Consumer::start(), RdKafka::Consumer::consume(), and
+ * RdKafka::Consumer::consume_callback() methods that take a queue as the first
+ * parameter for more information.
+ */
+class RD_EXPORT Queue {
+ public:
+  /**
+   * @brief Create Queue object
+   */
+  static Queue *create (Handle *handle);
+
+  /**
+   * @brief Forward/re-route queue to \p dst.
+   * If \p dst is \c NULL, the forwarding is removed.
+   *
+   * The internal refcounts for both queues are increased.
+   * 
+   * @remark Regardless of whether \p dst is NULL or not, after calling this
+   *         function, \p src will not forward its fetch queue to the consumer
+   *         queue.
+   */
+  virtual ErrorCode forward (Queue *dst) = 0;
+
+
+  /**
+   * @brief Consume message or get error event from the queue.
+   *
+   * @remark Use \c delete to free the message.
+   *
+   * @returns One of:
+   *  - proper message (RdKafka::Message::err() is ERR_NO_ERROR)
+   *  - error event (RdKafka::Message::err() is != ERR_NO_ERROR)
+   *  - timeout due to no message or event in \p timeout_ms
+   *    (RdKafka::Message::err() is ERR__TIMED_OUT)
+   */
+  virtual Message *consume (int timeout_ms) = 0;
+
+  /**
+   * @brief Poll queue, serving any enqueued callbacks.
+   *
+   * @remark Must NOT be used for queues containing messages.
+   *
+   * @returns the number of events served or 0 on timeout.
+   */
+  virtual int poll (int timeout_ms) = 0;
+
+  virtual ~Queue () = 0;
+
+  /**
+   * @brief Enable IO event triggering for queue.
+   *
+   * To ease integration with IO based polling loops this API
+   * allows an application to create a separate file-descriptor
+   * that librdkafka will write \p payload (of size \p size) to
+   * whenever a new element is enqueued on a previously empty queue.
+   *
+   * To remove event triggering call with \p fd = -1.
+   *
+   * librdkafka will maintain a copy of the \p payload.
+   *
+   * @remark When using forwarded queues the IO event must only be enabled
+   *         on the final forwarded-to (destination) queue.
+   */
+  virtual void io_event_enable (int fd, const void *payload, size_t size) = 0;
+};
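+
+/*
+ * Example (illustrative sketch): routing two partitions of a topic into a
+ * single queue with the legacy RdKafka::Consumer (declared further below).
+ * The \c consumer and \c topic objects and the \c run flag are placeholders
+ * provided by the surrounding application.
+ *
+ *   RdKafka::Queue *queue = RdKafka::Queue::create(consumer);
+ *   consumer->start(topic, 0, RdKafka::Topic::OFFSET_BEGINNING, queue);
+ *   consumer->start(topic, 1, RdKafka::Topic::OFFSET_BEGINNING, queue);
+ *   while (run) {
+ *     RdKafka::Message *msg = queue->consume(1000);   // serves both partitions
+ *     if (msg->err() == RdKafka::ERR_NO_ERROR)
+ *       handle_message(msg);                          // hypothetical helper
+ *     delete msg;
+ *   }
+ *   consumer->stop(topic, 0);
+ *   consumer->stop(topic, 1);
+ *   delete queue;
+ */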
+
+/**@}*/
+
+
+/**
+ * @name KafkaConsumer
+ * @{
+ *
+ */
+
+
+/**
+ * @brief High-level KafkaConsumer (for brokers 0.9 and later)
+ *
+ * @remark Requires Apache Kafka >= 0.9.0 brokers
+ *
+ * Currently supports the \c range and \c roundrobin partition assignment
+ * strategies (see \c partition.assignment.strategy)
+ */
+class RD_EXPORT KafkaConsumer : public virtual Handle {
+public:
+  /**
+   * @brief Creates a KafkaConsumer.
+   *
+   * The \p conf object must have \c group.id set to the consumer group to join.
+   *
+   * Use RdKafka::KafkaConsumer::close() to shut down the consumer.
+   *
+   * @sa RdKafka::RebalanceCb
+   * @sa CONFIGURATION.md for \c group.id, \c session.timeout.ms,
+   *     \c partition.assignment.strategy, etc.
+   */
+  static KafkaConsumer *create (Conf *conf, std::string &errstr);
+
+  virtual ~KafkaConsumer () = 0;
+
+
+  /** @brief Returns the current partition assignment as set by
+   *         RdKafka::KafkaConsumer::assign() */
+  virtual ErrorCode assignment (std::vector<RdKafka::TopicPartition*> &partitions) = 0;
+
+  /** @brief Returns the current subscription as set by
+   *         RdKafka::KafkaConsumer::subscribe() */
+  virtual ErrorCode subscription (std::vector<std::string> &topics) = 0;
+
+  /**
+   * @brief Update the subscription set to \p topics.
+   *
+   * Any previous subscription will be unassigned and unsubscribed first.
+   *
+   * The subscription set denotes the desired topics to consume and this
+   * set is provided to the partition assignor (one of the elected group
+   * members) for all clients, which then uses the configured
+   * \c partition.assignment.strategy to assign the subscribed topics'
+   * partitions to the consumers, depending on their subscription.
+   *
+   * The result of such an assignment is a rebalancing which is either
+   * handled automatically in librdkafka or can be overridden by the application
+   * by providing a RdKafka::RebalanceCb.
+   *
+   * The rebalancing passes the assigned partition set to
+   * RdKafka::KafkaConsumer::assign() to update what partitions are actually
+   * being fetched by the KafkaConsumer.
+   *
+   * Regex pattern matching is automatically performed for topics prefixed
+   * with \c \"^\" (e.g. \c \"^myPfx[0-9]_.*\").
+   *
+   * @returns an error if the provided list of topics is invalid.
+   */
+  virtual ErrorCode subscribe (const std::vector<std::string> &topics) = 0;
+
+  /** @brief Unsubscribe from the current subscription set. */
+  virtual ErrorCode unsubscribe () = 0;
+
+  /**
+   *  @brief Update the assignment set to \p partitions.
+   *
+   * The assignment set is the set of partitions actually being consumed
+   * by the KafkaConsumer.
+   */
+  virtual ErrorCode assign (const std::vector<TopicPartition*> &partitions) = 0;
+
+  /**
+   * @brief Stop consumption and remove the current assignment.
+   */
+  virtual ErrorCode unassign () = 0;
+
+  /**
+   * @brief Consume message or get error event, triggers callbacks.
+   *
+   * Will automatically call registered callbacks for any such queued events,
+   * including RdKafka::RebalanceCb, RdKafka::EventCb, RdKafka::OffsetCommitCb,
+   * etc.
+   *
+   * @remark Use \c delete to free the message.
+   *
+   * @remark  An application should make sure to call consume() at regular
+   *          intervals, even if no messages are expected, to serve any
+   *          queued callbacks waiting to be called. This is especially
+   *          important when a RebalanceCb has been registered as it needs
+   *          to be called and handled properly to synchronize internal
+   *          consumer state.
+   *
+   * @remark Application MUST NOT call \p poll() on KafkaConsumer objects.
+   *
+   * @returns One of:
+   *  - proper message (RdKafka::Message::err() is ERR_NO_ERROR)
+   *  - error event (RdKafka::Message::err() is != ERR_NO_ERROR)
+   *  - timeout due to no message or event in \p timeout_ms
+   *    (RdKafka::Message::err() is ERR__TIMED_OUT)
+   */
+  virtual Message *consume (int timeout_ms) = 0;
+
+  /**
+   * @brief Commit offsets for the current assignment.
+   *
+   * @remark This is the synchronous variant that blocks until offsets
+   *         are committed or the commit fails (see return value).
+   *
+   * @remark If a RdKafka::OffsetCommitCb callback is registered it will
+   *         be called with commit details on a future call to
+   *         RdKafka::KafkaConsumer::consume()
+   *
+   *
+   * @returns ERR_NO_ERROR or error code.
+   */
+  virtual ErrorCode commitSync () = 0;
+
+  /**
+   * @brief Asynchronous version of RdKafka::KafkaConsumer::commitSync()
+   *
+   * @sa RdKafka::KafkaConsumer::commitSync()
+   */
+  virtual ErrorCode commitAsync () = 0;
+
+  /**
+   * @brief Commit offset for a single topic+partition based on \p message
+   *
+   * @remark This is the synchronous variant.
+   *
+   * @sa RdKafka::KafkaConsumer::commitSync()
+   */
+  virtual ErrorCode commitSync (Message *message) = 0;
+
+  /**
+   * @brief Commit offset for a single topic+partition based on \p message
+   *
+   * @remark This is the asynchronous variant.
+   *
+   * @sa RdKafka::KafkaConsumer::commitSync()
+   */
+  virtual ErrorCode commitAsync (Message *message) = 0;
+
+  /**
+   * @brief Commit offsets for the provided list of partitions.
+   *
+   * @remark This is the synchronous variant.
+   */
+  virtual ErrorCode commitSync (std::vector<TopicPartition*> &offsets) = 0;
+
+  /**
+   * @brief Commit offsets for the provided list of partitions.
+   *
+   * @remark This is the asynchronous variant.
+   */
+  virtual ErrorCode commitAsync (const std::vector<TopicPartition*> &offsets) = 0;
+
+  /**
+   * @brief Commit offsets for the current assignment.
+   *
+   * @remark This is the synchronous variant that blocks until offsets
+   *         are committed or the commit fails (see return value).
+   *
+   * @remark The provided callback will be called from this function.
+   *
+   * @returns ERR_NO_ERROR or error code.
+   */
+  virtual ErrorCode commitSync (OffsetCommitCb *offset_commit_cb) = 0;
+
+  /**
+   * @brief Commit offsets for the provided list of partitions.
+   *
+   * @remark This is the synchronous variant that blocks until offsets
+   *         are committed or the commit fails (see return value).
+   *
+   * @remark The provided callback will be called from this function.
+   *
+   * @returns ERR_NO_ERROR or error code.
+   */
+  virtual ErrorCode commitSync (std::vector<TopicPartition*> &offsets,
+                                OffsetCommitCb *offset_commit_cb) = 0;
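+
+  /*
+   * Example (illustrative sketch): synchronous commit with a per-call commit
+   * callback, assuming the RdKafka::OffsetCommitCb interface exposes an
+   * offset_commit_cb(ErrorCode, std::vector<TopicPartition*>&) method as used
+   * by this header's callback trampolines.  The \c consumer variable is a
+   * placeholder for an existing KafkaConsumer.
+   *
+   *   class ExampleCommitCb : public RdKafka::OffsetCommitCb {
+   *    public:
+   *     void offset_commit_cb (RdKafka::ErrorCode err,
+   *                            std::vector<RdKafka::TopicPartition*> &offsets) {
+   *       std::cerr << "Commit of " << offsets.size() << " offset(s): "
+   *                 << RdKafka::err2str(err) << std::endl;
+   *     }
+   *   };
+   *
+   *   ExampleCommitCb commit_cb;
+   *   RdKafka::ErrorCode err = consumer->commitSync(&commit_cb);
+   */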
+
+
+
+
+  /**
+   * @brief Retrieve committed offsets for topics+partitions.
+   *
+   * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success in which case the
+   *          \p offset or \p err field of each \p partitions' element is filled
+   *          in with the stored offset, or a partition specific error.
+   *          Else returns an error code.
+   */
+  virtual ErrorCode committed (std::vector<TopicPartition*> &partitions,
+			       int timeout_ms) = 0;
+
+  /**
+   * @brief Retrieve current positions (offsets) for topics+partitions.
+   *
+   * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success in which case the
+   *          \p offset or \p err field of each \p partitions' element is filled
+   *          in with the stored offset, or a partition specific error.
+   *          Else returns an error code.
+   */
+  virtual ErrorCode position (std::vector<TopicPartition*> &partitions) = 0;
+
+
+  /**
+   * For pausing and resuming consumption, see
+   * RdKafka::Handle::pause() and RdKafka::Handle::resume().
+   */
+
+
+  /**
+   * @brief Close and shut down the consumer.
+   *
+   * This call will block until the following operations are finished:
+   *  - Trigger a local rebalance to void the current assignment
+   *  - Stop consumption for current assignment
+   *  - Commit offsets
+   *  - Leave group
+   *
+   * The maximum blocking time is roughly limited to session.timeout.ms.
+   *
+   * @remark Callbacks, such as RdKafka::RebalanceCb and
+   *         RdKafka::OffsetCommitCb, etc, may be called.
+   *
+   * @remark The consumer object must later be freed with \c delete
+   */
+  virtual ErrorCode close () = 0;
+
+
+  /**
+   * @brief Seek consumer for topic+partition to offset which is either an
+   *        absolute or logical offset.
+   *
+   * If \p timeout_ms is not 0 the call will wait this long for the
+   * seek to be performed. If the timeout is reached the internal state
+   * will be unknown and this function returns `ERR__TIMED_OUT`.
+   * If \p timeout_ms is 0 it will initiate the seek but return
+   * immediately without any error reporting (e.g., async).
+   *
+   * This call triggers a fetch queue barrier flush.
+   *
+   * @remark Consumption for the given partition must have started for the
+   *         seek to work. Use assign() to set the starting offset.
+   *
+   * @returns an ErrorCode to indicate success or failure.
+   */
+  virtual ErrorCode seek (const TopicPartition &partition, int timeout_ms) = 0;
+
+
+  /**
+   * @brief Store offset \p offset for topic partition \p partition.
+   * The offset will be committed (written) to the offset store according
+   * to \p auto.commit.interval.ms or the next manual offset-less commit*()
+   *
+   * Per-partition success/error status is propagated through TopicPartition.err()
+   *
+   * @remark \c enable.auto.offset.store must be set to \c false when using this API.
+   *
+   * @returns RdKafka::ERR_NO_ERROR on success, or
+   *          RdKafka::ERR__UNKNOWN_PARTITION if none of the offsets could
+   *          be stored, or
+   *          RdKafka::ERR__INVALID_ARG if \c enable.auto.offset.store is true.
+   */
+  virtual ErrorCode offsets_store (std::vector<TopicPartition*> &offsets) = 0;
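+
+  /*
+   * Example (illustrative sketch): manual offset storing, assuming the
+   * consumer was configured with \c enable.auto.offset.store=false and that
+   * the TopicPartition::create()/set_offset()/destroy() helpers declared
+   * elsewhere in this header are available.  The \c consumer and \c msg
+   * variables are placeholders; storing offset+1 marks \c msg as processed.
+   *
+   *   std::vector<RdKafka::TopicPartition*> offsets;
+   *   RdKafka::TopicPartition *tp =
+   *     RdKafka::TopicPartition::create(msg->topic_name(), msg->partition());
+   *   tp->set_offset(msg->offset() + 1);
+   *   offsets.push_back(tp);
+   *   RdKafka::ErrorCode err = consumer->offsets_store(offsets);
+   *   RdKafka::TopicPartition::destroy(offsets);
+   */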
+};
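+
+/*
+ * Example (illustrative sketch): a minimal high-level consumer loop built on
+ * the KafkaConsumer interface above.  The broker address, group id and topic
+ * pattern are placeholders, and \c run and \c handle_message() stand in for
+ * application logic.
+ *
+ *   std::string errstr;
+ *   RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
+ *   conf->set("bootstrap.servers", "localhost:9092", errstr);  // placeholder
+ *   conf->set("group.id", "example_group", errstr);            // placeholder
+ *
+ *   RdKafka::KafkaConsumer *consumer =
+ *     RdKafka::KafkaConsumer::create(conf, errstr);  // NULL + errstr on failure
+ *
+ *   std::vector<std::string> topics;
+ *   topics.push_back("^example_.*");   // regex subscription, see subscribe()
+ *   consumer->subscribe(topics);
+ *
+ *   while (run) {
+ *     RdKafka::Message *msg = consumer->consume(1000);
+ *     if (msg->err() == RdKafka::ERR_NO_ERROR)
+ *       handle_message(msg);           // hypothetical helper
+ *     delete msg;
+ *   }
+ *
+ *   consumer->commitSync();   // commit final offsets (if not auto-committed)
+ *   consumer->close();
+ *   delete consumer;
+ *   delete conf;
+ */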
+
+
+/**@}*/
+
+
+/**
+ * @name Simple Consumer (legacy)
+ * @{
+ *
+ */
+
+/**
+ * @brief Simple Consumer (legacy)
+ *
+ * A simple non-balanced, non-group-aware consumer.
+ */
+class RD_EXPORT Consumer : public virtual Handle {
+ public:
+  /**
+   * @brief Creates a new Kafka consumer handle.
+   *
+   * \p conf is an optional object that will be used instead of the default
+   * configuration.
+   * The \p conf object is reusable after this call.
+   *
+   * @returns the new handle on success or NULL on error in which case
+   * \p errstr is set to a human readable error message.
+   */
+  static Consumer *create (Conf *conf, std::string &errstr);
+
+  virtual ~Consumer () = 0;
+
+
+  /**
+   * @brief Start consuming messages for topic and \p partition
+   * at offset \p offset which may either be a proper offset (0..N)
+   * or one of the special offsets: \p OFFSET_BEGINNING or \p OFFSET_END.
+   *
+   * rdkafka will attempt to keep \p queued.min.messages (config property)
+   * messages in the local queue by repeatedly fetching batches of messages
+   * from the broker until the threshold is reached.
+   *
+   * The application shall use one of the \p ..->consume*() functions
+   * to consume messages from the local queue, each kafka message being
+   * represented as a `RdKafka::Message *` object.
+   *
+   * \p ..->start() must not be called multiple times for the same
+   * topic and partition without stopping consumption first with
+   * \p ..->stop().
+   *
+   * @returns an ErrorCode to indicate success or failure.
+   */
+  virtual ErrorCode start (Topic *topic, int32_t partition, int64_t offset) = 0;
+
+  /**
+   * @brief Start consuming messages for topic and \p partition on
+   *        queue \p queue.
+   *
+   * @sa RdKafka::Consumer::start()
+   */
+  virtual ErrorCode start (Topic *topic, int32_t partition, int64_t offset,
+                           Queue *queue) = 0;
+
+  /**
+   * @brief Stop consuming messages for topic and \p partition, purging
+   *        all messages currently in the local queue.
+   *
+   * The application needs to stop all consumers before destroying
+   * the Consumer handle.
+   *
+   * @returns an ErrorCode to indicate success or failure.
+   */
+  virtual ErrorCode stop (Topic *topic, int32_t partition) = 0;
+
+  /**
+   * @brief Seek consumer for topic+partition to \p offset which is either an
+   *        absolute or logical offset.
+   *
+   * If \p timeout_ms is not 0 the call will wait this long for the
+   * seek to be performed. If the timeout is reached the internal state
+   * will be unknown and this function returns `ERR__TIMED_OUT`.
+   * If \p timeout_ms is 0 it will initiate the seek but return
+   * immediately without any error reporting (e.g., async).
+   *
+   * This call triggers a fetch queue barrier flush.
+   *
+   * @returns an ErrorCode to indicate success or failure.
+   */
+  virtual ErrorCode seek (Topic *topic, int32_t partition, int64_t offset,
+			  int timeout_ms) = 0;
+
+  /**
+   * @brief Consume a single message from \p topic and \p partition.
+   *
+   * \p timeout_ms is the maximum amount of time to wait for a message to be
+   * received.
+   * Consumer must have been previously started with \p ..->start().
+   *
+   * @returns a Message object. The application needs to check whether the
+   * message is an error or a proper message by calling
+   * RdKafka::Message::err() and checking for \p ERR_NO_ERROR.
+   *
+   * The message object must be destroyed when the application is done with it.
+   *
+   * Errors (in RdKafka::Message::err()):
+   *  - ERR__TIMED_OUT - \p timeout_ms was reached with no new messages fetched.
+   *  - ERR__PARTITION_EOF - End of partition reached, not an error.
+   */
+  virtual Message *consume (Topic *topic, int32_t partition,
+                            int timeout_ms) = 0;
+
+  /**
+   * @brief Consume a single message from the specified queue.
+   *
+   * \p timeout_ms is the maximum amount of time to wait for a message to be
+   * received.
+   * Consumer must have been previously started on the queue with
+   * \p ..->start().
+   *
+   * @returns a Message object. The application needs to check whether the
+   * message is an error or a proper message by calling \p Message->err()
+   * and checking for \p ERR_NO_ERROR.
+   *
+   * The message object must be destroyed when the application is done with it.
+   *
+   * Errors (in RdKafka::Message::err()):
+   *   - ERR__TIMED_OUT - \p timeout_ms was reached with no new messages fetched
+   *
+   * Note that Message->topic() may be nullptr after certain kinds of
+   * errors, so applications should check that it isn't null before
+   * dereferencing it.
+   */
+  virtual Message *consume (Queue *queue, int timeout_ms) = 0;
+
+  /**
+   * @brief Consumes messages from \p topic and \p partition, calling
+   *        the provided callback for each consumed message.
+   *
+   * \p consume_callback() provides higher throughput performance
+   * than \p consume().
+   *
+   * \p timeout_ms is the maximum amount of time to wait for one or
+   * more messages to arrive.
+   *
+   * The provided \p consume_cb instance has its \p consume_cb function
+   * called for every message received.
+   *
+   * The \p opaque argument is passed to the \p consume_cb as \p opaque.
+   *
+   * @returns the number of messages processed or -1 on error.
+   *
+   * @sa RdKafka::Consumer::consume()
+   */
+  virtual int consume_callback (Topic *topic, int32_t partition,
+                                int timeout_ms,
+                                ConsumeCb *consume_cb,
+                                void *opaque) = 0;
+
+  /**
+   * @brief Consumes messages from \p queue, calling the provided callback for
+   *        each consumed message.
+   *
+   * @sa RdKafka::Consumer::consume_callback()
+   */
+  virtual int consume_callback (Queue *queue, int timeout_ms,
+                                RdKafka::ConsumeCb *consume_cb,
+                                void *opaque) = 0;
+
+  /**
+   * @brief Converts an offset into the logical offset from the tail of a topic.
+   *
+   * \p offset is the (positive) number of items from the end.
+   *
+   * @returns the logical offset for message \p offset from the tail; this value
+   *          may be passed to Consumer::start() et al.
+   * @remark The returned logical offset is specific to librdkafka.
+   */
+  static int64_t OffsetTail(int64_t offset);
+};
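+
+/*
+ * Example (illustrative sketch): legacy simple consumer reading one
+ * partition.  Broker address, topic name and the \c run flag are
+ * placeholders, and the four-argument RdKafka::Topic::create(handle, topic,
+ * conf, errstr) factory is assumed; error handling is reduced to a minimum.
+ *
+ *   std::string errstr;
+ *   RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
+ *   conf->set("bootstrap.servers", "localhost:9092", errstr);  // placeholder
+ *
+ *   RdKafka::Consumer *consumer = RdKafka::Consumer::create(conf, errstr);
+ *   RdKafka::Topic *topic =
+ *     RdKafka::Topic::create(consumer, "example_topic", NULL, errstr);
+ *
+ *   consumer->start(topic, 0, RdKafka::Topic::OFFSET_BEGINNING);
+ *   while (run) {
+ *     RdKafka::Message *msg = consumer->consume(topic, 0, 1000);
+ *     if (msg->err() == RdKafka::ERR_NO_ERROR)
+ *       handle_message(msg);            // hypothetical helper
+ *     delete msg;
+ *   }
+ *   consumer->stop(topic, 0);
+ *
+ *   delete topic;
+ *   delete consumer;
+ *   delete conf;
+ */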
+
+/**@}*/
+
+
+/**
+ * @name Producer
+ * @{
+ *
+ */
+
+
+/**
+ * @brief Producer
+ */
+class RD_EXPORT Producer : public virtual Handle {
+ public:
+  /**
+   * @brief Creates a new Kafka producer handle.
+   *
+   * \p conf is an optional object that will be used instead of the default
+   * configuration.
+   * The \p conf object is reusable after this call.
+   *
+   * @returns the new handle on success or NULL on error in which case
+   *          \p errstr is set to a human readable error message.
+   */
+  static Producer *create (Conf *conf, std::string &errstr);
+
+
+  virtual ~Producer () = 0;
+
+  /**
+   * @brief RdKafka::Producer::produce() \p msgflags
+   *
+   * These flags are optional and mutually exclusive.
+   */
+  enum {
+    RK_MSG_FREE = 0x1, /**< rdkafka will free(3) \p payload
+                         * when it is done with it. */
+    RK_MSG_COPY = 0x2, /**< the \p payload data will be copied
+                        * and the \p payload pointer will not
+                        * be used by rdkafka after the
+                        * call returns. */
+    RK_MSG_BLOCK = 0x4  /**< Block produce*() on message queue
+                         *   full.
+                         *   WARNING:
+                         *   If a delivery report callback
+                         *   is used the application MUST
+                         *   call rd_kafka_poll() (or equiv.)
+                         *   to make sure delivered messages
+                         *   are drained from the internal
+                         *   delivery report queue.
+                         *   Failure to do so will result
+                         *   in indefinitely blocking on
+                         *   the produce() call when the
+                         *   message queue is full.
+                         */
+
+
+  /**@cond NO_DOC*/
+  /* For backwards compatibility: */
+#ifndef MSG_COPY /* defined in sys/msg.h */
+    , /** this comma must exist between
+       *  RK_MSG_BLOCK and MSG_FREE
+       */
+    MSG_FREE = RK_MSG_FREE,
+    MSG_COPY = RK_MSG_COPY
+#endif
+  /**@endcond*/
+  };
+
+  /**
+   * @brief Produce and send a single message to broker.
+   *
+   * This is an asynchronous, non-blocking API.
+   *
+   * \p partition is the target partition, either:
+   *   - RdKafka::Topic::PARTITION_UA (unassigned) for
+   *     automatic partitioning using the topic's partitioner function, or
+   *   - a fixed partition (0..N)
+   *
+   * \p msgflags is zero or more of the following flags OR'ed together:
+   *    RK_MSG_BLOCK - block \p produce*() call if
+   *                   \p queue.buffering.max.messages or
+   *                   \p queue.buffering.max.kbytes are exceeded.
+   *                   Messages are considered in-queue from the point they
+   *                   are accepted by produce() until their corresponding
+   *                   delivery report callback/event returns.
+   *                   It is thus a requirement to call 
+   *                   poll() (or equiv.) from a separate
+   *                   thread when RK_MSG_BLOCK is used.
+   *                   See WARNING on \c RK_MSG_BLOCK above.
+   *    RK_MSG_FREE - rdkafka will free(3) \p payload when it is done with it.
+   *    RK_MSG_COPY - the \p payload data will be copied and the \p payload
+   *               pointer will not be used by rdkafka after the
+   *               call returns.
+   *
+   *  NOTE: RK_MSG_FREE and RK_MSG_COPY are mutually exclusive.
+   *
+   *  If the function returns an error code and RK_MSG_FREE was specified, then
+   *  the memory associated with the payload is still the caller's
+   *  responsibility.
+   *
+   * \p payload is the message payload of size \p len bytes.
+   *
+   * \p key is an optional message key, if non-NULL it
+   * will be passed to the topic partitioner as well as be sent with the
+   * message to the broker and passed on to the consumer.
+   *
+   * \p msg_opaque is an optional application-provided per-message opaque
+   * pointer that will be provided in the delivery report callback (\p dr_cb) for
+   * referencing this message.
+   *
+   * @returns an ErrorCode to indicate success or failure:
+   *  - ERR_NO_ERROR           - message successfully enqueued for transmission.
+   *
+   *  - ERR__QUEUE_FULL        - maximum number of outstanding messages has been
+   *                             reached: \c queue.buffering.max.messages
+   *
+   *  - ERR_MSG_SIZE_TOO_LARGE - message is larger than configured max size:
+   *                            \c message.max.bytes
+   *
+   *  - ERR__UNKNOWN_PARTITION - requested \p partition is unknown in the
+   *                           Kafka cluster.
+   *
+   *  - ERR__UNKNOWN_TOPIC     - topic is unknown in the Kafka cluster.
+   */
+  virtual ErrorCode produce (Topic *topic, int32_t partition,
+                             int msgflags,
+                             void *payload, size_t len,
+                             const std::string *key,
+                             void *msg_opaque) = 0;
+
+  /**
+   * @brief Variant produce() that passes the key as a pointer and length
+   *        instead of as a const std::string *.
+   */
+  virtual ErrorCode produce (Topic *topic, int32_t partition,
+                             int msgflags,
+                             void *payload, size_t len,
+                             const void *key, size_t key_len,
+                             void *msg_opaque) = 0;
+
+  /**
+   * @brief produce() variant that takes topic as a string (no need for
+   *        creating a Topic object), and also allows providing the
+   *        message timestamp (milliseconds since beginning of epoch, UTC).
+   *        Otherwise identical to produce() above.
+   */
+  virtual ErrorCode produce (const std::string topic_name, int32_t partition,
+                             int msgflags,
+                             void *payload, size_t len,
+                             const void *key, size_t key_len,
+                             int64_t timestamp,
+                             void *msg_opaque) = 0;
+
+
+  /**
+   * @brief Variant produce() that accepts vectors for key and payload.
+   *        The vector data will be copied.
+   */
+  virtual ErrorCode produce (Topic *topic, int32_t partition,
+                             const std::vector<char> *payload,
+                             const std::vector<char> *key,
+                             void *msg_opaque) = 0;
+
+
+  /**
+   * @brief Wait until all outstanding produce requests, et al., are completed.
+   *        This should typically be done prior to destroying a producer instance
+   *        to make sure all queued and in-flight produce requests are completed
+   *        before terminating.
+   *
+   * @remark This function will call poll() and thus trigger callbacks.
+   *
+   * @returns ERR__TIMED_OUT if \p timeout_ms was reached before all
+   *          outstanding requests were completed, else ERR_NO_ERROR
+   */
+  virtual ErrorCode flush (int timeout_ms) = 0;
+};
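+
+/*
+ * Example (illustrative sketch): producing a single copied message and
+ * flushing before shutdown.  Broker address and topic name are placeholders,
+ * and the four-argument RdKafka::Topic::create(handle, topic, conf, errstr)
+ * factory is assumed.
+ *
+ *   std::string errstr;
+ *   RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
+ *   conf->set("bootstrap.servers", "localhost:9092", errstr);  // placeholder
+ *
+ *   RdKafka::Producer *producer = RdKafka::Producer::create(conf, errstr);
+ *   RdKafka::Topic *topic =
+ *     RdKafka::Topic::create(producer, "example_topic", NULL, errstr);
+ *
+ *   std::string value = "hello kafka";
+ *   RdKafka::ErrorCode resp =
+ *     producer->produce(topic, RdKafka::Topic::PARTITION_UA,
+ *                       RdKafka::Producer::RK_MSG_COPY,
+ *                       const_cast<char *>(value.c_str()), value.size(),
+ *                       NULL, NULL);   // no key, no per-message opaque
+ *   if (resp != RdKafka::ERR_NO_ERROR)
+ *     std::cerr << "produce failed: " << RdKafka::err2str(resp) << std::endl;
+ *
+ *   producer->poll(0);        // serve queued delivery report callbacks
+ *   producer->flush(10000);   // wait for outstanding messages before exit
+ *
+ *   delete topic;
+ *   delete producer;
+ *   delete conf;
+ */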
+
+/**@}*/
+
+
+/**
+ * @name Metadata interface
+ * @{
+ *
+ */
+
+
+/**
+ * @brief Metadata: Broker information
+ */
+class BrokerMetadata {
+ public:
+  /** @returns Broker id */
+  virtual int32_t id() const = 0;
+
+  /** @returns Broker hostname */
+  virtual const std::string host() const = 0;
+
+  /** @returns Broker listening port */
+  virtual int port() const = 0;
+
+  virtual ~BrokerMetadata() = 0;
+};
+
+
+
+/**
+ * @brief Metadata: Partition information
+ */
+class PartitionMetadata {
+ public:
+  /** @brief Replicas */
+  typedef std::vector<int32_t> ReplicasVector;
+  /** @brief ISRs (In-Sync-Replicas) */
+  typedef std::vector<int32_t> ISRSVector;
+
+  /** @brief Replicas iterator */
+  typedef ReplicasVector::const_iterator ReplicasIterator;
+  /** @brief ISRs iterator */
+  typedef ISRSVector::const_iterator     ISRSIterator;
+
+
+  /** @returns Partition id */
+  virtual int32_t id() const = 0;
+
+  /** @returns Partition error reported by broker */
+  virtual ErrorCode err() const = 0;
+
+  /** @returns Leader broker (id) for partition */
+  virtual int32_t leader() const = 0;
+
+  /** @returns Replica brokers */
+  virtual const std::vector<int32_t> *replicas() const = 0;
+
+  /** @returns In-Sync-Replica brokers
+   *  @warning The broker may return a cached/outdated list of ISRs.
+   */
+  virtual const std::vector<int32_t> *isrs() const = 0;
+
+  virtual ~PartitionMetadata() = 0;
+};
+
+
+
+/**
+ * @brief Metadata: Topic information
+ */
+class TopicMetadata {
+ public:
+  /** @brief Partitions */
+  typedef std::vector<const PartitionMetadata*> PartitionMetadataVector;
+  /** @brief Partitions iterator */
+  typedef PartitionMetadataVector::const_iterator PartitionMetadataIterator;
+
+  /** @returns Topic name */
+  virtual const std::string topic() const = 0;
+
+  /** @returns Partition list */
+  virtual const PartitionMetadataVector *partitions() const = 0;
+
+  /** @returns Topic error reported by broker */
+  virtual ErrorCode err() const = 0;
+
+  virtual ~TopicMetadata() = 0;
+};
+
+
+/**
+ * @brief Metadata container
+ */
+class Metadata {
+ public:
+  /** @brief Brokers */
+  typedef std::vector<const BrokerMetadata*> BrokerMetadataVector;
+  /** @brief Topics */
+  typedef std::vector<const TopicMetadata*>  TopicMetadataVector;
+
+  /** @brief Brokers iterator */
+  typedef BrokerMetadataVector::const_iterator BrokerMetadataIterator;
+  /** @brief Topics iterator */
+  typedef TopicMetadataVector::const_iterator  TopicMetadataIterator;
+
+
+  /** @brief Broker list */
+  virtual const BrokerMetadataVector *brokers() const = 0;
+
+  /** @brief Topic list */
+  virtual const TopicMetadataVector  *topics() const = 0;
+
+  /** @brief Broker (id) originating this metadata */
+  virtual int32_t orig_broker_id() const = 0;
+
+  /** @brief Broker (name) originating this metadata */
+  virtual const std::string orig_broker_name() const = 0;
+
+  virtual ~Metadata() = 0;
+};
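+
+/*
+ * Example (illustrative sketch): fetching and walking cluster metadata via
+ * RdKafka::Handle::metadata() (see the Handle interface).  The \c handle
+ * variable is a placeholder for an existing Producer or Consumer.
+ *
+ *   RdKafka::Metadata *metadata = NULL;
+ *   RdKafka::ErrorCode err = handle->metadata(true, NULL, &metadata, 5000);
+ *   if (err == RdKafka::ERR_NO_ERROR) {
+ *     const RdKafka::Metadata::TopicMetadataVector *topics = metadata->topics();
+ *     for (RdKafka::Metadata::TopicMetadataIterator it = topics->begin();
+ *          it != topics->end(); ++it)
+ *       std::cout << (*it)->topic() << ": "
+ *                 << (*it)->partitions()->size() << " partition(s)"
+ *                 << std::endl;
+ *     delete metadata;
+ *   }
+ */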
+
+/**@}*/
+
+}
+
+#endif /* _RDKAFKACPP_H_ */


[42/51] [partial] nifi-minifi-cpp git commit: MINIFICPP-512 - upgrade to librdkafka 0.11.4

Posted by ph...@apache.org.
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src-cpp/rdkafkacpp_int.h
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src-cpp/rdkafkacpp_int.h b/thirdparty/librdkafka-0.11.1/src-cpp/rdkafkacpp_int.h
deleted file mode 100644
index d231d20..0000000
--- a/thirdparty/librdkafka-0.11.1/src-cpp/rdkafkacpp_int.h
+++ /dev/null
@@ -1,897 +0,0 @@
-/*
- * librdkafka - Apache Kafka C/C++ library
- *
- * Copyright (c) 2014 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#pragma once
-
-#include <string>
-#include <iostream>
-#include <cstring>
-#include <stdlib.h>
-
-#include "rdkafkacpp.h"
-
-extern "C" {
-#include "../src/rdkafka.h"
-}
-
-#ifdef _MSC_VER
-typedef int mode_t;
-#pragma warning(disable : 4250)
-#endif
-
-
-namespace RdKafka {
-
-
-void consume_cb_trampoline(rd_kafka_message_t *msg, void *opaque);
-void log_cb_trampoline (const rd_kafka_t *rk, int level,
-                        const char *fac, const char *buf);
-void error_cb_trampoline (rd_kafka_t *rk, int err, const char *reason,
-                          void *opaque);
-void throttle_cb_trampoline (rd_kafka_t *rk, const char *broker_name,
-			     int32_t broker_id, int throttle_time_ms,
-			     void *opaque);
-int stats_cb_trampoline (rd_kafka_t *rk, char *json, size_t json_len,
-                         void *opaque);
-int socket_cb_trampoline (int domain, int type, int protocol, void *opaque);
-int open_cb_trampoline (const char *pathname, int flags, mode_t mode,
-                        void *opaque);
-void rebalance_cb_trampoline (rd_kafka_t *rk,
-                              rd_kafka_resp_err_t err,
-                              rd_kafka_topic_partition_list_t *c_partitions,
-                              void *opaque);
-void offset_commit_cb_trampoline0 (
-        rd_kafka_t *rk,
-        rd_kafka_resp_err_t err,
-        rd_kafka_topic_partition_list_t *c_offsets, void *opaque);
-
-rd_kafka_topic_partition_list_t *
-    partitions_to_c_parts (const std::vector<TopicPartition*> &partitions);
-
-/**
- * @brief Update the application provided 'partitions' with info from 'c_parts'
- */
-void update_partitions_from_c_parts (std::vector<TopicPartition*> &partitions,
-                                     const rd_kafka_topic_partition_list_t *c_parts);
-
-
-class EventImpl : public Event {
- public:
-  ~EventImpl () {};
-
-  EventImpl (Type type, ErrorCode err, Severity severity,
-             const char *fac, const char *str):
-  type_(type), err_(err), severity_(severity), fac_(fac ? fac : ""),
-	  str_(str), id_(0), throttle_time_(0) {};
-
-  EventImpl (Type type):
-  type_(type), err_(ERR_NO_ERROR), severity_(EVENT_SEVERITY_EMERG),
-	  fac_(""), str_(""), id_(0), throttle_time_(0) {};
-
-  Type        type () const { return type_; }
-  ErrorCode   err () const { return err_; }
-  Severity    severity () const { return severity_; }
-  std::string fac () const { return fac_; }
-  std::string str () const { return str_; }
-  std::string broker_name () const {
-	  if (type_ == EVENT_THROTTLE)
-		  return str_;
-	  else
-		  return std::string("");
-  }
-  int         broker_id () const { return id_; }
-  int         throttle_time () const { return throttle_time_; }
-
-  Type        type_;
-  ErrorCode   err_;
-  Severity    severity_;
-  std::string fac_;
-  std::string str_;         /* reused for THROTTLE broker_name */
-  int         id_;
-  int         throttle_time_;
-};
-
-
-class MessageImpl : public Message {
- public:
-  ~MessageImpl () {
-    if (free_rkmessage_)
-      rd_kafka_message_destroy(const_cast<rd_kafka_message_t *>(rkmessage_));
-    if (key_)
-            delete key_;
-  };
-
-  MessageImpl (RdKafka::Topic *topic, rd_kafka_message_t *rkmessage):
-  topic_(topic), rkmessage_(rkmessage), free_rkmessage_(true), key_(NULL) {}
-
-  MessageImpl (RdKafka::Topic *topic, rd_kafka_message_t *rkmessage,
-               bool dofree):
-  topic_(topic), rkmessage_(rkmessage), free_rkmessage_(dofree), key_(NULL) { }
-
-  MessageImpl (RdKafka::Topic *topic, const rd_kafka_message_t *rkmessage):
-  topic_(topic), rkmessage_(rkmessage), free_rkmessage_(false), key_(NULL) { }
-
-  MessageImpl (rd_kafka_message_t *rkmessage):
-  topic_(NULL), rkmessage_(rkmessage), free_rkmessage_(true), key_(NULL) {
-    if (rkmessage->rkt) {
-      /* Possibly NULL */
-      topic_ = static_cast<Topic *>(rd_kafka_topic_opaque(rkmessage->rkt));
-    }
-  }
-
-  /* Create errored message */
-  MessageImpl (RdKafka::Topic *topic, RdKafka::ErrorCode err):
-  topic_(topic), free_rkmessage_(false), key_(NULL) {
-    rkmessage_ = &rkmessage_err_;
-    memset(&rkmessage_err_, 0, sizeof(rkmessage_err_));
-    rkmessage_err_.err = static_cast<rd_kafka_resp_err_t>(err);
-  }
-
-  std::string         errstr() const {
-    /* FIXME: If there is an error string in payload (for consume_cb)
-     *        it wont be shown since 'payload' is reused for errstr
-     *        and we cant distinguish between consumer and producer.
-     *        For the producer case the payload needs to be the original
-     *        payload pointer. */
-    const char *es = rd_kafka_err2str(rkmessage_->err);
-    return std::string(es ? es : "");
-  }
-
-  ErrorCode           err () const {
-    return static_cast<RdKafka::ErrorCode>(rkmessage_->err);
-  }
-
-  Topic              *topic () const { return topic_; }
-  std::string         topic_name  () const {
-          if (rkmessage_->rkt)
-                  return rd_kafka_topic_name(rkmessage_->rkt);
-          else
-                  return "";
-  }
-  int32_t             partition () const { return rkmessage_->partition; }
-  void               *payload () const { return rkmessage_->payload; }
-  size_t              len () const { return rkmessage_->len; }
-  const std::string  *key () const {
-    if (key_) {
-      return key_;
-    } else if (rkmessage_->key) {
-      key_ = new std::string(static_cast<char const*>(rkmessage_->key), rkmessage_->key_len);
-      return key_;
-    }
-    return NULL;
-  }
-  const void         *key_pointer () const { return rkmessage_->key; }
-  size_t              key_len () const { return rkmessage_->key_len; }
-
-  int64_t             offset () const { return rkmessage_->offset; }
-
-  MessageTimestamp   timestamp () const {
-	  MessageTimestamp ts;
-	  rd_kafka_timestamp_type_t tstype;
-	  ts.timestamp = rd_kafka_message_timestamp(rkmessage_, &tstype);
-	  ts.type = static_cast<MessageTimestamp::MessageTimestampType>(tstype);
-	  return ts;
-  }
-
-  void               *msg_opaque () const { return rkmessage_->_private; };
-
-  int64_t             latency () const {
-          return rd_kafka_message_latency(rkmessage_);
-  }
-
-  RdKafka::Topic *topic_;
-  const rd_kafka_message_t *rkmessage_;
-  bool free_rkmessage_;
-  /* For error signalling by the C++ layer the .._err_ message is
-   * used as a place holder and rkmessage_ is set to point to it. */
-  rd_kafka_message_t rkmessage_err_;
-  mutable std::string *key_; /* mutable because it's a cached value */
-
-private:
-  /* "delete" copy ctor + copy assignment, for safety of key_ */
-  MessageImpl(MessageImpl const&) /*= delete*/;
-  MessageImpl& operator=(MessageImpl const&) /*= delete*/;
-};
-
-
-class ConfImpl : public Conf {
- public:
-  ConfImpl()
-      :consume_cb_(NULL),
-      dr_cb_(NULL),
-      event_cb_(NULL),
-      socket_cb_(NULL),
-      open_cb_(NULL),
-      partitioner_cb_(NULL),
-      partitioner_kp_cb_(NULL),
-      rebalance_cb_(NULL),
-      offset_commit_cb_(NULL),
-      rk_conf_(NULL),
-      rkt_conf_(NULL){}
-  ~ConfImpl () {
-    if (rk_conf_)
-      rd_kafka_conf_destroy(rk_conf_);
-    else if (rkt_conf_)
-      rd_kafka_topic_conf_destroy(rkt_conf_);
-  }
-
-  Conf::ConfResult set(const std::string &name,
-                       const std::string &value,
-                       std::string &errstr);
-
-  Conf::ConfResult set (const std::string &name, DeliveryReportCb *dr_cb,
-                        std::string &errstr) {
-    if (name != "dr_cb") {
-      errstr = "Invalid value type, expected RdKafka::DeliveryReportCb";
-      return Conf::CONF_INVALID;
-    }
-
-    if (!rk_conf_) {
-      errstr = "Requires RdKafka::Conf::CONF_GLOBAL object";
-      return Conf::CONF_INVALID;
-    }
-
-    dr_cb_ = dr_cb;
-    return Conf::CONF_OK;
-  }
-
-  Conf::ConfResult set (const std::string &name, EventCb *event_cb,
-                        std::string &errstr) {
-    if (name != "event_cb") {
-      errstr = "Invalid value type, expected RdKafka::EventCb";
-      return Conf::CONF_INVALID;
-    }
-
-    if (!rk_conf_) {
-      errstr = "Requires RdKafka::Conf::CONF_GLOBAL object";
-      return Conf::CONF_INVALID;
-    }
-
-    event_cb_ = event_cb;
-    return Conf::CONF_OK;
-  }
-
-  Conf::ConfResult set (const std::string &name, const Conf *topic_conf,
-                        std::string &errstr) {
-    const ConfImpl *tconf_impl =
-        dynamic_cast<const RdKafka::ConfImpl *>(topic_conf);
-    if (name != "default_topic_conf" || !tconf_impl->rkt_conf_) {
-      errstr = "Invalid value type, expected RdKafka::Conf";
-      return Conf::CONF_INVALID;
-    }
-
-    if (!rk_conf_) {
-      errstr = "Requires RdKafka::Conf::CONF_GLOBAL object";
-      return Conf::CONF_INVALID;
-    }
-
-    rd_kafka_conf_set_default_topic_conf(rk_conf_,
-                                         rd_kafka_topic_conf_dup(tconf_impl->
-                                                                 rkt_conf_));
-
-    return Conf::CONF_OK;
-  }
-
-  Conf::ConfResult set (const std::string &name, PartitionerCb *partitioner_cb,
-                        std::string &errstr) {
-    if (name != "partitioner_cb") {
-      errstr = "Invalid value type, expected RdKafka::PartitionerCb";
-      return Conf::CONF_INVALID;
-    }
-
-    if (!rkt_conf_) {
-      errstr = "Requires RdKafka::Conf::CONF_TOPIC object";
-      return Conf::CONF_INVALID;
-    }
-
-    partitioner_cb_ = partitioner_cb;
-    return Conf::CONF_OK;
-  }
-
-  Conf::ConfResult set (const std::string &name,
-                        PartitionerKeyPointerCb *partitioner_kp_cb,
-                        std::string &errstr) {
-    if (name != "partitioner_key_pointer_cb") {
-      errstr = "Invalid value type, expected RdKafka::PartitionerKeyPointerCb";
-      return Conf::CONF_INVALID;
-    }
-
-    if (!rkt_conf_) {
-      errstr = "Requires RdKafka::Conf::CONF_TOPIC object";
-      return Conf::CONF_INVALID;
-    }
-
-    partitioner_kp_cb_ = partitioner_kp_cb;
-    return Conf::CONF_OK;
-  }
-
-  Conf::ConfResult set (const std::string &name, SocketCb *socket_cb,
-                        std::string &errstr) {
-    if (name != "socket_cb") {
-      errstr = "Invalid value type, expected RdKafka::SocketCb";
-      return Conf::CONF_INVALID;
-    }
-
-    if (!rk_conf_) {
-      errstr = "Requires RdKafka::Conf::CONF_GLOBAL object";
-      return Conf::CONF_INVALID;
-    }
-
-    socket_cb_ = socket_cb;
-    return Conf::CONF_OK;
-  }
-
-
-  Conf::ConfResult set (const std::string &name, OpenCb *open_cb,
-                        std::string &errstr) {
-    if (name != "open_cb") {
-      errstr = "Invalid value type, expected RdKafka::OpenCb";
-      return Conf::CONF_INVALID;
-    }
-
-    if (!rk_conf_) {
-      errstr = "Requires RdKafka::Conf::CONF_GLOBAL object";
-      return Conf::CONF_INVALID;
-    }
-
-    open_cb_ = open_cb;
-    return Conf::CONF_OK;
-  }
-
-
-
-
-  Conf::ConfResult set (const std::string &name, RebalanceCb *rebalance_cb,
-                        std::string &errstr) {
-    if (name != "rebalance_cb") {
-      errstr = "Invalid value type, expected RdKafka::RebalanceCb";
-      return Conf::CONF_INVALID;
-    }
-
-    if (!rk_conf_) {
-      errstr = "Requires RdKafka::Conf::CONF_GLOBAL object";
-      return Conf::CONF_INVALID;
-    }
-
-    rebalance_cb_ = rebalance_cb;
-    return Conf::CONF_OK;
-  }
-
-
-  Conf::ConfResult set (const std::string &name,
-                        OffsetCommitCb *offset_commit_cb,
-                        std::string &errstr) {
-    if (name != "offset_commit_cb") {
-      errstr = "Invalid value type, expected RdKafka::OffsetCommitCb";
-      return Conf::CONF_INVALID;
-    }
-
-    if (!rk_conf_) {
-      errstr = "Requires RdKafka::Conf::CONF_GLOBAL object";
-      return Conf::CONF_INVALID;
-    }
-
-    offset_commit_cb_ = offset_commit_cb;
-    return Conf::CONF_OK;
-  }
-
-  Conf::ConfResult get(const std::string &name, std::string &value) const {
-    if (name.compare("dr_cb") == 0 ||
-        name.compare("event_cb") == 0 ||
-        name.compare("partitioner_cb") == 0 ||
-        name.compare("partitioner_key_pointer_cb") == 0 ||
-        name.compare("socket_cb") == 0 ||
-        name.compare("open_cb") == 0 ||
-        name.compare("rebalance_cb") == 0 ||
-        name.compare("offset_commit_cb") == 0 ) {
-      return Conf::CONF_INVALID;
-    }
-    rd_kafka_conf_res_t res = RD_KAFKA_CONF_INVALID;
-
-    /* Get size of property */
-    size_t size;
-    if (rk_conf_)
-      res = rd_kafka_conf_get(rk_conf_,
-                              name.c_str(), NULL, &size);
-    else if (rkt_conf_)
-      res = rd_kafka_topic_conf_get(rkt_conf_,
-                                    name.c_str(), NULL, &size);
-    if (res != RD_KAFKA_CONF_OK)
-      return static_cast<Conf::ConfResult>(res);
-
-    char *tmpValue = new char[size];
-
-    if (rk_conf_)
-      res = rd_kafka_conf_get(rk_conf_, name.c_str(),
-                              tmpValue, &size);
-    else if (rkt_conf_)
-      res = rd_kafka_topic_conf_get(rkt_conf_,
-                                    name.c_str(), NULL, &size);
-
-    if (res == RD_KAFKA_CONF_OK)
-      value.assign(tmpValue);
-    delete[] tmpValue;
-
-    return static_cast<Conf::ConfResult>(res);
-  }
-
-  Conf::ConfResult get(DeliveryReportCb *&dr_cb) const {
-      if (!rk_conf_)
-	  return Conf::CONF_INVALID;
-      dr_cb = this->dr_cb_;
-      return Conf::CONF_OK;
-  }
-
-  Conf::ConfResult get(EventCb *&event_cb) const {
-      if (!rk_conf_)
-	  return Conf::CONF_INVALID;
-      event_cb = this->event_cb_;
-      return Conf::CONF_OK;
-  }
-
-  Conf::ConfResult get(PartitionerCb *&partitioner_cb) const {
-      if (!rkt_conf_)
-	  return Conf::CONF_INVALID;
-      partitioner_cb = this->partitioner_cb_;
-      return Conf::CONF_OK;
-  }
-
-  Conf::ConfResult get(PartitionerKeyPointerCb *&partitioner_kp_cb) const {
-      if (!rkt_conf_)
-	  return Conf::CONF_INVALID;
-      partitioner_kp_cb = this->partitioner_kp_cb_;
-      return Conf::CONF_OK;
-  }
-
-  Conf::ConfResult get(SocketCb *&socket_cb) const {
-      if (!rk_conf_)
-	  return Conf::CONF_INVALID;
-      socket_cb = this->socket_cb_;
-      return Conf::CONF_OK;
-  }
-
-  Conf::ConfResult get(OpenCb *&open_cb) const {
-      if (!rk_conf_)
-	  return Conf::CONF_INVALID;
-      open_cb = this->open_cb_;
-      return Conf::CONF_OK;
-  }
-
-  Conf::ConfResult get(RebalanceCb *&rebalance_cb) const {
-      if (!rk_conf_)
-	  return Conf::CONF_INVALID;
-      rebalance_cb = this->rebalance_cb_;
-      return Conf::CONF_OK;
-  }
-
-  Conf::ConfResult get(OffsetCommitCb *&offset_commit_cb) const {
-      if (!rk_conf_)
-	  return Conf::CONF_INVALID;
-      offset_commit_cb = this->offset_commit_cb_;
-      return Conf::CONF_OK;
-    }
-
-
-
-  std::list<std::string> *dump ();
-
-
-  Conf::ConfResult set (const std::string &name, ConsumeCb *consume_cb,
-                        std::string &errstr) {
-    if (name != "consume_cb") {
-      errstr = "Invalid value type, expected RdKafka::ConsumeCb";
-      return Conf::CONF_INVALID;
-    }
-
-    if (!rk_conf_) {
-      errstr = "Requires RdKafka::Conf::CONF_GLOBAL object";
-      return Conf::CONF_INVALID;
-    }
-
-    consume_cb_ = consume_cb;
-    return Conf::CONF_OK;
-  }
-
-
-  ConsumeCb *consume_cb_;
-  DeliveryReportCb *dr_cb_;
-  EventCb *event_cb_;
-  SocketCb *socket_cb_;
-  OpenCb *open_cb_;
-  PartitionerCb *partitioner_cb_;
-  PartitionerKeyPointerCb *partitioner_kp_cb_;
-  RebalanceCb *rebalance_cb_;
-  OffsetCommitCb *offset_commit_cb_;
-  ConfType conf_type_;
-  rd_kafka_conf_t *rk_conf_;
-  rd_kafka_topic_conf_t *rkt_conf_;
-};
-
-
-class HandleImpl : virtual public Handle {
- public:
-  ~HandleImpl() {};
-  HandleImpl () {};
-  const std::string name () const { return std::string(rd_kafka_name(rk_)); };
-  const std::string memberid () const {
-	  char *str = rd_kafka_memberid(rk_);
-	  std::string memberid = str ? str : "";
-	  if (str)
-		  rd_kafka_mem_free(rk_, str);
-	  return memberid;
-  }
-  int poll (int timeout_ms) { return rd_kafka_poll(rk_, timeout_ms); };
-  int outq_len () { return rd_kafka_outq_len(rk_); };
-
-  void set_common_config (RdKafka::ConfImpl *confimpl);
-
-  RdKafka::ErrorCode metadata (bool all_topics,const Topic *only_rkt,
-            Metadata **metadatap, int timeout_ms);
-
-  ErrorCode pause (std::vector<TopicPartition*> &partitions);
-  ErrorCode resume (std::vector<TopicPartition*> &partitions);
-
-  ErrorCode query_watermark_offsets (const std::string &topic,
-				     int32_t partition,
-				     int64_t *low, int64_t *high,
-				     int timeout_ms) {
-    return static_cast<RdKafka::ErrorCode>(
-        rd_kafka_query_watermark_offsets(
-            rk_, topic.c_str(), partition,
-            low, high, timeout_ms));
-  }
-
-  ErrorCode get_watermark_offsets (const std::string &topic,
-                                   int32_t partition,
-                                   int64_t *low, int64_t *high) {
-    return static_cast<RdKafka::ErrorCode>(
-        rd_kafka_get_watermark_offsets(
-            rk_, topic.c_str(), partition,
-            low, high));
-  }
-
-  Queue *get_partition_queue (const TopicPartition *partition);
-
-  ErrorCode offsetsForTimes (std::vector<TopicPartition*> &offsets,
-                             int timeout_ms) {
-    rd_kafka_topic_partition_list_t *c_offsets = partitions_to_c_parts(offsets);
-    ErrorCode err = static_cast<ErrorCode>(
-        rd_kafka_offsets_for_times(rk_, c_offsets, timeout_ms));
-    update_partitions_from_c_parts(offsets, c_offsets);
-    rd_kafka_topic_partition_list_destroy(c_offsets);
-    return err;
-  }
-
-  ErrorCode set_log_queue (Queue *queue);
-
-  void yield () {
-    rd_kafka_yield(rk_);
-  }
-
-  const std::string clusterid (int timeout_ms) {
-          char *str = rd_kafka_clusterid(rk_, timeout_ms);
-          std::string clusterid = str ? str : "";
-          if (str)
-                  rd_kafka_mem_free(rk_, str);
-          return clusterid;
-  }
-
-  rd_kafka_t *rk_;
-  /* All Producer and Consumer callbacks must reside in HandleImpl and
-   * the opaque provided to rdkafka must be a pointer to HandleImpl, since
-   * ProducerImpl and ConsumerImpl classes cannot be safely directly cast to
-   * HandleImpl due to the skewed diamond inheritance. */
-  ConsumeCb *consume_cb_;
-  EventCb *event_cb_;
-  SocketCb *socket_cb_;
-  OpenCb *open_cb_;
-  DeliveryReportCb *dr_cb_;
-  PartitionerCb *partitioner_cb_;
-  PartitionerKeyPointerCb *partitioner_kp_cb_;
-  RebalanceCb *rebalance_cb_;
-  OffsetCommitCb *offset_commit_cb_;
-};
-
-
-class TopicImpl : public Topic {
- public:
-  ~TopicImpl () {
-    rd_kafka_topic_destroy(rkt_);
-  }
-
-  const std::string name () const {
-    return rd_kafka_topic_name(rkt_);
-  }
-
-  bool partition_available (int32_t partition) const {
-    return !!rd_kafka_topic_partition_available(rkt_, partition);
-  }
-
-  ErrorCode offset_store (int32_t partition, int64_t offset) {
-    return static_cast<RdKafka::ErrorCode>(
-        rd_kafka_offset_store(rkt_, partition, offset));
-  }
-
-  static Topic *create (Handle &base, const std::string &topic,
-                        Conf *conf);
-
-  rd_kafka_topic_t *rkt_;
-  PartitionerCb *partitioner_cb_;
-  PartitionerKeyPointerCb *partitioner_kp_cb_;
-};
-
-
-/**
- * Topic and Partition
- */
-class TopicPartitionImpl : public TopicPartition {
-public:
-  ~TopicPartitionImpl() {};
-
-  static TopicPartition *create (const std::string &topic, int partition);
-
-  TopicPartitionImpl (const std::string &topic, int partition):
-  topic_(topic), partition_(partition), offset_(RdKafka::Topic::OFFSET_INVALID),
-      err_(ERR_NO_ERROR) {}
-
-  TopicPartitionImpl (const std::string &topic, int partition, int64_t offset):
-  topic_(topic), partition_(partition), offset_(offset),
-          err_(ERR_NO_ERROR) {}
-
-  TopicPartitionImpl (const rd_kafka_topic_partition_t *c_part) {
-    topic_ = std::string(c_part->topic);
-    partition_ = c_part->partition;
-    offset_ = c_part->offset;
-    err_ = static_cast<ErrorCode>(c_part->err);
-    // FIXME: metadata
-  }
-
-  static void destroy (std::vector<TopicPartition*> &partitions);
-
-  int partition () const { return partition_; }
-  const std::string &topic () const { return topic_ ; }
-
-  int64_t offset () const { return offset_; }
-
-  ErrorCode err () const { return err_; }
-
-  void set_offset (int64_t offset) { offset_ = offset; }
-
-  std::ostream& operator<<(std::ostream &ostrm) const {
-    return ostrm << topic_ << " [" << partition_ << "]";
-  }
-
-  std::string topic_;
-  int partition_;
-  int64_t offset_;
-  ErrorCode err_;
-};
-
-
-
-class KafkaConsumerImpl : virtual public KafkaConsumer, virtual public HandleImpl {
-public:
-  ~KafkaConsumerImpl () {
-
-  }
-
-  static KafkaConsumer *create (Conf *conf, std::string &errstr);
-
-  ErrorCode assignment (std::vector<TopicPartition*> &partitions);
-  ErrorCode subscription (std::vector<std::string> &topics);
-  ErrorCode subscribe (const std::vector<std::string> &topics);
-  ErrorCode unsubscribe ();
-  ErrorCode assign (const std::vector<TopicPartition*> &partitions);
-  ErrorCode unassign ();
-
-  Message *consume (int timeout_ms);
-  ErrorCode commitSync () {
-    return static_cast<ErrorCode>(rd_kafka_commit(rk_, NULL, 0/*sync*/));
-  }
-  ErrorCode commitAsync () {
-    return static_cast<ErrorCode>(rd_kafka_commit(rk_, NULL, 1/*async*/));
-  }
-  ErrorCode commitSync (Message *message) {
-	  MessageImpl *msgimpl = dynamic_cast<MessageImpl*>(message);
-	  return static_cast<ErrorCode>(
-                  rd_kafka_commit_message(rk_, msgimpl->rkmessage_, 0/*sync*/));
-  }
-  ErrorCode commitAsync (Message *message) {
-	  MessageImpl *msgimpl = dynamic_cast<MessageImpl*>(message);
-	  return static_cast<ErrorCode>(
-                  rd_kafka_commit_message(rk_, msgimpl->rkmessage_,1/*async*/));
-  }
-
-  ErrorCode commitSync (std::vector<TopicPartition*> &offsets) {
-	  rd_kafka_topic_partition_list_t *c_parts =
-		  partitions_to_c_parts(offsets);
-	  rd_kafka_resp_err_t err =
-		  rd_kafka_commit(rk_, c_parts, 0);
-	  if (!err)
-		  update_partitions_from_c_parts(offsets, c_parts);
-	  rd_kafka_topic_partition_list_destroy(c_parts);
-	  return static_cast<ErrorCode>(err);
-  }
-
-  ErrorCode commitAsync (const std::vector<TopicPartition*> &offsets) {
-	  rd_kafka_topic_partition_list_t *c_parts =
-		  partitions_to_c_parts(offsets);
-	  rd_kafka_resp_err_t err =
-		  rd_kafka_commit(rk_, c_parts, 1);
-	  rd_kafka_topic_partition_list_destroy(c_parts);
-	  return static_cast<ErrorCode>(err);
-  }
-
-  ErrorCode commitSync (OffsetCommitCb *offset_commit_cb) {
-          return static_cast<ErrorCode>(
-                  rd_kafka_commit_queue(rk_, NULL, NULL,
-                                        RdKafka::offset_commit_cb_trampoline0,
-                                        offset_commit_cb));
-  }
-
-  ErrorCode commitSync (std::vector<TopicPartition*> &offsets,
-                        OffsetCommitCb *offset_commit_cb) {
-          rd_kafka_topic_partition_list_t *c_parts =
-                  partitions_to_c_parts(offsets);
-          rd_kafka_resp_err_t err =
-                  rd_kafka_commit_queue(rk_, c_parts, NULL,
-                                        RdKafka::offset_commit_cb_trampoline0,
-                                        offset_commit_cb);
-          rd_kafka_topic_partition_list_destroy(c_parts);
-          return static_cast<ErrorCode>(err);
-  }
-
-  ErrorCode committed (std::vector<TopicPartition*> &partitions, int timeout_ms);
-  ErrorCode position (std::vector<TopicPartition*> &partitions);
-
-  ErrorCode close ();
-
-  ErrorCode seek (const TopicPartition &partition, int timeout_ms);
-
-  ErrorCode offsets_store (std::vector<TopicPartition*> &offsets) {
-          rd_kafka_topic_partition_list_t *c_parts =
-                  partitions_to_c_parts(offsets);
-          rd_kafka_resp_err_t err =
-                  rd_kafka_offsets_store(rk_, c_parts);
-          update_partitions_from_c_parts(offsets, c_parts);
-          rd_kafka_topic_partition_list_destroy(c_parts);
-          return static_cast<ErrorCode>(err);
-  }
-
-};
-
-
-class MetadataImpl : public Metadata {
- public:
-  MetadataImpl(const rd_kafka_metadata_t *metadata);
-  ~MetadataImpl();
-
-  const std::vector<const BrokerMetadata *> *brokers() const {
-    return &brokers_;
-  }
-
-  const std::vector<const TopicMetadata *>  *topics() const {
-    return &topics_;
-  }
-
-  const std::string orig_broker_name() const {
-    return std::string(metadata_->orig_broker_name);
-  }
-
-  int32_t orig_broker_id() const {
-    return metadata_->orig_broker_id;
-  }
-
-private:
-  const rd_kafka_metadata_t *metadata_;
-  std::vector<const BrokerMetadata *> brokers_;
-  std::vector<const TopicMetadata *> topics_;
-  std::string orig_broker_name_;
-};
-
-
-class QueueImpl : virtual public Queue {
- public:
-  ~QueueImpl () {
-    rd_kafka_queue_destroy(queue_);
-  }
-  static Queue *create (Handle *base);
-  ErrorCode forward (Queue *queue);
-  Message *consume (int timeout_ms);
-  int poll (int timeout_ms);
-
-  rd_kafka_queue_t *queue_;
-};
-
-
-
-
-
-class ConsumerImpl : virtual public Consumer, virtual public HandleImpl {
- public:
-  ~ConsumerImpl () {
-    rd_kafka_destroy(rk_); };
-  static Consumer *create (Conf *conf, std::string &errstr);
-
-  ErrorCode start (Topic *topic, int32_t partition, int64_t offset);
-  ErrorCode start (Topic *topic, int32_t partition, int64_t offset,
-                   Queue *queue);
-  ErrorCode stop (Topic *topic, int32_t partition);
-  ErrorCode seek (Topic *topic, int32_t partition, int64_t offset,
-		  int timeout_ms);
-  Message *consume (Topic *topic, int32_t partition, int timeout_ms);
-  Message *consume (Queue *queue, int timeout_ms);
-  int consume_callback (Topic *topic, int32_t partition, int timeout_ms,
-                        ConsumeCb *cb, void *opaque);
-  int consume_callback (Queue *queue, int timeout_ms,
-                        RdKafka::ConsumeCb *consume_cb, void *opaque);
-};
-
-
-
-class ProducerImpl : virtual public Producer, virtual public HandleImpl {
-
- public:
-  ~ProducerImpl () { if (rk_) rd_kafka_destroy(rk_); };
-
-  ErrorCode produce (Topic *topic, int32_t partition,
-                     int msgflags,
-                     void *payload, size_t len,
-                     const std::string *key,
-                     void *msg_opaque);
-
-  ErrorCode produce (Topic *topic, int32_t partition,
-                     int msgflags,
-                     void *payload, size_t len,
-                     const void *key, size_t key_len,
-                     void *msg_opaque);
-
-  ErrorCode produce (Topic *topic, int32_t partition,
-                     const std::vector<char> *payload,
-                     const std::vector<char> *key,
-                     void *msg_opaque);
-
-  ErrorCode produce (const std::string topic_name, int32_t partition,
-                     int msgflags,
-                     void *payload, size_t len,
-                     const void *key, size_t key_len,
-                     int64_t timestamp,
-                     void *msg_opaque);
-
-  ErrorCode flush (int timeout_ms) {
-	  return static_cast<RdKafka::ErrorCode>(rd_kafka_flush(rk_,
-								timeout_ms));
-  }
-
-  static Producer *create (Conf *conf, std::string &errstr);
-
-};
-
-
-
-}
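The commitSync()/commitAsync() wrappers above differ only in the sync/async
flag they forward to the underlying C API. A minimal sketch in plain C of the
same two calls (assuming an rd_kafka_t consumer handle that was created and
subscribed elsewhere; the helper names are illustrative only):

#include <librdkafka/rdkafka.h>

/* Commit the offsets of the current assignment synchronously:
 * async=0 blocks until the broker acknowledges the commit. */
static rd_kafka_resp_err_t commit_all_sync (rd_kafka_t *rk) {
        return rd_kafka_commit(rk, NULL /* current assignment */, 0 /* sync */);
}

/* Commit the offset of a single consumed message asynchronously:
 * async=1 returns immediately; the outcome is reported through the
 * configured offset commit callback, if any. */
static rd_kafka_resp_err_t commit_msg_async (rd_kafka_t *rk,
                                             const rd_kafka_message_t *rkmessage) {
        return rd_kafka_commit_message(rk, rkmessage, 1 /* async */);
}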

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/CMakeLists.txt
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/CMakeLists.txt b/thirdparty/librdkafka-0.11.1/src/CMakeLists.txt
deleted file mode 100644
index 17bfb23..0000000
--- a/thirdparty/librdkafka-0.11.1/src/CMakeLists.txt
+++ /dev/null
@@ -1,128 +0,0 @@
-set(
-    sources
-    crc32c.c
-    rdaddr.c
-    rdavl.c
-    rdbuf.c
-    rdcrc32.c
-    rdkafka.c
-    rdkafka_assignor.c
-    rdkafka_broker.c
-    rdkafka_buf.c
-    rdkafka_cgrp.c
-    rdkafka_conf.c
-    rdkafka_event.c
-    rdkafka_feature.c
-    rdkafka_lz4.c
-    rdkafka_metadata.c
-    rdkafka_metadata_cache.c
-    rdkafka_msg.c
-    rdkafka_msgset_reader.c
-    rdkafka_msgset_writer.c
-    rdkafka_offset.c
-    rdkafka_op.c
-    rdkafka_partition.c
-    rdkafka_pattern.c
-    rdkafka_queue.c
-    rdkafka_range_assignor.c
-    rdkafka_request.c
-    rdkafka_roundrobin_assignor.c
-    rdkafka_sasl.c
-    rdkafka_sasl_plain.c
-    rdkafka_subscription.c
-    rdkafka_timer.c
-    rdkafka_topic.c
-    rdkafka_transport.c
-    rdkafka_interceptor.c
-    rdlist.c
-    rdlog.c
-    rdports.c
-    rdrand.c
-    rdregex.c
-    rdstring.c
-    rdunittest.c
-    rdvarint.c
-    snappy.c
-    tinycthread.c
-    xxhash.c
-    lz4.c
-    lz4frame.c
-    lz4hc.c
-)
-
-if(WITH_LIBDL)
-    list(APPEND sources rddl.c)
-endif()
-
-if(WITH_PLUGINS)
-    list(APPEND sources rdkafka_plugin.c)
-endif()
-
-if(WITH_SASL_SCRAM)
-  list(APPEND sources rdkafka_sasl_win32.c)
-elseif(WITH_SASL_CYRUS)
-  list(APPEND sources rdkafka_sasl_cyrus.c)
-endif()
-
-if(WITH_ZLIB)
-  list(APPEND sources rdgz.c)
-endif()
-
-if(NOT HAVE_REGEX)
-  list(APPEND sources regexp.c)
-endif()
-
-add_library(rdkafka ${sources})
-
-# Support '#include <rdkafka.h>'
-target_include_directories(rdkafka PUBLIC "$<BUILD_INTERFACE:${CMAKE_CURRENT_LIST_DIR}>")
-
-# We need a 'dummy' directory to support the `#include "../config.h"` path
-set(dummy "${GENERATED_DIR}/dummy")
-file(MAKE_DIRECTORY "${dummy}")
-target_include_directories(rdkafka PUBLIC "$<BUILD_INTERFACE:${dummy}>")
-
-if(WITH_ZLIB)
-  find_package(ZLIB REQUIRED)
-  target_link_libraries(rdkafka PUBLIC ZLIB::ZLIB)
-endif()
-
-if(WITH_SSL)
-  if(WITH_BUNDLED_SSL) # option from 'h2o' parent project
-    if(NOT TARGET bundled-ssl)
-      message(FATAL_ERROR "bundled-ssl target does not exist")
-    endif()
-    target_include_directories(rdkafka BEFORE PUBLIC ${BUNDLED_SSL_INCLUDE_DIR})
-    target_link_libraries(rdkafka PUBLIC ${BUNDLED_SSL_LIBRARIES})
-    add_dependencies(rdkafka bundled-ssl)
-  else()
-    find_package(OpenSSL REQUIRED)
-    target_link_libraries(rdkafka PUBLIC OpenSSL::SSL OpenSSL::Crypto)
-  endif()
-endif()
-
-if(LINK_ATOMIC)
-  target_link_libraries(rdkafka PUBLIC "-latomic")
-endif()
-
-find_package(Threads REQUIRED)
-target_link_libraries(rdkafka PUBLIC Threads::Threads)
-
-if(WITH_SASL_CYRUS)
-  pkg_check_modules(SASL REQUIRED libsasl2)
-  target_link_libraries(rdkafka PUBLIC ${SASL_LIBRARIES})
-endif()
-
-install(
-    TARGETS rdkafka
-    EXPORT "${targets_export_name}"
-    LIBRARY DESTINATION "${CMAKE_INSTALL_LIBDIR}"
-    ARCHIVE DESTINATION "${CMAKE_INSTALL_LIBDIR}"
-    RUNTIME DESTINATION "${CMAKE_INSTALL_BINDIR}"
-    INCLUDES DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}"
-)
-
-install(
-    FILES "rdkafka.h"
-    DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}/librdkafka"
-)

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/Makefile
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/Makefile b/thirdparty/librdkafka-0.11.1/src/Makefile
deleted file mode 100644
index 780edaa..0000000
--- a/thirdparty/librdkafka-0.11.1/src/Makefile
+++ /dev/null
@@ -1,81 +0,0 @@
-PKGNAME=	librdkafka
-LIBNAME=	librdkafka
-LIBVER=		1
-
--include ../Makefile.config
-
-ifneq ($(wildcard ../.git),)
-# Add librdkafka version string from git tag if this is a git checkout
-CPPFLAGS += -DLIBRDKAFKA_GIT_VERSION="\"$(shell git describe --abbrev=6 --dirty --tags 2>/dev/null)\""
-endif
-
-SRCS_$(WITH_SASL_CYRUS) += rdkafka_sasl_cyrus.c
-SRCS_$(WITH_SASL_SCRAM) += rdkafka_sasl_scram.c
-SRCS_$(WITH_SNAPPY) += snappy.c
-SRCS_$(WITH_ZLIB) += rdgz.c
-
-SRCS_LZ4 = xxhash.c
-ifneq ($(WITH_LZ4_EXT), y)
-# Use built-in liblz4
-SRCS_LZ4 += lz4.c lz4frame.c lz4hc.c
-endif
-SRCS_y += rdkafka_lz4.c $(SRCS_LZ4)
-
-SRCS_$(WITH_LIBDL) += rddl.c
-SRCS_$(WITH_PLUGINS) += rdkafka_plugin.c
-
-ifeq ($(HAVE_REGEX), n)
-SRCS_y += regexp.c
-endif
-
-SRCS=		rdkafka.c rdkafka_broker.c rdkafka_msg.c rdkafka_topic.c \
-		rdkafka_conf.c rdkafka_timer.c rdkafka_offset.c \
-		rdkafka_transport.c rdkafka_buf.c rdkafka_queue.c rdkafka_op.c \
-		rdkafka_request.c rdkafka_cgrp.c rdkafka_pattern.c \
-		rdkafka_partition.c rdkafka_subscription.c \
-		rdkafka_assignor.c rdkafka_range_assignor.c \
-		rdkafka_roundrobin_assignor.c rdkafka_feature.c \
-		rdcrc32.c crc32c.c rdaddr.c rdrand.c rdlist.c tinycthread.c \
-		rdlog.c rdstring.c rdkafka_event.c rdkafka_metadata.c \
-		rdregex.c rdports.c rdkafka_metadata_cache.c rdavl.c \
-		rdkafka_sasl.c rdkafka_sasl_plain.c rdkafka_interceptor.c \
-		rdkafka_msgset_writer.c rdkafka_msgset_reader.c \
-		rdvarint.c rdbuf.c rdunittest.c \
-		$(SRCS_y)
-
-HDRS=		rdkafka.h
-
-OBJS=		$(SRCS:.c=.o)
-
-
-all: lib check
-
-include ../mklove/Makefile.base
-
-CHECK_FILES+= $(LIBFILENAME) $(LIBNAME).a
-
-file-check: lib
-check: file-check
-	@(printf "%-30s " "Symbol visibility" ; \
-		(($(SYMDUMPER) $(LIBFILENAME) | grep rd_kafka_new >/dev/null) && \
-		($(SYMDUMPER) $(LIBFILENAME) | grep -v rd_kafka_destroy >/dev/null) && \
-		printf "$(MKL_GREEN)OK$(MKL_CLR_RESET)\n") || \
-		printf "$(MKL_RED)FAILED$(MKL_CLR_RESET)\n")
-
-install: lib-install
-
-clean: lib-clean
-
-# Compile LZ4 with -O3
-$(SRCS_LZ4:.c=.o): CFLAGS:=$(CFLAGS) -O3
-
-ifeq ($(WITH_LDS),y)
-# Enable linker script if supported by platform
-LIB_LDFLAGS+= $(LDFLAG_LINKERSCRIPT)$(LIBNAME).lds
-endif
-
-$(LIBNAME).lds: $(HDRS)
-	@(printf "$(MKL_YELLOW)Generating linker script $@ from $(HDRS)$(MKL_CLR_RESET)\n" ; \
-	  cat $(HDRS) | ../lds-gen.py > $@)
-
--include $(DEPS)

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/crc32c.c
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/crc32c.c b/thirdparty/librdkafka-0.11.1/src/crc32c.c
deleted file mode 100644
index cd58147..0000000
--- a/thirdparty/librdkafka-0.11.1/src/crc32c.c
+++ /dev/null
@@ -1,427 +0,0 @@
-/* Copied from http://stackoverflow.com/a/17646775/1821055
- * with the following modifications:
- *   * remove test code
- *   * global hw/sw initialization to be called once per process
- *   * HW support is determined by configure's WITH_CRC32C_HW
- *   * Windows porting (no hardware support on Windows yet)
- *
- * FIXME:
- *   * Hardware support on Windows (MSVC assembler)
- *   * Hardware support on ARM
- */
-
-/* crc32c.c -- compute CRC-32C using the Intel crc32 instruction
- * Copyright (C) 2013 Mark Adler
- * Version 1.1  1 Aug 2013  Mark Adler
- */
-
-/*
-  This software is provided 'as-is', without any express or implied
-  warranty.  In no event will the author be held liable for any damages
-  arising from the use of this software.
-
-  Permission is granted to anyone to use this software for any purpose,
-  including commercial applications, and to alter it and redistribute it
-  freely, subject to the following restrictions:
-
-  1. The origin of this software must not be misrepresented; you must not
-     claim that you wrote the original software. If you use this software
-     in a product, an acknowledgment in the product documentation would be
-     appreciated but is not required.
-  2. Altered source versions must be plainly marked as such, and must not be
-     misrepresented as being the original software.
-  3. This notice may not be removed or altered from any source distribution.
-
-  Mark Adler
-  madler@alumni.caltech.edu
- */
-
-/* Use hardware CRC instruction on Intel SSE 4.2 processors.  This computes a
-   CRC-32C, *not* the CRC-32 used by Ethernet and zip, gzip, etc.  A software
-   version is provided as a fall-back, as well as for speed comparisons. */
-
-/* Version history:
-   1.0  10 Feb 2013  First version
-   1.1   1 Aug 2013  Correct comments on why three crc instructions in parallel
- */
-
-#include "rd.h"
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <stdint.h>
-#ifndef _MSC_VER
-#include <unistd.h>
-#endif
-
-#include "rdunittest.h"
-
-#include "crc32c.h"
-
-/* CRC-32C (iSCSI) polynomial in reversed bit order. */
-#define POLY 0x82f63b78
-
-/* Table for a quadword-at-a-time software crc. */
-static uint32_t crc32c_table[8][256];
-
-/* Construct table for software CRC-32C calculation. */
-static void crc32c_init_sw(void)
-{
-    uint32_t n, crc, k;
-
-    for (n = 0; n < 256; n++) {
-        crc = n;
-        crc = crc & 1 ? (crc >> 1) ^ POLY : crc >> 1;
-        crc = crc & 1 ? (crc >> 1) ^ POLY : crc >> 1;
-        crc = crc & 1 ? (crc >> 1) ^ POLY : crc >> 1;
-        crc = crc & 1 ? (crc >> 1) ^ POLY : crc >> 1;
-        crc = crc & 1 ? (crc >> 1) ^ POLY : crc >> 1;
-        crc = crc & 1 ? (crc >> 1) ^ POLY : crc >> 1;
-        crc = crc & 1 ? (crc >> 1) ^ POLY : crc >> 1;
-        crc = crc & 1 ? (crc >> 1) ^ POLY : crc >> 1;
-        crc32c_table[0][n] = crc;
-    }
-    for (n = 0; n < 256; n++) {
-        crc = crc32c_table[0][n];
-        for (k = 1; k < 8; k++) {
-            crc = crc32c_table[0][crc & 0xff] ^ (crc >> 8);
-            crc32c_table[k][n] = crc;
-        }
-    }
-}
-
-/* Table-driven software version as a fall-back.  This is about 15 times slower
-   than using the hardware instructions.  This assumes little-endian integers,
-   as is the case on Intel processors that the assembler code here is for. */
-static uint32_t crc32c_sw(uint32_t crci, const void *buf, size_t len)
-{
-    const unsigned char *next = buf;
-    uint64_t crc;
-
-    crc = crci ^ 0xffffffff;
-    while (len && ((uintptr_t)next & 7) != 0) {
-        crc = crc32c_table[0][(crc ^ *next++) & 0xff] ^ (crc >> 8);
-        len--;
-    }
-    while (len >= 8) {
-#if defined(__sparc) || defined(__sparc__) || defined(__APPLE__)
-        /* Alignment-safe alternative.
-         * This is also needed on Apple to avoid compilation warnings for
-         * non-apparent alignment reasons. */
-        uint64_t ncopy;
-        memcpy(&ncopy, next, sizeof(ncopy));
-        crc ^= ncopy;
-#else
-        crc ^= *(uint64_t *)next;
-#endif
-        crc = crc32c_table[7][crc & 0xff] ^
-              crc32c_table[6][(crc >> 8) & 0xff] ^
-              crc32c_table[5][(crc >> 16) & 0xff] ^
-              crc32c_table[4][(crc >> 24) & 0xff] ^
-              crc32c_table[3][(crc >> 32) & 0xff] ^
-              crc32c_table[2][(crc >> 40) & 0xff] ^
-              crc32c_table[1][(crc >> 48) & 0xff] ^
-              crc32c_table[0][crc >> 56];
-        next += 8;
-        len -= 8;
-    }
-    while (len) {
-        crc = crc32c_table[0][(crc ^ *next++) & 0xff] ^ (crc >> 8);
-        len--;
-    }
-    return (uint32_t)crc ^ 0xffffffff;
-}
-
-
-#if WITH_CRC32C_HW
-static int sse42;  /* Cached SSE42 support */
-
-/* Multiply a matrix times a vector over the Galois field of two elements,
-   GF(2).  Each element is a bit in an unsigned integer.  mat must have at
-   least as many entries as the power of two for most significant one bit in
-   vec. */
-static RD_INLINE uint32_t gf2_matrix_times(uint32_t *mat, uint32_t vec)
-{
-    uint32_t sum;
-
-    sum = 0;
-    while (vec) {
-        if (vec & 1)
-            sum ^= *mat;
-        vec >>= 1;
-        mat++;
-    }
-    return sum;
-}
-
-/* Multiply a matrix by itself over GF(2).  Both mat and square must have 32
-   rows. */
-static RD_INLINE void gf2_matrix_square(uint32_t *square, uint32_t *mat)
-{
-    int n;
-
-    for (n = 0; n < 32; n++)
-        square[n] = gf2_matrix_times(mat, mat[n]);
-}
-
-/* Construct an operator to apply len zeros to a crc.  len must be a power of
-   two.  If len is not a power of two, then the result is the same as for the
-   largest power of two less than len.  The result for len == 0 is the same as
-   for len == 1.  A version of this routine could be easily written for any
-   len, but that is not needed for this application. */
-static void crc32c_zeros_op(uint32_t *even, size_t len)
-{
-    int n;
-    uint32_t row;
-    uint32_t odd[32];       /* odd-power-of-two zeros operator */
-
-    /* put operator for one zero bit in odd */
-    odd[0] = POLY;              /* CRC-32C polynomial */
-    row = 1;
-    for (n = 1; n < 32; n++) {
-        odd[n] = row;
-        row <<= 1;
-    }
-
-    /* put operator for two zero bits in even */
-    gf2_matrix_square(even, odd);
-
-    /* put operator for four zero bits in odd */
-    gf2_matrix_square(odd, even);
-
-    /* first square will put the operator for one zero byte (eight zero bits),
-       in even -- next square puts operator for two zero bytes in odd, and so
-       on, until len has been rotated down to zero */
-    do {
-        gf2_matrix_square(even, odd);
-        len >>= 1;
-        if (len == 0)
-            return;
-        gf2_matrix_square(odd, even);
-        len >>= 1;
-    } while (len);
-
-    /* answer ended up in odd -- copy to even */
-    for (n = 0; n < 32; n++)
-        even[n] = odd[n];
-}
-
-/* Take a length and build four lookup tables for applying the zeros operator
-   for that length, byte-by-byte on the operand. */
-static void crc32c_zeros(uint32_t zeros[][256], size_t len)
-{
-    uint32_t n;
-    uint32_t op[32];
-
-    crc32c_zeros_op(op, len);
-    for (n = 0; n < 256; n++) {
-        zeros[0][n] = gf2_matrix_times(op, n);
-        zeros[1][n] = gf2_matrix_times(op, n << 8);
-        zeros[2][n] = gf2_matrix_times(op, n << 16);
-        zeros[3][n] = gf2_matrix_times(op, n << 24);
-    }
-}
-
-/* Apply the zeros operator table to crc. */
-static RD_INLINE uint32_t crc32c_shift(uint32_t zeros[][256], uint32_t crc)
-{
-    return zeros[0][crc & 0xff] ^ zeros[1][(crc >> 8) & 0xff] ^
-           zeros[2][(crc >> 16) & 0xff] ^ zeros[3][crc >> 24];
-}
-
-/* Block sizes for three-way parallel crc computation.  LONG and SHORT must
-   both be powers of two.  The associated string constants must be set
-   accordingly, for use in constructing the assembler instructions. */
-#define LONG 8192
-#define LONGx1 "8192"
-#define LONGx2 "16384"
-#define SHORT 256
-#define SHORTx1 "256"
-#define SHORTx2 "512"
-
-/* Tables for hardware crc that shift a crc by LONG and SHORT zeros. */
-static uint32_t crc32c_long[4][256];
-static uint32_t crc32c_short[4][256];
-
-/* Initialize tables for shifting crcs. */
-static void crc32c_init_hw(void)
-{
-    crc32c_zeros(crc32c_long, LONG);
-    crc32c_zeros(crc32c_short, SHORT);
-}
-
-/* Compute CRC-32C using the Intel hardware instruction. */
-static uint32_t crc32c_hw(uint32_t crc, const void *buf, size_t len)
-{
-    const unsigned char *next = buf;
-    const unsigned char *end;
-    uint64_t crc0, crc1, crc2;      /* need to be 64 bits for crc32q */
-
-    /* pre-process the crc */
-    crc0 = crc ^ 0xffffffff;
-
-    /* compute the crc for up to seven leading bytes to bring the data pointer
-       to an eight-byte boundary */
-    while (len && ((uintptr_t)next & 7) != 0) {
-        __asm__("crc32b\t" "(%1), %0"
-                : "=r"(crc0)
-                : "r"(next), "0"(crc0));
-        next++;
-        len--;
-    }
-
-    /* compute the crc on sets of LONG*3 bytes, executing three independent crc
-       instructions, each on LONG bytes -- this is optimized for the Nehalem,
-       Westmere, Sandy Bridge, and Ivy Bridge architectures, which have a
-       throughput of one crc per cycle, but a latency of three cycles */
-    while (len >= LONG*3) {
-        crc1 = 0;
-        crc2 = 0;
-        end = next + LONG;
-        do {
-            __asm__("crc32q\t" "(%3), %0\n\t"
-                    "crc32q\t" LONGx1 "(%3), %1\n\t"
-                    "crc32q\t" LONGx2 "(%3), %2"
-                    : "=r"(crc0), "=r"(crc1), "=r"(crc2)
-                    : "r"(next), "0"(crc0), "1"(crc1), "2"(crc2));
-            next += 8;
-        } while (next < end);
-        crc0 = crc32c_shift(crc32c_long, crc0) ^ crc1;
-        crc0 = crc32c_shift(crc32c_long, crc0) ^ crc2;
-        next += LONG*2;
-        len -= LONG*3;
-    }
-
-    /* do the same thing, but now on SHORT*3 blocks for the remaining data less
-       than a LONG*3 block */
-    while (len >= SHORT*3) {
-        crc1 = 0;
-        crc2 = 0;
-        end = next + SHORT;
-        do {
-            __asm__("crc32q\t" "(%3), %0\n\t"
-                    "crc32q\t" SHORTx1 "(%3), %1\n\t"
-                    "crc32q\t" SHORTx2 "(%3), %2"
-                    : "=r"(crc0), "=r"(crc1), "=r"(crc2)
-                    : "r"(next), "0"(crc0), "1"(crc1), "2"(crc2));
-            next += 8;
-        } while (next < end);
-        crc0 = crc32c_shift(crc32c_short, crc0) ^ crc1;
-        crc0 = crc32c_shift(crc32c_short, crc0) ^ crc2;
-        next += SHORT*2;
-        len -= SHORT*3;
-    }
-
-    /* compute the crc on the remaining eight-byte units less than a SHORT*3
-       block */
-    end = next + (len - (len & 7));
-    while (next < end) {
-        __asm__("crc32q\t" "(%1), %0"
-                : "=r"(crc0)
-                : "r"(next), "0"(crc0));
-        next += 8;
-    }
-    len &= 7;
-
-    /* compute the crc for up to seven trailing bytes */
-    while (len) {
-        __asm__("crc32b\t" "(%1), %0"
-                : "=r"(crc0)
-                : "r"(next), "0"(crc0));
-        next++;
-        len--;
-    }
-
-    /* return a post-processed crc */
-    return (uint32_t)crc0 ^ 0xffffffff;
-}
-
-/* Check for SSE 4.2.  SSE 4.2 was first supported in Nehalem processors
-   introduced in November, 2008.  This does not check for the existence of the
-   cpuid instruction itself, which was introduced on the 486SL in 1992, so this
-   will fail on earlier x86 processors.  cpuid works on all Pentium and later
-   processors. */
-#define SSE42(have) \
-    do { \
-        uint32_t eax, ecx; \
-        eax = 1; \
-        __asm__("cpuid" \
-                : "=c"(ecx) \
-                : "a"(eax) \
-                : "%ebx", "%edx"); \
-        (have) = (ecx >> 20) & 1; \
-    } while (0)
-
-#endif /* WITH_CRC32C_HW */
-
-/* Compute a CRC-32C.  If the crc32 instruction is available, use the hardware
-   version.  Otherwise, use the software version. */
-uint32_t crc32c(uint32_t crc, const void *buf, size_t len)
-{
-#if WITH_CRC32C_HW
-        if (sse42)
-                return crc32c_hw(crc, buf, len);
-        else
-#endif
-                return crc32c_sw(crc, buf, len);
-}
-
-
-
-
-
-
-/**
- * @brief Populate shift tables once
- */
-void crc32c_global_init (void) {
-#if WITH_CRC32C_HW
-        SSE42(sse42);
-        if (sse42)
-                crc32c_init_hw();
-        else
-#endif
-                crc32c_init_sw();
-}
-
-int unittest_crc32c (void) {
-        const char *buf =
-"  This software is provided 'as-is', without any express or implied\n"
-"  warranty.  In no event will the author be held liable for any damages\n"
-"  arising from the use of this software.\n"
-"\n"
-"  Permission is granted to anyone to use this software for any purpose,\n"
-"  including commercial applications, and to alter it and redistribute it\n"
-"  freely, subject to the following restrictions:\n"
-"\n"
-"  1. The origin of this software must not be misrepresented; you must not\n"
-"     claim that you wrote the original software. If you use this software\n"
-"     in a product, an acknowledgment in the product documentation would be\n"
-"     appreciated but is not required.\n"
-"  2. Altered source versions must be plainly marked as such, and must not be\n"
-"     misrepresented as being the original software.\n"
-"  3. This notice may not be removed or altered from any source distribution.";
-        const uint32_t expected_crc = 0x7dcde113;
-        uint32_t crc;
-        const char *how;
-
-        crc32c_global_init();
-
-#if WITH_CRC32C_HW
-        if (sse42)
-                how = "hardware (SSE42)";
-        else
-                how = "software (SSE42 supported in build but not at runtime)";
-#else
-        how = "software";
-#endif
-        RD_UT_SAY("Calculate CRC32C using %s", how);
-
-        crc = crc32c(0, buf, strlen(buf));
-        RD_UT_ASSERT(crc == expected_crc,
-                     "Calculated CRC 0x%"PRIx32
-                     " not matching expected CRC 0x%"PRIx32,
-                     crc, expected_crc);
-
-        RD_UT_PASS();
-}
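As a cross-check of the polynomial and the pre/post XOR conditioning used by
both the table-driven and hardware paths above, a bit-at-a-time CRC-32C
reference (a stand-alone sketch, not taken from librdkafka) can be verified
against the well-known check value 0xe3069283 for the string "123456789":

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* CRC-32C (Castagnoli): reversed polynomial 0x82f63b78, LSB-first,
 * with the same 0xffffffff pre/post conditioning as the code above. */
static uint32_t crc32c_bitwise (uint32_t crc, const void *buf, size_t len) {
        const unsigned char *p = buf;
        int k;

        crc ^= 0xffffffff;
        while (len--) {
                crc ^= *p++;
                for (k = 0 ; k < 8 ; k++)
                        crc = (crc >> 1) ^ (0x82f63b78 & -(crc & 1));
        }
        return crc ^ 0xffffffff;
}

int main (void) {
        /* Standard CRC-32C check value for the ASCII string "123456789". */
        assert(crc32c_bitwise(0, "123456789", 9) == 0xe3069283);
        return 0;
}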

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/crc32c.h
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/crc32c.h b/thirdparty/librdkafka-0.11.1/src/crc32c.h
deleted file mode 100644
index 6abe33e..0000000
--- a/thirdparty/librdkafka-0.11.1/src/crc32c.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2017 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#pragma once
-
-uint32_t crc32c(uint32_t crc, const void *buf, size_t len);
-
-void crc32c_global_init (void);
-
-int unittest_crc32c (void);

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/librdkafka_cgrp_synch.png
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/librdkafka_cgrp_synch.png b/thirdparty/librdkafka-0.11.1/src/librdkafka_cgrp_synch.png
deleted file mode 100644
index 8df1eda..0000000
Binary files a/thirdparty/librdkafka-0.11.1/src/librdkafka_cgrp_synch.png and /dev/null differ


http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdkafka_msgset_writer.c
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdkafka_msgset_writer.c b/thirdparty/librdkafka-0.11.1/src/rdkafka_msgset_writer.c
deleted file mode 100644
index 5faad84..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdkafka_msgset_writer.c
+++ /dev/null
@@ -1,1161 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2017 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "rd.h"
-#include "rdkafka_int.h"
-#include "rdkafka_msg.h"
-#include "rdkafka_msgset.h"
-#include "rdkafka_topic.h"
-#include "rdkafka_partition.h"
-#include "rdkafka_lz4.h"
-
-#include "snappy.h"
-#include "rdvarint.h"
-#include "crc32c.h"
-
-
-typedef struct rd_kafka_msgset_writer_s {
-        rd_kafka_buf_t *msetw_rkbuf;     /* Backing store buffer (refcounted)*/
-
-        int16_t msetw_ApiVersion;        /* ProduceRequest ApiVersion */
-        int     msetw_MsgVersion;        /* MsgVersion to construct */
-        int     msetw_features;          /* Protocol features to use */
-        int     msetw_msgcntmax;         /* Max number of messages to send
-                                          * in a batch. */
-        size_t  msetw_messages_len;      /* Total size of Messages, without
-                                          * MessageSet header */
-
-        size_t  msetw_MessageSetSize;    /* Current MessageSetSize value */
-        size_t  msetw_of_MessageSetSize; /* offset of MessageSetSize */
-        size_t  msetw_of_start;          /* offset of MessageSet */
-
-        int     msetw_relative_offsets;  /* Bool: use relative offsets */
-
-        /* For MessageSet v2 */
-        int     msetw_Attributes;        /* MessageSet Attributes */
-        int64_t msetw_MaxTimestamp;      /* Maximum timestamp in batch */
-        size_t  msetw_of_CRC;            /* offset of MessageSet.CRC */
-
-        /* First message information */
-        struct {
-                size_t     of;  /* rkbuf's first message position */
-                int64_t    timestamp;
-        } msetw_firstmsg;
-
-        rd_kafka_broker_t *msetw_rkb;    /* @warning Not a refcounted
-                                          *          reference! */
-        rd_kafka_toppar_t *msetw_rktp;   /* @warning Not a refcounted
-                                          *          reference! */
-} rd_kafka_msgset_writer_t;
-
-
-
-/**
- * @brief Select ApiVersion and MsgVersion to use based on broker's
- *        feature compatibility.
- *
- * @locality broker thread
- */
-static RD_INLINE void
-rd_kafka_msgset_writer_select_MsgVersion (rd_kafka_msgset_writer_t *msetw) {
-        rd_kafka_broker_t *rkb = msetw->msetw_rkb;
-        int feature;
-
-        if ((feature = rkb->rkb_features & RD_KAFKA_FEATURE_MSGVER2)) {
-                msetw->msetw_ApiVersion = 3;
-                msetw->msetw_MsgVersion = 2;
-                msetw->msetw_features |= feature;
-        } else if ((feature = rkb->rkb_features & RD_KAFKA_FEATURE_MSGVER1)) {
-                msetw->msetw_ApiVersion = 2;
-                msetw->msetw_MsgVersion = 1;
-                msetw->msetw_features |= feature;
-        } else {
-                if ((feature =
-                     rkb->rkb_features & RD_KAFKA_FEATURE_THROTTLETIME)) {
-                        msetw->msetw_ApiVersion = 1;
-                        msetw->msetw_features |= feature;
-                } else
-                        msetw->msetw_ApiVersion = 0;
-                msetw->msetw_MsgVersion = 0;
-        }
-}
-
-
-/**
- * @brief Allocate buffer for messageset writer based on a previously set
- *        up \p msetw.
- *
- * Allocate iovecs to hold all headers and messages,
- * and allocate enough space to allow copies of small messages.
- * The allocated size is the minimum of message.max.bytes
- * and queued_bytes + msgcntmax * msg_overhead.
- */
-static void
-rd_kafka_msgset_writer_alloc_buf (rd_kafka_msgset_writer_t *msetw) {
-        rd_kafka_t *rk = msetw->msetw_rkb->rkb_rk;
-        size_t msg_overhead = 0;
-        size_t hdrsize = 0;
-        size_t msgsetsize = 0;
-        size_t bufsize;
-
-        rd_kafka_assert(NULL, !msetw->msetw_rkbuf);
-
-        /* Calculate worst-case buffer size, produce header size,
-         * message size, etc.; this isn't critical but avoids unnecessary
-         * extra allocations. The buffer will grow as needed if we get
-         * this wrong.
-         *
-         * ProduceRequest headers go in one iovec:
-         *  ProduceRequest v0..2:
-         *    RequiredAcks + Timeout +
-         *    [Topic + [Partition + MessageSetSize]]
-         *
-         *  ProduceRequest v3:
-         *    TransactionalId + RequiredAcks + Timeout +
-         *    [Topic + [Partition + MessageSetSize + MessageSet]]
-         */
-
-        /*
-         * ProduceRequest header sizes
-         */
-        switch (msetw->msetw_ApiVersion)
-        {
-        case 3:
-                /* Add TransactionalId */
-                hdrsize += RD_KAFKAP_STR_SIZE(rk->rk_eos.TransactionalId);
-                /* FALLTHRU */
-        case 0:
-        case 1:
-        case 2:
-                hdrsize +=
-                        /* RequiredAcks + Timeout + TopicCnt */
-                        2 + 4 + 4 +
-                        /* Topic */
-                        RD_KAFKAP_STR_SIZE(msetw->msetw_rktp->
-                                           rktp_rkt->rkt_topic) +
-                        /* PartitionCnt + Partition + MessageSetSize */
-                        4 + 4 + 4;
-                msgsetsize += 4; /* MessageSetSize */
-                break;
-
-        default:
-                RD_NOTREACHED();
-        }
-
-        /*
-         * MsgVersion specific sizes:
-         * - (Worst-case) Message overhead: message fields
-         * - MessageSet header size
-         */
-        switch (msetw->msetw_MsgVersion)
-        {
-        case 0:
-                /* MsgVer0 */
-                msg_overhead = RD_KAFKAP_MESSAGE_V0_OVERHEAD;
-                break;
-        case 1:
-                /* MsgVer1 */
-                msg_overhead = RD_KAFKAP_MESSAGE_V1_OVERHEAD;
-                break;
-
-        case 2:
-                /* MsgVer2 uses varints, we calculate for the worst-case. */
-                msg_overhead += RD_KAFKAP_MESSAGE_V2_OVERHEAD;
-
-                /* MessageSet header fields */
-                msgsetsize +=
-                        8 /* BaseOffset */ +
-                        4 /* Length */ +
-                        4 /* PartitionLeaderEpoch */ +
-                        1 /* Magic (MsgVersion) */ +
-                        4 /* CRC (CRC32C) */ +
-                        2 /* Attributes */ +
-                        4 /* LastOffsetDelta */ +
-                        8 /* BaseTimestamp */ +
-                        8 /* MaxTimestamp */ +
-                        8 /* ProducerId */ +
-                        2 /* ProducerEpoch */ +
-                        4 /* BaseSequence */ +
-                        4 /* RecordCount */;
-                break;
-
-        default:
-                RD_NOTREACHED();
-        }
-
-        /*
-         * Calculate total buffer size to allocate
-         */
-        bufsize = hdrsize + msgsetsize;
-
-        /* If copying for small payloads is enabled, allocate enough
-         * space for each message to be copied based on this limit.
-         */
-        if (rk->rk_conf.msg_copy_max_size > 0) {
-                size_t queued_bytes = rd_kafka_msgq_size(&msetw->msetw_rktp->
-                                                         rktp_xmit_msgq);
-                bufsize += RD_MIN(queued_bytes,
-                                  (size_t)rk->rk_conf.msg_copy_max_size *
-                                  msetw->msetw_msgcntmax);
-        }
-
-        /* Add estimated per-message overhead */
-        bufsize += msg_overhead * msetw->msetw_msgcntmax;
-
-        /* Cap allocation at message.max.bytes */
-        if (bufsize > (size_t)rk->rk_conf.max_msg_size)
-                bufsize = (size_t)rk->rk_conf.max_msg_size;
-
-        /*
-         * Allocate iovecs to hold all headers and messages,
-         * and allocate auxiliary space for message headers, etc.
-         */
-        msetw->msetw_rkbuf =
-                rd_kafka_buf_new_request(msetw->msetw_rkb, RD_KAFKAP_Produce,
-                                         msetw->msetw_msgcntmax/2 + 10,
-                                         bufsize);
-
-        rd_kafka_buf_ApiVersion_set(msetw->msetw_rkbuf,
-                                    msetw->msetw_ApiVersion,
-                                    msetw->msetw_features);
-}
-
-
-/**
- * @brief Write the MessageSet header.
- * @remark Must only be called for MsgVersion 2
- */
-static void
-rd_kafka_msgset_writer_write_MessageSet_v2_header (
-        rd_kafka_msgset_writer_t *msetw) {
-        rd_kafka_buf_t *rkbuf = msetw->msetw_rkbuf;
-        rd_kafka_t *rk = msetw->msetw_rkb->rkb_rk;
-
-        rd_kafka_assert(NULL, msetw->msetw_ApiVersion >= 3);
-        rd_kafka_assert(NULL, msetw->msetw_MsgVersion == 2);
-
-        /* BaseOffset (also store the offset to the start of
-         * the messageset header fields) */
-        msetw->msetw_of_start = rd_kafka_buf_write_i64(rkbuf, 0);
-
-        /* Length: updated later */
-        rd_kafka_buf_write_i32(rkbuf, 0);
-
-        /* PartitionLeaderEpoch (KIP-101) */
-        rd_kafka_buf_write_i32(rkbuf, 0);
-
-        /* Magic (MsgVersion) */
-        rd_kafka_buf_write_i8(rkbuf, msetw->msetw_MsgVersion);
-
-        /* CRC (CRC32C): updated later.
-         * CRC needs to be done after the entire messageset+messages has
-         * been constructed and the following header fields updated. :(
-         * Save the offset of this position so it can be updated later. */
-        msetw->msetw_of_CRC = rd_kafka_buf_write_i32(rkbuf, 0);
-
-        /* Attributes: updated later */
-        rd_kafka_buf_write_i16(rkbuf, 0);
-
-        /* LastOffsetDelta: updated later */
-        rd_kafka_buf_write_i32(rkbuf, 0);
-
-        /* BaseTimestamp: updated later */
-        rd_kafka_buf_write_i64(rkbuf, 0);
-
-        /* MaxTimestamp: updated later */
-        rd_kafka_buf_write_i64(rkbuf, 0);
-
-        /* ProducerId */
-        rd_kafka_buf_write_i64(rkbuf, rk->rk_eos.PID);
-
-        /* ProducerEpoch */
-        rd_kafka_buf_write_i16(rkbuf, rk->rk_eos.ProducerEpoch);
-
-        /* BaseSequence */
-        rd_kafka_buf_write_i32(rkbuf, -1);
-
-        /* RecordCount: updated later */
-        rd_kafka_buf_write_i32(rkbuf, 0);
-
-}
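For reference, the fixed header fields written above add up to 61 bytes; a
small stand-alone sketch of that arithmetic (the constant name is illustrative;
the expectation that it equals RD_KAFKAP_MSGSET_V2_SIZE is an assumption based
on how the writer initializes msetw_MessageSetSize):

/* Fixed-size fields of a MessageSet (RecordBatch) v2 header, in wire order. */
enum {
        MSGSET_V2_HDR_SIZE = 8   /* BaseOffset */
                           + 4   /* Length */
                           + 4   /* PartitionLeaderEpoch */
                           + 1   /* Magic (MsgVersion) */
                           + 4   /* CRC (CRC32C) */
                           + 2   /* Attributes */
                           + 4   /* LastOffsetDelta */
                           + 8   /* BaseTimestamp */
                           + 8   /* MaxTimestamp */
                           + 8   /* ProducerId */
                           + 2   /* ProducerEpoch */
                           + 4   /* BaseSequence */
                           + 4   /* RecordCount */
};

/* Compile-time check that the total is 61 bytes. */
typedef char msgset_v2_hdr_is_61_bytes[(MSGSET_V2_HDR_SIZE == 61) ? 1 : -1];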
-
-
-/**
- * @brief Write ProduceRequest headers.
- *        When this function returns the msgset is ready for
- *        writing individual messages.
- *        msetw_MessageSetSize will have been set to the messageset header.
- */
-static void
-rd_kafka_msgset_writer_write_Produce_header (rd_kafka_msgset_writer_t *msetw) {
-
-        rd_kafka_buf_t *rkbuf = msetw->msetw_rkbuf;
-        rd_kafka_t *rk = msetw->msetw_rkb->rkb_rk;
-        rd_kafka_itopic_t *rkt = msetw->msetw_rktp->rktp_rkt;
-
-        /* V3: TransactionalId */
-        if (msetw->msetw_ApiVersion == 3)
-                rd_kafka_buf_write_kstr(rkbuf, rk->rk_eos.TransactionalId);
-
-        /* RequiredAcks */
-        rd_kafka_buf_write_i16(rkbuf, rkt->rkt_conf.required_acks);
-
-        /* Timeout */
-        rd_kafka_buf_write_i32(rkbuf, rkt->rkt_conf.request_timeout_ms);
-
-        /* TopicArrayCnt */
-        rd_kafka_buf_write_i32(rkbuf, 1);
-
-        /* Insert topic */
-        rd_kafka_buf_write_kstr(rkbuf, rkt->rkt_topic);
-
-        /* PartitionArrayCnt */
-        rd_kafka_buf_write_i32(rkbuf, 1);
-
-        /* Partition */
-        rd_kafka_buf_write_i32(rkbuf, msetw->msetw_rktp->rktp_partition);
-
-        /* MessageSetSize: Will be finalized later*/
-        msetw->msetw_of_MessageSetSize = rd_kafka_buf_write_i32(rkbuf, 0);
-
-        if (msetw->msetw_MsgVersion == 2) {
-                /* MessageSet v2 header */
-                rd_kafka_msgset_writer_write_MessageSet_v2_header(msetw);
-                msetw->msetw_MessageSetSize = RD_KAFKAP_MSGSET_V2_SIZE;
-        } else {
-                /* Older MessageSet */
-                msetw->msetw_MessageSetSize = RD_KAFKAP_MSGSET_V0_SIZE;
-        }
-}
-
-
-/**
- * @brief Initialize a ProduceRequest MessageSet writer for
- *        the given broker and partition.
- *
- *        A new buffer will be allocated to fit the pending messages in queue.
- *
- * @returns the number of messages to enqueue
- *
- * @remark This currently constructs the entire ProduceRequest, containing
- *         a single outer MessageSet for a single partition.
- */
-static int rd_kafka_msgset_writer_init (rd_kafka_msgset_writer_t *msetw,
-                                         rd_kafka_broker_t *rkb,
-                                         rd_kafka_toppar_t *rktp) {
-        int msgcnt = rd_atomic32_get(&rktp->rktp_xmit_msgq.rkmq_msg_cnt);
-
-        if (msgcnt == 0)
-                return 0;
-
-        memset(msetw, 0, sizeof(*msetw));
-
-        msetw->msetw_rktp = rktp;
-        msetw->msetw_rkb = rkb;
-
-        /* Max number of messages to send in a batch,
-         * limited by current queue size or configured batch size,
-         * whichever is lower. */
-        msetw->msetw_msgcntmax = RD_MIN(msgcnt,
-                                        rkb->rkb_rk->rk_conf.
-                                        batch_num_messages);
-        rd_dassert(msetw->msetw_msgcntmax > 0);
-
-        /* Select MsgVersion to use */
-        rd_kafka_msgset_writer_select_MsgVersion(msetw);
-
-        /* MsgVersion specific setup. */
-        switch (msetw->msetw_MsgVersion)
-        {
-        case 2:
-                msetw->msetw_relative_offsets = 1; /* OffsetDelta */
-                break;
-        case 1:
-                if (rktp->rktp_rkt->rkt_conf.compression_codec)
-                        msetw->msetw_relative_offsets = 1;
-                break;
-        }
-
-        /* Allocate backing buffer */
-        rd_kafka_msgset_writer_alloc_buf(msetw);
-
-        /* Construct first part of Produce header + MessageSet header */
-        rd_kafka_msgset_writer_write_Produce_header(msetw);
-
-        /* The current buffer position is now where the first message
-         * is located.
-         * Record the current buffer position so it can be rewound later
-         * in case of compression. */
-        msetw->msetw_firstmsg.of = rd_buf_write_pos(&msetw->msetw_rkbuf->
-                                                    rkbuf_buf);
-
-        return msetw->msetw_msgcntmax;
-}
-
-
-
-/**
- * @brief Copy or link message payload to buffer.
- */
-static RD_INLINE void
-rd_kafka_msgset_writer_write_msg_payload (rd_kafka_msgset_writer_t *msetw,
-                                          const rd_kafka_msg_t *rkm,
-                                          void (*free_cb)(void *)) {
-        const rd_kafka_t *rk = msetw->msetw_rkb->rkb_rk;
-        rd_kafka_buf_t *rkbuf = msetw->msetw_rkbuf;
-
-        /* If payload is below the copy limit and there is still
-         * room in the buffer we'll copy the payload to the buffer,
-         * otherwise we push a reference to the memory. */
-        if (rkm->rkm_len <= (size_t)rk->rk_conf.msg_copy_max_size &&
-            rd_buf_write_remains(&rkbuf->rkbuf_buf) > rkm->rkm_len)
-                rd_kafka_buf_write(rkbuf,
-                                   rkm->rkm_payload, rkm->rkm_len);
-        else
-                rd_kafka_buf_push(rkbuf, rkm->rkm_payload, rkm->rkm_len,
-                                  free_cb);
-}
-
-
-/**
- * @brief Write message to messageset buffer with MsgVersion 0 or 1.
- * @returns the number of bytes written.
- */
-static size_t
-rd_kafka_msgset_writer_write_msg_v0_1 (rd_kafka_msgset_writer_t *msetw,
-                                       rd_kafka_msg_t *rkm,
-                                       int64_t Offset,
-                                       int8_t MsgAttributes,
-                                       void (*free_cb)(void *)) {
-        rd_kafka_buf_t *rkbuf = msetw->msetw_rkbuf;
-        size_t MessageSize;
-        size_t of_Crc;
-
-        /*
-         * MessageSet's (v0 and v1) per-Message header.
-         */
-
-        /* Offset (only relevant for compressed messages on MsgVersion v1) */
-        rd_kafka_buf_write_i64(rkbuf, Offset);
-
-        /* MessageSize */
-        MessageSize =
-                4 + 1 + 1 + /* Crc+MagicByte+Attributes */
-                4 /* KeyLength */ + rkm->rkm_key_len +
-                4 /* ValueLength */ + rkm->rkm_len;
-
-        if (msetw->msetw_MsgVersion == 1)
-                MessageSize += 8; /* Timestamp i64 */
-
-        rd_kafka_buf_write_i32(rkbuf, (int32_t)MessageSize);
-
-        /*
-         * Message
-         */
-        /* Crc: will be updated later */
-        of_Crc = rd_kafka_buf_write_i32(rkbuf, 0);
-
-        /* Start Crc calculation of all buf writes. */
-        rd_kafka_buf_crc_init(rkbuf);
-
-        /* MagicByte */
-        rd_kafka_buf_write_i8(rkbuf, msetw->msetw_MsgVersion);
-
-        /* Attributes */
-        rd_kafka_buf_write_i8(rkbuf, MsgAttributes);
-
-        /* V1: Timestamp */
-        if (msetw->msetw_MsgVersion == 1)
-                rd_kafka_buf_write_i64(rkbuf, rkm->rkm_timestamp);
-
-        /* Message Key */
-        rd_kafka_buf_write_bytes(rkbuf, rkm->rkm_key, rkm->rkm_key_len);
-
-        /* Write or copy Value/payload */
-        if (rkm->rkm_payload) {
-                rd_kafka_buf_write_i32(rkbuf, (int32_t)rkm->rkm_len);
-                rd_kafka_msgset_writer_write_msg_payload(msetw, rkm, free_cb);
-        } else
-                rd_kafka_buf_write_i32(rkbuf, RD_KAFKAP_BYTES_LEN_NULL);
-
-        /* Finalize Crc */
-        rd_kafka_buf_update_u32(rkbuf, of_Crc,
-                                rd_kafka_buf_crc_finalize(rkbuf));
-
-
-        /* Return written message size */
-        return 8/*Offset*/ + 4/*MessageSize*/ + MessageSize;
-}
-
-/**
- * @brief Write message to messageset buffer with MsgVersion 2.
- * @returns the number of bytes written.
- */
-static size_t
-rd_kafka_msgset_writer_write_msg_v2 (rd_kafka_msgset_writer_t *msetw,
-                                     rd_kafka_msg_t *rkm,
-                                     int64_t Offset,
-                                     int8_t MsgAttributes,
-                                     void (*free_cb)(void *)) {
-        rd_kafka_buf_t *rkbuf = msetw->msetw_rkbuf;
-        size_t MessageSize = 0;
-        char varint_Length[RD_UVARINT_ENC_SIZEOF(int32_t)];
-        char varint_TimestampDelta[RD_UVARINT_ENC_SIZEOF(int64_t)];
-        char varint_OffsetDelta[RD_UVARINT_ENC_SIZEOF(int64_t)];
-        char varint_KeyLen[RD_UVARINT_ENC_SIZEOF(int32_t)];
-        char varint_ValueLen[RD_UVARINT_ENC_SIZEOF(int32_t)];
-        char varint_HeaderCount[RD_UVARINT_ENC_SIZEOF(int32_t)];
-        size_t sz_Length;
-        size_t sz_TimestampDelta;
-        size_t sz_OffsetDelta;
-        size_t sz_KeyLen;
-        size_t sz_ValueLen;
-        size_t sz_HeaderCount;
-
-        /* All varints, except for Length, need to be pre-built
-         * so that the Length field can be set correctly and thus have
-         * the correct varint-encoded width. */
-
-        sz_TimestampDelta = rd_uvarint_enc_i64(
-                varint_TimestampDelta, sizeof(varint_TimestampDelta),
-                rkm->rkm_timestamp - msetw->msetw_firstmsg.timestamp);
-        sz_OffsetDelta = rd_uvarint_enc_i64(
-                varint_OffsetDelta, sizeof(varint_OffsetDelta), Offset);
-        sz_KeyLen = rd_uvarint_enc_i32(
-                varint_KeyLen, sizeof(varint_KeyLen),
-                rkm->rkm_key ? (int32_t)rkm->rkm_key_len :
-                (int32_t)RD_KAFKAP_BYTES_LEN_NULL);
-        sz_ValueLen = rd_uvarint_enc_i32(
-                varint_ValueLen, sizeof(varint_ValueLen),
-                rkm->rkm_payload ? (int32_t)rkm->rkm_len :
-                (int32_t)RD_KAFKAP_BYTES_LEN_NULL);
-        sz_HeaderCount = rd_uvarint_enc_i32(
-                varint_HeaderCount, sizeof(varint_HeaderCount), 0);
-
-        /* Calculate MessageSize without length of Length (added later)
-         * to store it in Length. */
-        MessageSize =
-                1 /* MsgAttributes */ +
-                sz_TimestampDelta +
-                sz_OffsetDelta +
-                sz_KeyLen +
-                rkm->rkm_key_len +
-                sz_ValueLen +
-                rkm->rkm_len +
-                sz_HeaderCount;
-
-        /* Length */
-        sz_Length = rd_uvarint_enc_i64(varint_Length, sizeof(varint_Length),
-                                       MessageSize);
-        rd_kafka_buf_write(rkbuf, varint_Length, sz_Length);
-        MessageSize += sz_Length;
-
-        /* Attributes: The MsgAttributes argument is loosely based on MsgVer0
-         *             and does not apply to MsgVer2 */
-        rd_kafka_buf_write_i8(rkbuf, 0);
-
-        /* TimestampDelta */
-        rd_kafka_buf_write(rkbuf, varint_TimestampDelta, sz_TimestampDelta);
-
-        /* OffsetDelta */
-        rd_kafka_buf_write(rkbuf, varint_OffsetDelta, sz_OffsetDelta);
-
-        /* KeyLen */
-        rd_kafka_buf_write(rkbuf, varint_KeyLen, sz_KeyLen);
-
-        /* Key (if any) */
-        if (rkm->rkm_key)
-                rd_kafka_buf_write(rkbuf, rkm->rkm_key, rkm->rkm_key_len);
-
-        /* ValueLen */
-        rd_kafka_buf_write(rkbuf, varint_ValueLen, sz_ValueLen);
-
-        /* Write or copy Value/payload */
-        if (rkm->rkm_payload)
-                rd_kafka_msgset_writer_write_msg_payload(msetw, rkm, free_cb);
-
-        /* HeaderCount (headers currently not implemented) */
-        rd_kafka_buf_write(rkbuf, varint_HeaderCount, sz_HeaderCount);
-
-        /* Return written message size */
-        return MessageSize;
-}
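The TimestampDelta, OffsetDelta and length fields above are written as
Kafka-protocol varints: the sign is folded into the low bit (zig-zag) and the
value is then emitted seven bits per byte with a continuation flag in the high
bit. A self-contained sketch of that encoding (assumed to mirror what
rd_uvarint_enc_i64() produces, but not copied from it):

#include <stddef.h>
#include <stdint.h>

/* Encode a signed 64-bit value as a zig-zag varint into dst.
 * Returns the number of bytes written (at most 10), or 0 if dst is too small. */
static size_t varint_enc_i64 (int64_t v, unsigned char *dst, size_t dst_size) {
        /* Zig-zag: 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, ... */
        uint64_t u = ((uint64_t)v << 1) ^ (v < 0 ? ~(uint64_t)0 : 0);
        size_t of = 0;

        do {
                if (of >= dst_size)
                        return 0;
                dst[of] = (unsigned char)(u & 0x7f);
                u >>= 7;
                if (u)
                        dst[of] |= 0x80;  /* continuation bit: more bytes follow */
                of++;
        } while (u);

        return of;
}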
-
-
-/**
- * @brief Write message to messageset buffer.
- * @returns the number of bytes written.
- */
-static size_t
-rd_kafka_msgset_writer_write_msg (rd_kafka_msgset_writer_t *msetw,
-                                  rd_kafka_msg_t *rkm,
-                                  int64_t Offset, int8_t MsgAttributes,
-                                  void (*free_cb)(void *)) {
-        size_t outlen;
-        size_t (*writer[]) (rd_kafka_msgset_writer_t *,
-                            rd_kafka_msg_t *, int64_t, int8_t,
-                            void (*)(void *)) = {
-                [0] = rd_kafka_msgset_writer_write_msg_v0_1,
-                [1] = rd_kafka_msgset_writer_write_msg_v0_1,
-                [2] = rd_kafka_msgset_writer_write_msg_v2
-        };
-        size_t actual_written;
-        size_t pre_pos;
-
-        if (likely(rkm->rkm_timestamp))
-                MsgAttributes |= RD_KAFKA_MSG_ATTR_CREATE_TIME;
-
-        pre_pos = rd_buf_write_pos(&msetw->msetw_rkbuf->rkbuf_buf);
-
-        outlen = writer[msetw->msetw_MsgVersion](msetw, rkm,
-                                                 Offset, MsgAttributes,
-                                                 free_cb);
-
-        actual_written = rd_buf_write_pos(&msetw->msetw_rkbuf->rkbuf_buf) -
-                pre_pos;
-        rd_assert(outlen <=
-                   rd_kafka_msg_wire_size(rkm, msetw->msetw_MsgVersion));
-        rd_assert(outlen == actual_written);
-
-        return outlen;
-
-}
-
-/**
- * @brief Write as many messages as possible from the given message queue
- *        to the messageset.
- */
-static void
-rd_kafka_msgset_writer_write_msgq (rd_kafka_msgset_writer_t *msetw,
-                                   rd_kafka_msgq_t *rkmq) {
-        rd_kafka_buf_t *rkbuf = msetw->msetw_rkbuf;
-        rd_kafka_toppar_t *rktp = msetw->msetw_rktp;
-        rd_kafka_broker_t *rkb = msetw->msetw_rkb;
-        size_t len = rd_buf_len(&msetw->msetw_rkbuf->rkbuf_buf);
-        size_t max_msg_size = (size_t)msetw->msetw_rkb->rkb_rk->
-                rk_conf.max_msg_size;
-        rd_ts_t int_latency_base;
-        rd_ts_t MaxTimestamp = 0;
-        rd_kafka_msg_t *rkm;
-        int msgcnt = 0;
-
-        /* Internal latency calculation base.
-         * Uses rkm_ts_timeout which is enqueue time + timeout */
-        int_latency_base = rd_clock() +
-                (rktp->rktp_rkt->rkt_conf.message_timeout_ms * 1000);
-
-        /* Acquire BaseTimestamp from first message. */
-        rkm = TAILQ_FIRST(&rkmq->rkmq_msgs);
-        rd_kafka_assert(NULL, rkm);
-        msetw->msetw_firstmsg.timestamp = rkm->rkm_timestamp;
-
-        /*
-         * Write as many messages as possible until buffer is full
-         * or limit reached.
-         */
-        do {
-                if (unlikely(msgcnt == msetw->msetw_msgcntmax ||
-                             len + rd_kafka_msg_wire_size(rkm, msetw->
-                                                          msetw_MsgVersion) >
-                             max_msg_size)) {
-                        rd_rkb_dbg(rkb, MSG, "PRODUCE",
-                                   "No more space in current MessageSet "
-                                   "(%i message(s), %"PRIusz" bytes)",
-                                   msgcnt, len);
-                        break;
-                }
-
-                /* Move message to buffer's queue */
-                rd_kafka_msgq_deq(rkmq, rkm, 1);
-                rd_kafka_msgq_enq(&rkbuf->rkbuf_msgq, rkm);
-
-                /* Add internal latency metrics */
-                rd_avg_add(&rkb->rkb_avg_int_latency,
-                           int_latency_base - rkm->rkm_ts_timeout);
-
-                /* MessageSet v2's .MaxTimestamp field */
-                if (unlikely(MaxTimestamp < rkm->rkm_timestamp))
-                        MaxTimestamp = rkm->rkm_timestamp;
-
-                /* Write message to buffer */
-                len += rd_kafka_msgset_writer_write_msg(msetw, rkm, msgcnt, 0,
-                                                        NULL);
-
-                rd_dassert(len <= max_msg_size);
-                msgcnt++;
-
-        } while ((rkm = TAILQ_FIRST(&rkmq->rkmq_msgs)));
-
-        msetw->msetw_MaxTimestamp = MaxTimestamp;
-}
-
-
-#if WITH_ZLIB
-/**
- * @brief Compress messageset using gzip/zlib
- */
-static int
-rd_kafka_msgset_writer_compress_gzip (rd_kafka_msgset_writer_t *msetw,
-                                      rd_slice_t *slice,
-                                      struct iovec *ciov) {
-
-        rd_kafka_broker_t *rkb = msetw->msetw_rkb;
-        rd_kafka_toppar_t *rktp = msetw->msetw_rktp;
-        z_stream strm;
-        size_t len = rd_slice_remains(slice);
-        const void *p;
-        size_t rlen;
-        int r;
-
-        memset(&strm, 0, sizeof(strm));
-        r = deflateInit2(&strm, Z_DEFAULT_COMPRESSION,
-                         Z_DEFLATED, 15+16,
-                         8, Z_DEFAULT_STRATEGY);
-        if (r != Z_OK) {
-                rd_rkb_log(rkb, LOG_ERR, "GZIP",
-                           "Failed to initialize gzip for "
-                           "compressing %"PRIusz" bytes in "
-                           "topic %.*s [%"PRId32"]: %s (%i): "
-                           "sending uncompressed",
-                           len,
-                           RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
-                           rktp->rktp_partition,
-                           strm.msg ? strm.msg : "", r);
-                return -1;
-        }
-
-        /* Calculate maximum compressed size and
-         * allocate an output buffer accordingly, being
-         * prefixed with the Message header. */
-        ciov->iov_len = deflateBound(&strm, (uLong)rd_slice_remains(slice));
-        ciov->iov_base = rd_malloc(ciov->iov_len);
-
-        strm.next_out  = (void *)ciov->iov_base;
-        strm.avail_out =   (uInt)ciov->iov_len;
-
-        /* Iterate through each segment and compress it. */
-        while ((rlen = rd_slice_reader(slice, &p))) {
-
-                strm.next_in  = (void *)p;
-                strm.avail_in =   (uInt)rlen;
-
-                /* Compress message */
-                if ((r = deflate(&strm, Z_NO_FLUSH) != Z_OK)) {
-                        rd_rkb_log(rkb, LOG_ERR, "GZIP",
-                                   "Failed to gzip-compress "
-                                   "%"PRIusz" bytes (%"PRIusz" total) for "
-                                   "topic %.*s [%"PRId32"]: "
-                                   "%s (%i): "
-                                   "sending uncompressed",
-                                   rlen, len,
-                                   RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
-                                   rktp->rktp_partition,
-                                   strm.msg ? strm.msg : "", r);
-                        deflateEnd(&strm);
-                        rd_free(ciov->iov_base);
-                        return -1;
-                }
-
-                rd_kafka_assert(rkb->rkb_rk, strm.avail_in == 0);
-        }
-
-        /* Finish the compression */
-        if ((r = deflate(&strm, Z_FINISH)) != Z_STREAM_END) {
-                rd_rkb_log(rkb, LOG_ERR, "GZIP",
-                           "Failed to finish gzip compression "
-                           " of %"PRIusz" bytes for "
-                           "topic %.*s [%"PRId32"]: "
-                           "%s (%i): "
-                           "sending uncompressed",
-                           len,
-                           RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
-                           rktp->rktp_partition,
-                           strm.msg ? strm.msg : "", r);
-                deflateEnd(&strm);
-                rd_free(ciov->iov_base);
-                return -1;
-        }
-
-        ciov->iov_len = strm.total_out;
-
-        /* Deinitialize compression */
-        deflateEnd(&strm);
-
-        return 0;
-}
-#endif
-
-
-#if WITH_SNAPPY
-/**
- * @brief Compress messageset using Snappy
- */
-static int
-rd_kafka_msgset_writer_compress_snappy (rd_kafka_msgset_writer_t *msetw,
-                                        rd_slice_t *slice, struct iovec *ciov) {
-        rd_kafka_broker_t *rkb = msetw->msetw_rkb;
-        rd_kafka_toppar_t *rktp = msetw->msetw_rktp;
-        struct iovec *iov;
-        size_t iov_max, iov_cnt;
-        struct snappy_env senv;
-        size_t len = rd_slice_remains(slice);
-        int r;
-
-        /* Initialize snappy compression environment */
-        rd_kafka_snappy_init_env_sg(&senv, 1/*iov enable*/);
-
-        /* Calculate maximum compressed size and
-         * allocate an output buffer accordingly. */
-        ciov->iov_len = rd_kafka_snappy_max_compressed_length(len);
-        ciov->iov_base = rd_malloc(ciov->iov_len);
-
-        iov_max = slice->buf->rbuf_segment_cnt;
-        iov = rd_alloca(sizeof(*iov) * iov_max);
-
-        rd_slice_get_iov(slice, iov, &iov_cnt, iov_max, len);
-
-        /* Compress each message */
-        if ((r = rd_kafka_snappy_compress_iov(&senv, iov, iov_cnt, len,
-                                              ciov)) != 0) {
-                rd_rkb_log(rkb, LOG_ERR, "SNAPPY",
-                           "Failed to snappy-compress "
-                           "%"PRIusz" bytes for "
-                           "topic %.*s [%"PRId32"]: %s: "
-                           "sending uncompressed",
-                           len, RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
-                           rktp->rktp_partition,
-                           rd_strerror(-r));
-                rd_free(ciov->iov_base);
-                return -1;
-        }
-
-        /* rd_free snappy environment */
-        rd_kafka_snappy_free_env(&senv);
-
-        return 0;
-}
-#endif
-
-/**
- * @brief Compress messageset using LZ4F
- */
-static int
-rd_kafka_msgset_writer_compress_lz4 (rd_kafka_msgset_writer_t *msetw,
-                                     rd_slice_t *slice, struct iovec *ciov) {
-        rd_kafka_resp_err_t err;
-        err = rd_kafka_lz4_compress(msetw->msetw_rkb,
-                                    /* Correct or incorrect HC */
-                                    msetw->msetw_MsgVersion >= 1 ? 1 : 0,
-                                    slice, &ciov->iov_base, &ciov->iov_len);
-        return (err ? -1 : 0);
-}
-
-
-
-/**
- * @brief Compress the message set.
- * @param outlenp in: total uncompressed messages size,
- *                out (on success): returns the compressed buffer size.
- * @returns 0 on success or -1 if compression failed.
- * @remark Compression failures are not critical, we'll just send the
- *         messageset uncompressed.
- */
-static int
-rd_kafka_msgset_writer_compress (rd_kafka_msgset_writer_t *msetw,
-                                 size_t *outlenp) {
-        rd_kafka_toppar_t *rktp = msetw->msetw_rktp;
-        rd_buf_t *rbuf = &msetw->msetw_rkbuf->rkbuf_buf;
-        rd_slice_t slice;
-        size_t len = *outlenp;
-        struct iovec ciov = RD_ZERO_INIT; /* Compressed output buffer */
-        int r = -1;
-        size_t outlen;
-
-        rd_assert(rd_buf_len(rbuf) >= msetw->msetw_firstmsg.of + len);
-
-        /* Create buffer slice from firstmsg and onwards */
-        r = rd_slice_init(&slice, rbuf, msetw->msetw_firstmsg.of, len);
-        rd_assert(r == 0 || !*"invalid firstmsg position");
-
-        switch (rktp->rktp_rkt->rkt_conf.compression_codec)
-        {
-#if WITH_ZLIB
-        case RD_KAFKA_COMPRESSION_GZIP:
-                r = rd_kafka_msgset_writer_compress_gzip(msetw, &slice, &ciov);
-                break;
-#endif
-
-#if WITH_SNAPPY
-        case RD_KAFKA_COMPRESSION_SNAPPY:
-                r = rd_kafka_msgset_writer_compress_snappy(msetw, &slice,
-                                                           &ciov);
-                break;
-#endif
-
-        case RD_KAFKA_COMPRESSION_LZ4:
-                /* Skip LZ4 compression if broker doesn't support it. */
-                if (!(msetw->msetw_rkb->rkb_features & RD_KAFKA_FEATURE_LZ4))
-                        return -1;
-
-                r = rd_kafka_msgset_writer_compress_lz4(msetw, &slice, &ciov);
-                break;
-
-
-        default:
-                rd_kafka_assert(NULL,
-                                !*"notreached: unsupported compression.codec");
-                break;
-        }
-
-        if (r == -1) /* Compression failed, send uncompressed */
-                return -1;
-
-
-        if (unlikely(ciov.iov_len > len)) {
-                /* If the compressed data is larger than the uncompressed size
-                 * then throw it away and send as uncompressed. */
-                rd_free(ciov.iov_base);
-                return -1;
-        }
-
-        /* Set compression codec in MessageSet.Attributes */
-        msetw->msetw_Attributes |= rktp->rktp_rkt->rkt_conf.compression_codec;
-
-        /* Rewind rkbuf to the pre-message checkpoint (firstmsg)
-         * and replace the original message(s) with the compressed payload,
-         * possibly with version dependent enveloping. */
-        rd_buf_write_seek(rbuf, msetw->msetw_firstmsg.of);
-
-        rd_kafka_assert(msetw->msetw_rkb->rkb_rk, ciov.iov_len < INT32_MAX);
-
-        if (msetw->msetw_MsgVersion == 2) {
-                /* MsgVersion 2 has no inner MessageSet header or wrapping
-                 * for compressed messages, just the messages back-to-back,
-                 * so we can push the compressed memory directly to the
-                 * buffer without wrapping it. */
-                rd_buf_push(rbuf, ciov.iov_base, ciov.iov_len, rd_free);
-                outlen = ciov.iov_len;
-
-        } else {
-                /* Older MessageSets envelope/wrap the compressed MessageSet
-                 * in an outer Message. */
-                rd_kafka_msg_t rkm = {
-                        .rkm_len       = ciov.iov_len,
-                        .rkm_payload   = ciov.iov_base,
-                        .rkm_timestamp = msetw->msetw_firstmsg.timestamp
-                };
-                outlen = rd_kafka_msgset_writer_write_msg(
-                        msetw, &rkm, 0,
-                        rktp->rktp_rkt->rkt_conf.compression_codec,
-                        rd_free/*free for ciov.iov_base*/);
-        }
-
-        *outlenp = outlen;
-
-        return 0;
-}
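
The routine above tries the configured codec and simply falls back to sending the MessageSet uncompressed whenever compression fails or grows the data. A minimal standalone sketch of that fallback pattern, using zlib's one-shot compress2() rather than the gzip-framed deflate stream used by the writer (not librdkafka code; the function name is illustrative):

#include <stdlib.h>
#include <zlib.h>

/* Sketch only: returns a malloc'd zlib-compressed copy of `in`, or NULL if
 * compression failed or did not shrink the data, in which case the caller
 * sends `in` uncompressed (mirroring the writer's fallback). */
static unsigned char *compress_or_fallback (const unsigned char *in,
                                             size_t len, size_t *out_lenp) {
        uLongf clen = compressBound((uLong)len);
        unsigned char *out = malloc(clen);

        if (!out)
                return NULL;

        if (compress2(out, &clen, in, (uLong)len,
                      Z_DEFAULT_COMPRESSION) != Z_OK ||
            (size_t)clen >= len) {
                free(out);              /* failed or grew: fall back */
                return NULL;
        }

        *out_lenp = (size_t)clen;
        return out;
}

The `>= len` check plays the same role as the `ciov.iov_len > len` guard in rd_kafka_msgset_writer_compress() above.
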
-
-
-
-
-/**
- * @brief Calculate MessageSet v2 CRC (CRC32C) when messageset is complete.
- */
-static void
-rd_kafka_msgset_writer_calc_crc_v2 (rd_kafka_msgset_writer_t *msetw) {
-        int32_t crc;
-        rd_slice_t slice;
-        int r;
-
-        r = rd_slice_init(&slice, &msetw->msetw_rkbuf->rkbuf_buf,
-                          msetw->msetw_of_CRC+4,
-                          rd_buf_write_pos(&msetw->msetw_rkbuf->rkbuf_buf) -
-                          msetw->msetw_of_CRC-4);
-        rd_assert(!r && *"slice_init failed");
-
-        /* CRC32C calculation */
-        crc = rd_slice_crc32c(&slice);
-
-        /* Update CRC at MessageSet v2 CRC offset */
-        rd_kafka_buf_update_i32(msetw->msetw_rkbuf, msetw->msetw_of_CRC, crc);
-}
-
-/**
- * @brief Finalize MessageSet v2 header fields.
- */
-static void
-rd_kafka_msgset_writer_finalize_MessageSet_v2_header (
-        rd_kafka_msgset_writer_t *msetw) {
-        rd_kafka_buf_t *rkbuf = msetw->msetw_rkbuf;
-        int msgcnt = rd_kafka_msgq_len(&rkbuf->rkbuf_msgq);
-
-        rd_kafka_assert(NULL, msgcnt > 0);
-        rd_kafka_assert(NULL, msetw->msetw_ApiVersion >= 3);
-
-        msetw->msetw_MessageSetSize = RD_KAFKAP_MSGSET_V2_SIZE +
-                msetw->msetw_messages_len;
-
-        /* MessageSet.Length is the same as
-         * MessageSetSize minus field widths for FirstOffset+Length */
-        rd_kafka_buf_update_i32(rkbuf, msetw->msetw_of_start +
-                                RD_KAFKAP_MSGSET_V2_OF_Length,
-                                (int32_t)msetw->msetw_MessageSetSize - (8+4));
-
-        msetw->msetw_Attributes |= RD_KAFKA_MSG_ATTR_CREATE_TIME;
-
-        rd_kafka_buf_update_i16(rkbuf, msetw->msetw_of_start +
-                                RD_KAFKAP_MSGSET_V2_OF_Attributes,
-                                msetw->msetw_Attributes);
-
-        rd_kafka_buf_update_i32(rkbuf, msetw->msetw_of_start +
-                                RD_KAFKAP_MSGSET_V2_OF_LastOffsetDelta,
-                                msgcnt-1);
-
-        rd_kafka_buf_update_i64(rkbuf, msetw->msetw_of_start +
-                                RD_KAFKAP_MSGSET_V2_OF_BaseTimestamp,
-                                msetw->msetw_firstmsg.timestamp);
-
-        rd_kafka_buf_update_i64(rkbuf, msetw->msetw_of_start +
-                                RD_KAFKAP_MSGSET_V2_OF_MaxTimestamp,
-                                msetw->msetw_MaxTimestamp);
-
-        rd_kafka_buf_update_i32(rkbuf, msetw->msetw_of_start +
-                                RD_KAFKAP_MSGSET_V2_OF_RecordCount, msgcnt);
-
-        rd_kafka_msgset_writer_calc_crc_v2(msetw);
-}
-
-
-
-
-/**
- * @brief Finalize the MessageSet header, if applicable.
- */
-static void
-rd_kafka_msgset_writer_finalize_MessageSet (rd_kafka_msgset_writer_t *msetw) {
-        rd_dassert(msetw->msetw_messages_len > 0);
-
-        if (msetw->msetw_MsgVersion == 2)
-                rd_kafka_msgset_writer_finalize_MessageSet_v2_header(msetw);
-        else
-                msetw->msetw_MessageSetSize = RD_KAFKAP_MSGSET_V0_SIZE +
-                        msetw->msetw_messages_len;
-
-        /* Update MessageSetSize */
-        rd_kafka_buf_update_i32(msetw->msetw_rkbuf,
-                                msetw->msetw_of_MessageSetSize,
-                                (int32_t)msetw->msetw_MessageSetSize);
-
-}
-
-
-/**
- * @brief Finalize the messageset - call when no more messages are to be
- *        added to the messageset.
- *
- *        Will compress, update final values, CRCs, etc.
- *
- *        The messageset writer is destroyed and the buffer is returned
- *        and ready to be transmitted.
- *
- * @param MessageSetSizep will be set to the finalized MessageSetSize
- *
- * @returns the buffer to transmit or NULL if there were no messages
- *          in messageset.
- */
-static rd_kafka_buf_t *
-rd_kafka_msgset_writer_finalize (rd_kafka_msgset_writer_t *msetw,
-                                 size_t *MessageSetSizep) {
-        rd_kafka_buf_t *rkbuf = msetw->msetw_rkbuf;
-        rd_kafka_toppar_t *rktp = msetw->msetw_rktp;
-        size_t len;
-        int cnt;
-
-        /* No messages added, bail out early. */
-        if (unlikely((cnt = rd_kafka_msgq_len(&rkbuf->rkbuf_msgq)) == 0)) {
-                rd_kafka_buf_destroy(rkbuf);
-                return NULL;
-        }
-
-        /* Total size of messages */
-        len = rd_buf_write_pos(&msetw->msetw_rkbuf->rkbuf_buf) -
-                msetw->msetw_firstmsg.of;
-        rd_assert(len > 0);
-        rd_assert(len <= (size_t)rktp->rktp_rkt->rkt_rk->rk_conf.max_msg_size);
-
-        /* Compress the message set */
-        if (rktp->rktp_rkt->rkt_conf.compression_codec)
-                rd_kafka_msgset_writer_compress(msetw, &len);
-
-        msetw->msetw_messages_len = len;
-
-        /* Finalize MessageSet header fields */
-        rd_kafka_msgset_writer_finalize_MessageSet(msetw);
-
-        /* Return final MessageSetSize */
-        *MessageSetSizep = msetw->msetw_MessageSetSize;
-
-        rd_rkb_dbg(msetw->msetw_rkb, MSG, "PRODUCE",
-                   "%s [%"PRId32"]: "
-                   "Produce MessageSet with %i message(s) (%"PRIusz" bytes, "
-                   "ApiVersion %d, MsgVersion %d)",
-                   rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
-                   cnt, msetw->msetw_MessageSetSize,
-                   msetw->msetw_ApiVersion, msetw->msetw_MsgVersion);
-
-
-        return rkbuf;
-}
-
-
-/**
- * @brief Create ProduceRequest containing as many messages from
- *        the toppar's transmit queue as possible, limited by configuration,
- *        size, etc.
- *
- * @param rkb broker to create buffer for
- * @param rktp toppar to transmit messages for
- * @param MessageSetSizep will be set to the final MessageSetSize
- *
- * @returns the buffer to transmit or NULL if there were no messages
- *          in messageset.
- */
-rd_kafka_buf_t *
-rd_kafka_msgset_create_ProduceRequest (rd_kafka_broker_t *rkb,
-                                       rd_kafka_toppar_t *rktp,
-                                       size_t *MessageSetSizep) {
-
-        rd_kafka_msgset_writer_t msetw;
-
-        if (rd_kafka_msgset_writer_init(&msetw, rkb, rktp) == 0)
-                return NULL;
-
-        rd_kafka_msgset_writer_write_msgq(&msetw, &rktp->rktp_xmit_msgq);
-
-        return rd_kafka_msgset_writer_finalize(&msetw, MessageSetSizep);
-}

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdkafka_offset.c
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdkafka_offset.c b/thirdparty/librdkafka-0.11.1/src/rdkafka_offset.c
deleted file mode 100644
index b586988..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdkafka_offset.c
+++ /dev/null
@@ -1,1139 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012,2013 Magnus Edenhill
- * All rights reserved.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met: 
- * 
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer. 
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution. 
- * 
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-// FIXME: Revise this documentation:
-/**
- * This file implements the consumer offset storage.
- * It currently supports local file storage and broker OffsetCommit storage,
- * not zookeeper.
- *
- * Regardless of commit method (file, broker, ..) this is how it works:
- *  - When either rdkafka or the application (depending on whether automatic
- *    offset storing is enabled) calls rd_kafka_offset_store() with an offset
- *    to store, all it does is set rktp->rktp_stored_offset to this value.
- *    This can happen from any thread and is locked by the rktp lock.
- *  - The actual commit/write of the offset to its backing store (filesystem)
- *    is performed by the main rdkafka thread and scheduled at the configured
- *    auto.commit.interval.ms interval.
- *  - The write is performed in the main rdkafka thread (in a blocking manner
- *    for file based offsets) and once the write has
- *    succeeded rktp->rktp_committed_offset is updated to the new value.
- *  - If offset.store.sync.interval.ms is configured the main rdkafka thread
- *    will also fsync() each offset file at that interval (file method only).
- */
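
To make the flow described above concrete, here is a hedged application-side sketch using the public simple-consumer API (topic name, partition 0 and the loop bound are placeholders; the topic conf is assumed to use offset.store.method=file with enable.auto.offset.store=false so the application stores offsets itself):

#include <librdkafka/rdkafka.h>

/* Sketch only: consume one partition and mark offsets as processed; the
 * actual file/broker commit happens later on the main thread per
 * auto.commit.interval.ms, exactly as described above. */
static void consume_and_store (rd_kafka_topic_t *rkt) {
        int i;

        rd_kafka_consume_start(rkt, 0/*partition*/, RD_KAFKA_OFFSET_STORED);

        for (i = 0 ; i < 1000 ; i++) {
                rd_kafka_message_t *rkmessage =
                        rd_kafka_consume(rkt, 0, 1000/*timeout ms*/);
                if (!rkmessage)
                        continue;
                if (!rkmessage->err) {
                        /* ... process rkmessage->payload ... */
                        /* Only sets rktp_stored_offset (plus one, see
                         * rd_kafka_offset_store() further down). */
                        rd_kafka_offset_store(rkt, 0, rkmessage->offset);
                }
                rd_kafka_message_destroy(rkmessage);
        }

        rd_kafka_consume_stop(rkt, 0);
}
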
-
-
-#include "rdkafka_int.h"
-#include "rdkafka_topic.h"
-#include "rdkafka_partition.h"
-#include "rdkafka_offset.h"
-#include "rdkafka_broker.h"
-
-#include <stdio.h>
-#include <sys/types.h>
-#include <fcntl.h>
-
-#ifdef _MSC_VER
-#include <io.h>
-#include <share.h>
-#include <sys/stat.h>
-#include <Shlwapi.h>
-typedef int mode_t;
-#endif
-
-
-/**
- * Convert an absolute or logical offset to string.
- */
-const char *rd_kafka_offset2str (int64_t offset) {
-        static RD_TLS char ret[16][32];
-        static RD_TLS int i = 0;
-
-        i = (i + 1) % 16;
-
-        if (offset >= 0)
-                rd_snprintf(ret[i], sizeof(ret[i]), "%"PRId64, offset);
-        else if (offset == RD_KAFKA_OFFSET_BEGINNING)
-                return "BEGINNING";
-        else if (offset == RD_KAFKA_OFFSET_END)
-                return "END";
-        else if (offset == RD_KAFKA_OFFSET_STORED)
-                return "STORED";
-        else if (offset == RD_KAFKA_OFFSET_INVALID)
-                return "INVALID";
-        else if (offset <= RD_KAFKA_OFFSET_TAIL_BASE)
-                rd_snprintf(ret[i], sizeof(ret[i]), "TAIL(%lld)",
-			    llabs(offset - RD_KAFKA_OFFSET_TAIL_BASE));
-        else
-                rd_snprintf(ret[i], sizeof(ret[i]), "%"PRId64"?", offset);
-
-        return ret[i];
-}
-
-static void rd_kafka_offset_file_close (rd_kafka_toppar_t *rktp) {
-	if (!rktp->rktp_offset_fp)
-		return;
-
-	fclose(rktp->rktp_offset_fp);
-	rktp->rktp_offset_fp = NULL;
-}
-
-
-#ifndef _MSC_VER
-/**
- * Linux version of open callback providing racefree CLOEXEC.
- */
-int rd_kafka_open_cb_linux (const char *pathname, int flags, mode_t mode,
-                            void *opaque) {
-#ifdef O_CLOEXEC
-        return open(pathname, flags|O_CLOEXEC, mode);
-#else
-        return rd_kafka_open_cb_generic(pathname, flags, mode, opaque);
-#endif
-}
-#endif
-
-/**
- * Fallback version of open_cb NOT providing racefree CLOEXEC,
- * but setting CLOEXEC after file open (if FD_CLOEXEC is defined).
- */
-int rd_kafka_open_cb_generic (const char *pathname, int flags, mode_t mode,
-                              void *opaque) {
-#ifndef _MSC_VER
-	int fd;
-        int on = 1;
-        fd = open(pathname, flags, mode);
-        if (fd == -1)
-                return -1;
-#ifdef FD_CLOEXEC
-        fcntl(fd, F_SETFD, FD_CLOEXEC, &on);
-#endif
-        return fd;
-#else
-	int fd;
-	if (_sopen_s(&fd, pathname, flags, _SH_DENYNO, mode) != 0)
-		return -1;
-	return fd;
-#endif
-}
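
Applications may substitute their own open callback through the public rd_kafka_conf_set_open_cb() hook; the functions above are what librdkafka itself falls back to. A hedged, POSIX-only sketch (the callback body is illustrative, not part of this file):

#include <fcntl.h>
#include <librdkafka/rdkafka.h>

/* Sketch only: an application-supplied open_cb, e.g. to force O_CLOEXEC
 * or to audit which files librdkafka opens. */
static int my_open_cb (const char *pathname, int flags, mode_t mode,
                       void *opaque) {
#ifdef O_CLOEXEC
        flags |= O_CLOEXEC;
#endif
        return open(pathname, flags, mode);
}

static rd_kafka_conf_t *make_conf (void) {
        rd_kafka_conf_t *conf = rd_kafka_conf_new();
        rd_kafka_conf_set_open_cb(conf, my_open_cb);
        return conf;
}
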
-
-
-static int rd_kafka_offset_file_open (rd_kafka_toppar_t *rktp) {
-        rd_kafka_t *rk = rktp->rktp_rkt->rkt_rk;
-        int fd;
-
-#ifndef _MSC_VER
-	mode_t mode = 0644;
-#else
-	mode_t mode = _S_IREAD|_S_IWRITE;
-#endif
-	if ((fd = rk->rk_conf.open_cb(rktp->rktp_offset_path,
-                                      O_CREAT|O_RDWR, mode,
-                                      rk->rk_conf.opaque)) == -1) {
-		rd_kafka_op_err(rktp->rktp_rkt->rkt_rk,
-				RD_KAFKA_RESP_ERR__FS,
-				"%s [%"PRId32"]: "
-				"Failed to open offset file %s: %s",
-				rktp->rktp_rkt->rkt_topic->str,
-				rktp->rktp_partition,
-				rktp->rktp_offset_path, rd_strerror(errno));
-		return -1;
-	}
-
-	rktp->rktp_offset_fp =
-#ifndef _MSC_VER
-		fdopen(fd, "r+");
-#else
-		_fdopen(fd, "r+");
-#endif
-
-	return 0;
-}
-
-
-static int64_t rd_kafka_offset_file_read (rd_kafka_toppar_t *rktp) {
-	char buf[22];
-	char *end;
-	int64_t offset;
-	size_t r;
-
-	if (fseek(rktp->rktp_offset_fp, 0, SEEK_SET) == -1) {
-		rd_kafka_op_err(rktp->rktp_rkt->rkt_rk,
-				RD_KAFKA_RESP_ERR__FS,
-				"%s [%"PRId32"]: "
-				"Seek (for read) failed on offset file %s: %s",
-				rktp->rktp_rkt->rkt_topic->str,
-				rktp->rktp_partition,
-				rktp->rktp_offset_path,
-				rd_strerror(errno));
-		rd_kafka_offset_file_close(rktp);
-		return RD_KAFKA_OFFSET_INVALID;
-	}
-
-	r = fread(buf, 1, sizeof(buf) - 1, rktp->rktp_offset_fp);
-	if (r == 0) {
-		rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET",
-			     "%s [%"PRId32"]: offset file (%s) is empty",
-			     rktp->rktp_rkt->rkt_topic->str,
-			     rktp->rktp_partition,
-			     rktp->rktp_offset_path);
-		return RD_KAFKA_OFFSET_INVALID;
-	}
-
-	buf[r] = '\0';
-
-	offset = strtoull(buf, &end, 10);
-	if (buf == end) {
-		rd_kafka_op_err(rktp->rktp_rkt->rkt_rk,
-				RD_KAFKA_RESP_ERR__FS,
-				"%s [%"PRId32"]: "
-				"Unable to parse offset in %s",
-				rktp->rktp_rkt->rkt_topic->str,
-				rktp->rktp_partition,
-				rktp->rktp_offset_path);
-		return RD_KAFKA_OFFSET_INVALID;
-	}
-
-
-	rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET",
-		     "%s [%"PRId32"]: Read offset %"PRId64" from offset "
-		     "file (%s)",
-		     rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
-		     offset, rktp->rktp_offset_path);
-
-	return offset;
-}
-
-
-/**
- * Sync/flush offset file.
- */
-static int rd_kafka_offset_file_sync (rd_kafka_toppar_t *rktp) {
-        if (!rktp->rktp_offset_fp)
-                return 0;
-
-        rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "SYNC",
-                     "%s [%"PRId32"]: offset file sync",
-                     rktp->rktp_rkt->rkt_topic->str,
-                     rktp->rktp_partition);
-
-#ifndef _MSC_VER
-	(void)fflush(rktp->rktp_offset_fp);
-	(void)fsync(fileno(rktp->rktp_offset_fp)); // FIXME
-#else
-	// FIXME
-	// FlushFileBuffers(_get_osfhandle(fileno(rktp->rktp_offset_fp)));
-#endif
-	return 0;
-}
-
-
-/**
- * Write offset to offset file.
- *
- * Locality: toppar's broker thread
- */
-static rd_kafka_resp_err_t
-rd_kafka_offset_file_commit (rd_kafka_toppar_t *rktp) {
-	rd_kafka_itopic_t *rkt = rktp->rktp_rkt;
-	int attempt;
-        rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;
-        int64_t offset = rktp->rktp_stored_offset;
-
-	for (attempt = 0 ; attempt < 2 ; attempt++) {
-		char buf[22];
-		int len;
-
-		if (!rktp->rktp_offset_fp)
-			if (rd_kafka_offset_file_open(rktp) == -1)
-				continue;
-
-		if (fseek(rktp->rktp_offset_fp, 0, SEEK_SET) == -1) {
-			rd_kafka_op_err(rktp->rktp_rkt->rkt_rk,
-					RD_KAFKA_RESP_ERR__FS,
-					"%s [%"PRId32"]: "
-					"Seek failed on offset file %s: %s",
-					rktp->rktp_rkt->rkt_topic->str,
-					rktp->rktp_partition,
-					rktp->rktp_offset_path,
-					rd_strerror(errno));
-                        err = RD_KAFKA_RESP_ERR__FS;
-			rd_kafka_offset_file_close(rktp);
-			continue;
-		}
-
-		len = rd_snprintf(buf, sizeof(buf), "%"PRId64"\n", offset);
-
-		if (fwrite(buf, 1, len, rktp->rktp_offset_fp) < 1) {
-			rd_kafka_op_err(rktp->rktp_rkt->rkt_rk,
-					RD_KAFKA_RESP_ERR__FS,
-					"%s [%"PRId32"]: "
-					"Failed to write offset %"PRId64" to "
-					"offset file %s: %s",
-					rktp->rktp_rkt->rkt_topic->str,
-					rktp->rktp_partition,
-					offset,
-					rktp->rktp_offset_path,
-					rd_strerror(errno));
-                        err = RD_KAFKA_RESP_ERR__FS;
-			rd_kafka_offset_file_close(rktp);
-			continue;
-		}
-
-                /* Need to flush before truncate to preserve write ordering */
-                (void)fflush(rktp->rktp_offset_fp);
-
-		/* Truncate file */
-#ifdef _MSC_VER
-		if (_chsize_s(_fileno(rktp->rktp_offset_fp), len) == -1)
-			; /* Ignore truncate failures */
-#else
-		if (ftruncate(fileno(rktp->rktp_offset_fp), len) == -1)
-			; /* Ignore truncate failures */
-#endif
-		rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET",
-			     "%s [%"PRId32"]: wrote offset %"PRId64" to "
-			     "file %s",
-			     rktp->rktp_rkt->rkt_topic->str,
-			     rktp->rktp_partition, offset,
-			     rktp->rktp_offset_path);
-
-		rktp->rktp_committed_offset = offset;
-
-		/* If sync interval is set to immediate we sync right away. */
-		if (rkt->rkt_conf.offset_store_sync_interval_ms == 0)
-			rd_kafka_offset_file_sync(rktp);
-
-
-		return RD_KAFKA_RESP_ERR_NO_ERROR;
-	}
-
-
-	return err;
-}
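
The file written above is deliberately trivial: a single decimal offset followed by a newline, then truncated to that length, which is exactly what rd_kafka_offset_file_read() parses back with strtoull(). An illustrative example of the on-disk contents (values hypothetical):

/* Contents of e.g. "mytopic-0.offset" after committing offset 1042:
 *
 *      1042\n
 *
 * i.e. "%"PRId64"\n" as written above, then ftruncate()d to that length. */
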
-
-
-/**
- * Enqueue offset_commit_cb op, if configured.
- *
- */
-void rd_kafka_offset_commit_cb_op (rd_kafka_t *rk,
-				   rd_kafka_resp_err_t err,
-				   const rd_kafka_topic_partition_list_t *offsets) {
-	rd_kafka_op_t *rko;
-
-        if (!(rk->rk_conf.enabled_events & RD_KAFKA_EVENT_OFFSET_COMMIT))
-		return;
-
-	rko = rd_kafka_op_new(RD_KAFKA_OP_OFFSET_COMMIT|RD_KAFKA_OP_REPLY);
-        rd_kafka_op_set_prio(rko, RD_KAFKA_PRIO_HIGH);
-	rko->rko_err = err;
-	rko->rko_u.offset_commit.cb = rk->rk_conf.offset_commit_cb;/*maybe NULL*/
-	rko->rko_u.offset_commit.opaque = rk->rk_conf.opaque;
-	if (offsets)
-		rko->rko_u.offset_commit.partitions =
-			rd_kafka_topic_partition_list_copy(offsets);
-	rd_kafka_q_enq(rk->rk_rep, rko);
-}
-
-
-
-
-/**
- * Commit a list of offsets asynchronously. Response will be queued on 'replyq'.
- * Optional \p cb will be set on requesting op.
- *
- * Makes a copy of \p offsets (may be NULL for current assignment)
- */
-static rd_kafka_resp_err_t
-rd_kafka_commit0 (rd_kafka_t *rk,
-                  const rd_kafka_topic_partition_list_t *offsets,
-		  rd_kafka_toppar_t *rktp,
-                  rd_kafka_replyq_t replyq,
-		  void (*cb) (rd_kafka_t *rk,
-			      rd_kafka_resp_err_t err,
-			      rd_kafka_topic_partition_list_t *offsets,
-			      void *opaque),
-		  void *opaque,
-                  const char *reason) {
-        rd_kafka_cgrp_t *rkcg;
-        rd_kafka_op_t *rko;
-
-        if (!(rkcg = rd_kafka_cgrp_get(rk)))
-                return RD_KAFKA_RESP_ERR__UNKNOWN_GROUP;
-
-        rko = rd_kafka_op_new(RD_KAFKA_OP_OFFSET_COMMIT);
-        rko->rko_u.offset_commit.reason = rd_strdup(reason);
-	rko->rko_replyq = replyq;
-	rko->rko_u.offset_commit.cb = cb;
-	rko->rko_u.offset_commit.opaque = opaque;
-	if (rktp)
-		rko->rko_rktp = rd_kafka_toppar_keep(rktp);
-
-        if (offsets)
-		rko->rko_u.offset_commit.partitions =
-                        rd_kafka_topic_partition_list_copy(offsets);
-
-        rd_kafka_q_enq(rkcg->rkcg_ops, rko);
-
-        return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-
-
-
-/**
- * NOTE: 'offsets' may be NULL, see official documentation.
- */
-rd_kafka_resp_err_t
-rd_kafka_commit (rd_kafka_t *rk,
-                 const rd_kafka_topic_partition_list_t *offsets, int async) {
-        rd_kafka_cgrp_t *rkcg;
-	rd_kafka_resp_err_t err;
-	rd_kafka_q_t *repq = NULL;
-	rd_kafka_replyq_t rq = RD_KAFKA_NO_REPLYQ;
-
-        if (!(rkcg = rd_kafka_cgrp_get(rk)))
-                return RD_KAFKA_RESP_ERR__UNKNOWN_GROUP;
-
-        if (!async) {
-                repq = rd_kafka_q_new(rk);
-                rq = RD_KAFKA_REPLYQ(repq, 0);
-        }
-
-        err = rd_kafka_commit0(rk, offsets, NULL, rq, NULL, NULL, "manual");
-
-        if (!err && !async) {
-		err = rd_kafka_q_wait_result(repq, RD_POLL_INFINITE);
-		rd_kafka_q_destroy(repq);
-        }
-
-	return err;
-}
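
A hedged usage sketch for the public entry point above: passing offsets == NULL commits the current assignment's positions, and `async` selects between the blocking reply-queue path and fire-and-forget (topic name and offset value below are placeholders):

#include <stdio.h>
#include <librdkafka/rdkafka.h>

static void commit_examples (rd_kafka_t *rk) {
        rd_kafka_resp_err_t err;
        rd_kafka_topic_partition_list_t *offsets;

        /* Synchronous commit of the current positions. */
        err = rd_kafka_commit(rk, NULL, 0/*sync*/);
        if (err)
                fprintf(stderr, "commit failed: %s\n", rd_kafka_err2str(err));

        /* Asynchronous commit of one explicit offset; note the +1 convention
         * (next offset to consume), cf. rd_kafka_commit_message() below. */
        offsets = rd_kafka_topic_partition_list_new(1);
        rd_kafka_topic_partition_list_add(offsets, "mytopic", 0)->offset =
                42 + 1;
        rd_kafka_commit(rk, offsets, 1/*async*/);
        rd_kafka_topic_partition_list_destroy(offsets);
}
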
-
-
-rd_kafka_resp_err_t
-rd_kafka_commit_message (rd_kafka_t *rk, const rd_kafka_message_t *rkmessage,
-                         int async) {
-        rd_kafka_topic_partition_list_t *offsets;
-        rd_kafka_topic_partition_t *rktpar;
-        rd_kafka_resp_err_t err;
-
-        if (rkmessage->err)
-                return RD_KAFKA_RESP_ERR__INVALID_ARG;
-
-        offsets = rd_kafka_topic_partition_list_new(1);
-        rktpar = rd_kafka_topic_partition_list_add(
-                offsets, rd_kafka_topic_name(rkmessage->rkt),
-                rkmessage->partition);
-        rktpar->offset = rkmessage->offset+1;
-
-        err = rd_kafka_commit(rk, offsets, async);
-
-        rd_kafka_topic_partition_list_destroy(offsets);
-
-        return err;
-}
-
-
-
-rd_kafka_resp_err_t
-rd_kafka_commit_queue (rd_kafka_t *rk,
-		       const rd_kafka_topic_partition_list_t *offsets,
-		       rd_kafka_queue_t *rkqu,
-		       void (*cb) (rd_kafka_t *rk,
-				   rd_kafka_resp_err_t err,
-				   rd_kafka_topic_partition_list_t *offsets,
-				   void *opaque),
-		       void *opaque) {
-	rd_kafka_q_t *rkq;
-	rd_kafka_resp_err_t err;
-
-        if (!rd_kafka_cgrp_get(rk))
-                return RD_KAFKA_RESP_ERR__UNKNOWN_GROUP;
-
-	if (rkqu)
-		rkq = rkqu->rkqu_q;
-	else
-		rkq = rd_kafka_q_new(rk);
-
-	err = rd_kafka_commit0(rk, offsets, NULL,
-			       RD_KAFKA_REPLYQ(rkq, 0),
-			       cb, opaque, "manual");
-
-	if (!rkqu) {
-                rd_kafka_op_t *rko =
-                        rd_kafka_q_pop_serve(rkq, RD_POLL_INFINITE,
-                                             0, RD_KAFKA_Q_CB_FORCE_RETURN,
-                                             NULL, NULL);
-		if (!rko)
-			err = RD_KAFKA_RESP_ERR__TIMED_OUT;
-		else {
-                        if (cb)
-                                cb(rk, rko->rko_err,
-                                   rko->rko_u.offset_commit.partitions,
-                                   opaque);
-                        err = rko->rko_err;
-                        rd_kafka_op_destroy(rko);
-                }
-
-                rd_kafka_q_destroy(rkq);
-	}
-
-	return err;
-}
-
-
-
-
-/**
- * Called when a broker commit is done.
- *
- * Locality: toppar handler thread
- * Locks: none
- */
-static void
-rd_kafka_offset_broker_commit_cb (rd_kafka_t *rk,
-				  rd_kafka_resp_err_t err,
-				  rd_kafka_topic_partition_list_t *offsets,
-				  void *opaque) {
-        shptr_rd_kafka_toppar_t *s_rktp;
-        rd_kafka_toppar_t *rktp;
-        rd_kafka_topic_partition_t *rktpar;
-
-        if (offsets->cnt == 0) {
-                rd_kafka_dbg(rk, TOPIC, "OFFSETCOMMIT",
-                             "No offsets to commit (commit_cb)");
-                return;
-        }
-
-        rktpar = &offsets->elems[0];
-
-        if (!(s_rktp = rd_kafka_topic_partition_list_get_toppar(rk, rktpar))) {
-		rd_kafka_dbg(rk, TOPIC, "OFFSETCOMMIT",
-			     "No local partition found for %s [%"PRId32"] "
-			     "while parsing OffsetCommit response "
-			     "(offset %"PRId64", error \"%s\")",
-			     rktpar->topic,
-			     rktpar->partition,
-			     rktpar->offset,
-			     rd_kafka_err2str(rktpar->err));
-                return;
-        }
-
-        rktp = rd_kafka_toppar_s2i(s_rktp);
-
-        if (!err)
-                err = rktpar->err;
-
-	rd_kafka_toppar_offset_commit_result(rktp, err, offsets);
-
-        rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET",
-                     "%s [%"PRId32"]: offset %"PRId64" committed: %s",
-                     rktp->rktp_rkt->rkt_topic->str,
-                     rktp->rktp_partition, rktpar->offset,
-                     rd_kafka_err2str(err));
-
-        rktp->rktp_committing_offset = 0;
-
-        rd_kafka_toppar_lock(rktp);
-        if (rktp->rktp_flags & RD_KAFKA_TOPPAR_F_OFFSET_STORE_STOPPING)
-                rd_kafka_offset_store_term(rktp, err);
-        rd_kafka_toppar_unlock(rktp);
-
-        rd_kafka_toppar_destroy(s_rktp);
-}
-
-
-static rd_kafka_resp_err_t
-rd_kafka_offset_broker_commit (rd_kafka_toppar_t *rktp, const char *reason) {
-        rd_kafka_topic_partition_list_t *offsets;
-        rd_kafka_topic_partition_t *rktpar;
-
-        rd_kafka_assert(rktp->rktp_rkt->rkt_rk, rktp->rktp_cgrp != NULL);
-        rd_kafka_assert(rktp->rktp_rkt->rkt_rk,
-                        rktp->rktp_flags & RD_KAFKA_TOPPAR_F_OFFSET_STORE);
-
-        rktp->rktp_committing_offset = rktp->rktp_stored_offset;
-
-        offsets = rd_kafka_topic_partition_list_new(1);
-        rktpar = rd_kafka_topic_partition_list_add(
-                offsets, rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition);
-        rktpar->offset = rktp->rktp_committing_offset;
-
-        rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSETCMT",
-                     "%.*s [%"PRId32"]: committing offset %"PRId64": %s",
-                     RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
-                     rktp->rktp_partition, rktp->rktp_committing_offset,
-                     reason);
-
-        rd_kafka_commit0(rktp->rktp_rkt->rkt_rk, offsets, rktp,
-			 RD_KAFKA_REPLYQ(rktp->rktp_ops, 0),
-			 rd_kafka_offset_broker_commit_cb, NULL,
-                         reason);
-
-        rd_kafka_topic_partition_list_destroy(offsets);
-
-        return RD_KAFKA_RESP_ERR__IN_PROGRESS;
-}
-
-
-
-
-/**
- * Commit offset to backing store.
- * This might be an async operation.
- *
- * Locality: toppar handler thread
- */
-static
-rd_kafka_resp_err_t rd_kafka_offset_commit (rd_kafka_toppar_t *rktp,
-                                            const char *reason) {
-        if (1)  // FIXME
-        rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET",
-		     "%s [%"PRId32"]: commit: "
-		     "stored offset %"PRId64" > committed offset %"PRId64"?",
-		     rktp->rktp_rkt->rkt_topic->str,
-		     rktp->rktp_partition,
-		     rktp->rktp_stored_offset, rktp->rktp_committed_offset);
-
-        /* Already committed */
-        if (rktp->rktp_stored_offset <= rktp->rktp_committed_offset)
-                return RD_KAFKA_RESP_ERR_NO_ERROR;
-
-        /* Already committing (for async ops) */
-        if (rktp->rktp_stored_offset <= rktp->rktp_committing_offset)
-                return RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS;
-
-        switch (rktp->rktp_rkt->rkt_conf.offset_store_method)
-        {
-        case RD_KAFKA_OFFSET_METHOD_FILE:
-                return rd_kafka_offset_file_commit(rktp);
-        case RD_KAFKA_OFFSET_METHOD_BROKER:
-                return rd_kafka_offset_broker_commit(rktp, reason);
-        default:
-                /* UNREACHABLE */
-                return RD_KAFKA_RESP_ERR__INVALID_ARG;
-        }
-}
-
-
-
-
-
-/**
- * Sync offset backing store. This is only used for METHOD_FILE.
- *
- * Locality: rktp's broker thread.
- */
-rd_kafka_resp_err_t rd_kafka_offset_sync (rd_kafka_toppar_t *rktp) {
-        switch (rktp->rktp_rkt->rkt_conf.offset_store_method)
-        {
-        case RD_KAFKA_OFFSET_METHOD_FILE:
-                return rd_kafka_offset_file_sync(rktp);
-        default:
-                return RD_KAFKA_RESP_ERR__INVALID_ARG;
-        }
-}
-
-
-/**
- * Store offset.
- * Typically called from application code.
- *
- * NOTE: No locks must be held.
- */
-rd_kafka_resp_err_t rd_kafka_offset_store (rd_kafka_topic_t *app_rkt,
-					   int32_t partition, int64_t offset) {
-        rd_kafka_itopic_t *rkt = rd_kafka_topic_a2i(app_rkt);
-	shptr_rd_kafka_toppar_t *s_rktp;
-
-	/* Find toppar */
-	rd_kafka_topic_rdlock(rkt);
-	if (!(s_rktp = rd_kafka_toppar_get(rkt, partition, 0/*!ua_on_miss*/))) {
-		rd_kafka_topic_rdunlock(rkt);
-		return RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION;
-	}
-	rd_kafka_topic_rdunlock(rkt);
-
-	rd_kafka_offset_store0(rd_kafka_toppar_s2i(s_rktp), offset+1,
-                               1/*lock*/);
-
-	rd_kafka_toppar_destroy(s_rktp);
-
-	return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-
-rd_kafka_resp_err_t
-rd_kafka_offsets_store (rd_kafka_t *rk,
-                        rd_kafka_topic_partition_list_t *offsets) {
-        int i;
-        int ok_cnt = 0;
-
-        for (i = 0 ; i < offsets->cnt ; i++) {
-                rd_kafka_topic_partition_t *rktpar = &offsets->elems[i];
-                shptr_rd_kafka_toppar_t *s_rktp;
-
-                s_rktp = rd_kafka_topic_partition_get_toppar(rk, rktpar);
-                if (!s_rktp) {
-                        rktpar->err = RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION;
-                        continue;
-                }
-
-                rd_kafka_offset_store0(rd_kafka_toppar_s2i(s_rktp),
-                                       rktpar->offset, 1/*lock*/);
-                rd_kafka_toppar_destroy(s_rktp);
-
-                rktpar->err = RD_KAFKA_RESP_ERR_NO_ERROR;
-                ok_cnt++;
-        }
-
-        return offsets->cnt > 0 && ok_cnt < offsets->cnt ?
-                RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION :
-                RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-
-
-
-
-
-/**
- * Decommissions the use of an offset file for a toppar.
- * The file content will not be touched and the file will not be removed.
- */
-static rd_kafka_resp_err_t rd_kafka_offset_file_term (rd_kafka_toppar_t *rktp) {
-        rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;
-
-        /* Sync offset file if the sync is intervalled (> 0) */
-        if (rktp->rktp_rkt->rkt_conf.offset_store_sync_interval_ms > 0) {
-                rd_kafka_offset_file_sync(rktp);
-		rd_kafka_timer_stop(&rktp->rktp_rkt->rkt_rk->rk_timers,
-				    &rktp->rktp_offset_sync_tmr, 1/*lock*/);
-	}
-
-
-	rd_kafka_offset_file_close(rktp);
-
-	rd_free(rktp->rktp_offset_path);
-	rktp->rktp_offset_path = NULL;
-
-        return err;
-}
-
-static rd_kafka_op_res_t
-rd_kafka_offset_reset_op_cb (rd_kafka_t *rk, rd_kafka_q_t *rkq,
-                             rd_kafka_op_t *rko) {
-	rd_kafka_toppar_t *rktp =
-		rd_kafka_toppar_s2i(rko->rko_rktp);
-	rd_kafka_toppar_lock(rktp);
-        rd_kafka_offset_reset(rktp,
-                              rko->rko_u.offset_reset.offset,
-                              rko->rko_err, rko->rko_u.offset_reset.reason);
-	rd_kafka_toppar_unlock(rktp);
-        return RD_KAFKA_OP_RES_HANDLED;
-}
-
-/**
- * Take action when the offset for a toppar becomes unusable.
- *
- * Locality: toppar handler thread
- * Locks: toppar_lock() MUST be held
- */
-void rd_kafka_offset_reset (rd_kafka_toppar_t *rktp, int64_t err_offset,
-			    rd_kafka_resp_err_t err, const char *reason) {
-	int64_t offset = RD_KAFKA_OFFSET_INVALID;
-	rd_kafka_op_t *rko;
-
-        /* Enqueue op for toppar handler thread if we're on the wrong thread. */
-        if (!thrd_is_current(rktp->rktp_rkt->rkt_rk->rk_thread)) {
-                rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_OFFSET_RESET |
-						     RD_KAFKA_OP_CB);
-                rko->rko_op_cb = rd_kafka_offset_reset_op_cb;
-                rko->rko_err = err;
-                rko->rko_rktp = rd_kafka_toppar_keep(rktp);
-                rko->rko_u.offset_reset.offset = err_offset;
-		rko->rko_u.offset_reset.reason = rd_strdup(reason);
-                rd_kafka_q_enq(rktp->rktp_ops, rko);
-                return;
-        }
-
-	if (err_offset == RD_KAFKA_OFFSET_INVALID || err)
-		offset = rktp->rktp_rkt->rkt_conf.auto_offset_reset;
-	else
-		offset = err_offset;
-
-	if (offset == RD_KAFKA_OFFSET_INVALID) {
-		/* Error, auto.offset.reset tells us to error out. */
-		rko = rd_kafka_op_new(RD_KAFKA_OP_CONSUMER_ERR);
-
-		rko->rko_err               = err;
-		rko->rko_u.err.offset      = err_offset;
-		rko->rko_u.err.errstr      = rd_strdup(reason);
-                rko->rko_rktp        = rd_kafka_toppar_keep(rktp);
-
-		rd_kafka_q_enq(rktp->rktp_fetchq, rko);
-                rd_kafka_toppar_set_fetch_state(
-			rktp, RD_KAFKA_TOPPAR_FETCH_NONE);
-
-	} else {
-		/* Query logical offset */
-		rktp->rktp_query_offset = offset;
-                rd_kafka_toppar_set_fetch_state(
-			rktp, RD_KAFKA_TOPPAR_FETCH_OFFSET_QUERY);
-	}
-
-	rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET",
-		     "%s [%"PRId32"]: offset reset (at offset %s) "
-		     "to %s: %s: %s",
-		     rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
-		     rd_kafka_offset2str(err_offset),
-                     rd_kafka_offset2str(offset),
-                     reason, rd_kafka_err2str(err));
-
-	if (rktp->rktp_fetch_state == RD_KAFKA_TOPPAR_FETCH_OFFSET_QUERY)
-		rd_kafka_toppar_offset_request(rktp, rktp->rktp_query_offset, 0);
-}
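
The reset target chosen above comes straight from the topic's auto.offset.reset property; a value of "error" maps to the consumer-error branch. A hedged sketch of setting it via the standard topic-conf API (the chosen value is a placeholder):

#include <stdio.h>
#include <librdkafka/rdkafka.h>

static rd_kafka_topic_conf_t *make_topic_conf (void) {
        char errstr[512];
        rd_kafka_topic_conf_t *tconf = rd_kafka_topic_conf_new();

        /* Documented values include "smallest"/"earliest",
         * "largest"/"latest" and "error". */
        if (rd_kafka_topic_conf_set(tconf, "auto.offset.reset", "earliest",
                                    errstr, sizeof(errstr)) !=
            RD_KAFKA_CONF_OK)
                fprintf(stderr, "%s\n", errstr);

        return tconf;
}
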
-
-
-/**
- * Escape any special characters in filename 'in' and write escaped
- * string to 'out' (of max size out_size).
- */
-static char *mk_esc_filename (const char *in, char *out, size_t out_size) {
-        const char *s = in;
-        char *o = out;
-
-        while (*s) {
-                const char *esc;
-                size_t esclen;
-
-                switch (*s)
-                {
-                case '/': /* linux */
-                        esc = "%2F";
-                        esclen = strlen(esc);
-                        break;
-                case ':': /* osx, windows */
-                        esc = "%3A";
-                        esclen = strlen(esc);
-                        break;
-                case '\\': /* windows */
-                        esc = "%5C";
-                        esclen = strlen(esc);
-                        break;
-                default:
-                        esc = s;
-                        esclen = 1;
-                        break;
-                }
-
-                if ((size_t)((o + esclen + 1) - out) >= out_size) {
-                        /* No more space in output string, truncate. */
-                        break;
-                }
-
-                while (esclen-- > 0)
-                        *(o++) = *(esc++);
-
-                s++;
-        }
-
-        *o = '\0';
-        return out;
-}
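
For reference, the escaping above only touches '/', ':' and '\' and copies everything else through, truncating if the output buffer runs out. A worked example with a hypothetical input:

/* mk_esc_filename("logs/2018:partition", out, sizeof(out))
 *   -> "logs%2F2018%3Apartition"
 * ('/' -> %2F, ':' -> %3A, '\' -> %5C) */
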
-
-
-static void rd_kafka_offset_sync_tmr_cb (rd_kafka_timers_t *rkts, void *arg) {
-	rd_kafka_toppar_t *rktp = arg;
-	rd_kafka_offset_sync(rktp);
-}
-
-
-/**
- * Prepare a toppar for using an offset file.
- *
- * Locality: rdkafka main thread
- * Locks: toppar_lock(rktp) must be held
- */
-static void rd_kafka_offset_file_init (rd_kafka_toppar_t *rktp) {
-	char spath[4096];
-	const char *path = rktp->rktp_rkt->rkt_conf.offset_store_path;
-	int64_t offset = RD_KAFKA_OFFSET_INVALID;
-
-	if (rd_kafka_path_is_dir(path)) {
-                char tmpfile[1024];
-                char escfile[4096];
-
-                /* Include group.id in filename if configured. */
-                if (!RD_KAFKAP_STR_IS_NULL(rktp->rktp_rkt->rkt_rk->rk_group_id))
-                        rd_snprintf(tmpfile, sizeof(tmpfile),
-                                 "%s-%"PRId32"-%.*s.offset",
-                                 rktp->rktp_rkt->rkt_topic->str,
-                                 rktp->rktp_partition,
-                                 RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_rk->
-                                                  rk_group_id));
-                else
-                        rd_snprintf(tmpfile, sizeof(tmpfile),
-                                 "%s-%"PRId32".offset",
-                                 rktp->rktp_rkt->rkt_topic->str,
-                                 rktp->rktp_partition);
-
-                /* Escape filename to make it safe. */
-                mk_esc_filename(tmpfile, escfile, sizeof(escfile));
-
-                rd_snprintf(spath, sizeof(spath), "%s%s%s",
-                         path, path[strlen(path)-1] == '/' ? "" : "/", escfile);
-
-		path = spath;
-	}
-
-	rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET",
-		     "%s [%"PRId32"]: using offset file %s",
-		     rktp->rktp_rkt->rkt_topic->str,
-		     rktp->rktp_partition,
-		     path);
-	rktp->rktp_offset_path = rd_strdup(path);
-
-
-        /* Set up the offset file sync interval. */
- 	if (rktp->rktp_rkt->rkt_conf.offset_store_sync_interval_ms > 0)
-		rd_kafka_timer_start(&rktp->rktp_rkt->rkt_rk->rk_timers,
-				     &rktp->rktp_offset_sync_tmr,
-				     rktp->rktp_rkt->rkt_conf.
-				     offset_store_sync_interval_ms * 1000ll,
-				     rd_kafka_offset_sync_tmr_cb, rktp);
-
-	if (rd_kafka_offset_file_open(rktp) != -1) {
-		/* Read offset from offset file. */
-		offset = rd_kafka_offset_file_read(rktp);
-	}
-
-	if (offset != RD_KAFKA_OFFSET_INVALID) {
-		/* Start fetching from offset */
-		rktp->rktp_stored_offset = offset;
-		rktp->rktp_committed_offset = offset;
-                rd_kafka_toppar_next_offset_handle(rktp, offset);
-
-	} else {
-		/* Offset was not usable: perform offset reset logic */
-		rktp->rktp_committed_offset = RD_KAFKA_OFFSET_INVALID;
-		rd_kafka_offset_reset(rktp, RD_KAFKA_OFFSET_INVALID,
-				      RD_KAFKA_RESP_ERR__FS,
-				      "non-readable offset file");
-	}
-}
-
-
-
-/**
- * Terminate broker offset store
- */
-static rd_kafka_resp_err_t rd_kafka_offset_broker_term (rd_kafka_toppar_t *rktp){
-        return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-
-/**
- * Prepare a toppar for using broker offset commit (broker 0.8.2 or later).
- * When using KafkaConsumer (high-level consumer) this functionality is
- * disabled in favour of the cgrp commits for the entire set of subscriptions.
- */
-static void rd_kafka_offset_broker_init (rd_kafka_toppar_t *rktp) {
-        if (!rd_kafka_is_simple_consumer(rktp->rktp_rkt->rkt_rk))
-                return;
-        rd_kafka_offset_reset(rktp, RD_KAFKA_OFFSET_STORED, 0,
-                              "query broker for offsets");
-}
-
-
-/**
- * Terminates toppar's offset store, this is the finalizing step after
- * offset_store_stop().
- *
- * Locks: rd_kafka_toppar_lock() MUST be held.
- */
-void rd_kafka_offset_store_term (rd_kafka_toppar_t *rktp,
-                                 rd_kafka_resp_err_t err) {
-        rd_kafka_resp_err_t err2;
-
-	rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "STORETERM",
-		     "%s [%"PRId32"]: offset store terminating",
-                     rktp->rktp_rkt->rkt_topic->str,
-		     rktp->rktp_partition);
-
-        rktp->rktp_flags &= ~RD_KAFKA_TOPPAR_F_OFFSET_STORE_STOPPING;
-
-	rd_kafka_timer_stop(&rktp->rktp_rkt->rkt_rk->rk_timers,
-			    &rktp->rktp_offset_commit_tmr, 1/*lock*/);
-
-        switch (rktp->rktp_rkt->rkt_conf.offset_store_method)
-        {
-        case RD_KAFKA_OFFSET_METHOD_FILE:
-                err2 = rd_kafka_offset_file_term(rktp);
-                break;
-        case RD_KAFKA_OFFSET_METHOD_BROKER:
-                err2 = rd_kafka_offset_broker_term(rktp);
-                break;
-        case RD_KAFKA_OFFSET_METHOD_NONE:
-                err2 = RD_KAFKA_RESP_ERR_NO_ERROR;
-                break;
-        }
-
-        /* Prioritize the input error (probably from commit), fall
-         * back on termination error. */
-        if (!err)
-                err = err2;
-
-        rd_kafka_toppar_fetch_stopped(rktp, err);
-
-}
-
-
-/**
- * Stop toppar's offset store, committing the final offsets, etc.
- *
- * Returns RD_KAFKA_RESP_ERR_NO_ERROR on success,
- * RD_KAFKA_RESP_ERR__IN_PROGRESS if the term triggered an
- * async operation (e.g., broker offset commit), or
- * any other error in case of immediate failure.
- *
- * The offset layer will call rd_kafka_offset_store_term() when
- * the offset management has been fully stopped for this partition.
- *
- * Locks: rd_kafka_toppar_lock() MUST be held.
- */
-rd_kafka_resp_err_t rd_kafka_offset_store_stop (rd_kafka_toppar_t *rktp) {
-        rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;
-
-        if (!(rktp->rktp_flags & RD_KAFKA_TOPPAR_F_OFFSET_STORE))
-                goto done;
-
-        rktp->rktp_flags |= RD_KAFKA_TOPPAR_F_OFFSET_STORE_STOPPING;
-
-        rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET",
-                     "%s [%"PRId32"]: stopping offset store "
-                     "(stored offset %"PRId64
-                     ", committed offset %"PRId64", EOF offset %"PRId64")",
-                     rktp->rktp_rkt->rkt_topic->str,
-		     rktp->rktp_partition,
-		     rktp->rktp_stored_offset, rktp->rktp_committed_offset,
-                     rktp->rktp_offsets_fin.eof_offset);
-
-        /* Store end offset for empty partitions */
-        if (rktp->rktp_rkt->rkt_rk->rk_conf.enable_auto_offset_store &&
-            rktp->rktp_stored_offset == RD_KAFKA_OFFSET_INVALID &&
-            rktp->rktp_offsets_fin.eof_offset > 0)
-                rd_kafka_offset_store0(rktp, rktp->rktp_offsets_fin.eof_offset,
-                                       0/*no lock*/);
-
-        /* Commit offset to backing store.
-         * This might be an async operation. */
-        if (rd_kafka_is_simple_consumer(rktp->rktp_rkt->rkt_rk) &&
-            rktp->rktp_stored_offset > rktp->rktp_committed_offset)
-                err = rd_kafka_offset_commit(rktp, "offset store stop");
-
-        /* If stop is in progress (async commit), return now. */
-        if (err == RD_KAFKA_RESP_ERR__IN_PROGRESS)
-                return err;
-
-done:
-        /* Stop is done */
-        rd_kafka_offset_store_term(rktp, err);
-
-        return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-
-static void rd_kafka_offset_auto_commit_tmr_cb (rd_kafka_timers_t *rkts,
-						 void *arg) {
-	rd_kafka_toppar_t *rktp = arg;
-	rd_kafka_offset_commit(rktp, "auto commit timer");
-}
-
-void rd_kafka_offset_query_tmr_cb (rd_kafka_timers_t *rkts, void *arg) {
-	rd_kafka_toppar_t *rktp = arg;
-	rd_kafka_toppar_lock(rktp);
-	rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET",
-		     "Topic %s [%"PRId32"]: timed offset query for %s in "
-		     "state %s",
-		     rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
-		     rd_kafka_offset2str(rktp->rktp_query_offset),
-		     rd_kafka_fetch_states[rktp->rktp_fetch_state]);
-	rd_kafka_toppar_offset_request(rktp, rktp->rktp_query_offset, 0);
-	rd_kafka_toppar_unlock(rktp);
-}
-
-
-/**
- * Initialize toppar's offset store.
- *
- * Locality: toppar handler thread
- */
-void rd_kafka_offset_store_init (rd_kafka_toppar_t *rktp) {
-        static const char *store_names[] = { "none", "file", "broker" };
-
-        rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET",
-                     "%s [%"PRId32"]: using offset store method: %s",
-                     rktp->rktp_rkt->rkt_topic->str,
-                     rktp->rktp_partition,
-                     store_names[rktp->rktp_rkt->rkt_conf.offset_store_method]);
-
-        /* The committed offset is unknown at this point. */
-        rktp->rktp_committed_offset = RD_KAFKA_OFFSET_INVALID;
-
-        /* Set up the commit interval (for simple consumer). */
-        if (rd_kafka_is_simple_consumer(rktp->rktp_rkt->rkt_rk) &&
-            rktp->rktp_rkt->rkt_conf.auto_commit_interval_ms > 0)
-		rd_kafka_timer_start(&rktp->rktp_rkt->rkt_rk->rk_timers,
-				     &rktp->rktp_offset_commit_tmr,
-				     rktp->rktp_rkt->rkt_conf.
-				     auto_commit_interval_ms * 1000ll,
-				     rd_kafka_offset_auto_commit_tmr_cb,
-				     rktp);
-
-        switch (rktp->rktp_rkt->rkt_conf.offset_store_method)
-        {
-        case RD_KAFKA_OFFSET_METHOD_FILE:
-                rd_kafka_offset_file_init(rktp);
-                break;
-        case RD_KAFKA_OFFSET_METHOD_BROKER:
-                rd_kafka_offset_broker_init(rktp);
-                break;
-        case RD_KAFKA_OFFSET_METHOD_NONE:
-                break;
-        default:
-                /* NOTREACHED */
-                return;
-        }
-
-        rktp->rktp_flags |= RD_KAFKA_TOPPAR_F_OFFSET_STORE;
-}
-

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdkafka_offset.h
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdkafka_offset.h b/thirdparty/librdkafka-0.11.1/src/rdkafka_offset.h
deleted file mode 100644
index a9e8655..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdkafka_offset.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012,2013 Magnus Edenhill
- * All rights reserved.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met: 
- * 
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer. 
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution. 
- * 
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#pragma once
-
-#include "rdkafka_partition.h"
-
-
-const char *rd_kafka_offset2str (int64_t offset);
-
-
-/**
- * Stores the offset for the toppar 'rktp'.
- * The actual commit of the offset to backing store is usually
- * performed at a later time (time or threshold based).
- *
- * See head of rdkafka_offset.c for more information.
- */
-static RD_INLINE RD_UNUSED
-void rd_kafka_offset_store0 (rd_kafka_toppar_t *rktp, int64_t offset,
-			     int lock) {
-	if (lock)
-		rd_kafka_toppar_lock(rktp);
-	rktp->rktp_stored_offset = offset;
-	if (lock)
-		rd_kafka_toppar_unlock(rktp);
-}
-
-rd_kafka_resp_err_t rd_kafka_offset_store (rd_kafka_topic_t *rkt,
-					   int32_t partition, int64_t offset);
-
-rd_kafka_resp_err_t rd_kafka_offset_sync (rd_kafka_toppar_t *rktp);
-
-void rd_kafka_offset_store_term (rd_kafka_toppar_t *rktp,
-                                 rd_kafka_resp_err_t err);
-rd_kafka_resp_err_t rd_kafka_offset_store_stop (rd_kafka_toppar_t *rktp);
-void rd_kafka_offset_store_init (rd_kafka_toppar_t *rktp);
-
-void rd_kafka_offset_reset (rd_kafka_toppar_t *rktp, int64_t err_offset,
-			    rd_kafka_resp_err_t err, const char *reason);
-
-void rd_kafka_offset_query_tmr_cb (rd_kafka_timers_t *rkts, void *arg);
-
-void rd_kafka_offset_commit_cb_op (rd_kafka_t *rk,
-				   rd_kafka_resp_err_t err,
-				   const rd_kafka_topic_partition_list_t *offsets);
-


http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/examples/rdkafka_example.c
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/examples/rdkafka_example.c b/thirdparty/librdkafka-0.11.4/examples/rdkafka_example.c
new file mode 100644
index 0000000..77c345e
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/examples/rdkafka_example.c
@@ -0,0 +1,885 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012, Magnus Edenhill
+ * All rights reserved.
+ * 
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met: 
+ * 
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer. 
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution. 
+ * 
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * Apache Kafka consumer & producer example programs
+ * using the Kafka driver from librdkafka
+ * (https://github.com/edenhill/librdkafka)
+ */
+
+#include <ctype.h>
+#include <signal.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <syslog.h>
+#include <time.h>
+#include <sys/time.h>
+#include <getopt.h>
+
+/* Typical include path would be <librdkafka/rdkafka.h>, but this program
+ * is built from within the librdkafka source tree and thus differs. */
+#include "rdkafka.h"  /* for Kafka driver */
+
+
+static int run = 1;
+static rd_kafka_t *rk;
+static int exit_eof = 0;
+static int quiet = 0;
+static enum {
+	OUTPUT_HEXDUMP,
+	OUTPUT_RAW,
+} output = OUTPUT_HEXDUMP;
+
+static void stop (int sig) {
+	run = 0;
+	fclose(stdin); /* abort fgets() */
+}
+
+
+static void hexdump (FILE *fp, const char *name, const void *ptr, size_t len) {
+	const char *p = (const char *)ptr;
+	size_t of = 0;
+
+
+	if (name)
+		fprintf(fp, "%s hexdump (%zd bytes):\n", name, len);
+
+	for (of = 0 ; of < len ; of += 16) {
+		char hexen[16*3+1];
+		char charen[16+1];
+		int hof = 0;
+
+		int cof = 0;
+		int i;
+
+		for (i = of ; i < (int)of + 16 && i < (int)len ; i++) {
+			hof += sprintf(hexen+hof, "%02x ", p[i] & 0xff);
+			cof += sprintf(charen+cof, "%c",
+				       isprint((int)p[i]) ? p[i] : '.');
+		}
+		fprintf(fp, "%08zx: %-48s %-16s\n",
+			of, hexen, charen);
+	}
+}
+
+/**
+ * Kafka logger callback (optional)
+ */
+static void logger (const rd_kafka_t *rk, int level,
+		    const char *fac, const char *buf) {
+	struct timeval tv;
+	gettimeofday(&tv, NULL);
+	fprintf(stderr, "%u.%03u RDKAFKA-%i-%s: %s: %s\n",
+		(int)tv.tv_sec, (int)(tv.tv_usec / 1000),
+		level, fac, rk ? rd_kafka_name(rk) : NULL, buf);
+}
+
+/**
+ * Message delivery report callback.
+ * Called once for each message.
+ * See rdkafka.h for more information.
+ */
+static void msg_delivered (rd_kafka_t *rk,
+			   void *payload, size_t len,
+			   int error_code,
+			   void *opaque, void *msg_opaque) {
+
+	if (error_code)
+		fprintf(stderr, "%% Message delivery failed: %s\n",
+			rd_kafka_err2str(error_code));
+	else if (!quiet)
+		fprintf(stderr, "%% Message delivered (%zd bytes): %.*s\n", len,
+			(int)len, (const char *)payload);
+}
+
+/**
+ * Message delivery report callback using the richer rd_kafka_message_t object.
+ */
+static void msg_delivered2 (rd_kafka_t *rk,
+                            const rd_kafka_message_t *rkmessage, void *opaque) {
+	printf("del: %s: offset %"PRId64"\n",
+	       rd_kafka_err2str(rkmessage->err), rkmessage->offset);
+        if (rkmessage->err)
+		fprintf(stderr, "%% Message delivery failed: %s\n",
+                        rd_kafka_err2str(rkmessage->err));
+	else if (!quiet)
+		fprintf(stderr,
+                        "%% Message delivered (%zd bytes, offset %"PRId64", "
+                        "partition %"PRId32"): %.*s\n",
+                        rkmessage->len, rkmessage->offset,
+			rkmessage->partition,
+			(int)rkmessage->len, (const char *)rkmessage->payload);
+}
+
+
+static void msg_consume (rd_kafka_message_t *rkmessage,
+			 void *opaque) {
+	if (rkmessage->err) {
+		if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) {
+			fprintf(stderr,
+				"%% Consumer reached end of %s [%"PRId32"] "
+			       "message queue at offset %"PRId64"\n",
+			       rd_kafka_topic_name(rkmessage->rkt),
+			       rkmessage->partition, rkmessage->offset);
+
+			if (exit_eof)
+				run = 0;
+
+			return;
+		}
+
+		fprintf(stderr, "%% Consume error for topic \"%s\" [%"PRId32"] "
+		       "offset %"PRId64": %s\n",
+		       rd_kafka_topic_name(rkmessage->rkt),
+		       rkmessage->partition,
+		       rkmessage->offset,
+		       rd_kafka_message_errstr(rkmessage));
+
+                if (rkmessage->err == RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION ||
+                    rkmessage->err == RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC)
+                        run = 0;
+		return;
+	}
+
+	if (!quiet) {
+		rd_kafka_timestamp_type_t tstype;
+		int64_t timestamp;
+                rd_kafka_headers_t *hdrs;
+
+		fprintf(stdout, "%% Message (offset %"PRId64", %zd bytes):\n",
+			rkmessage->offset, rkmessage->len);
+
+		timestamp = rd_kafka_message_timestamp(rkmessage, &tstype);
+		if (tstype != RD_KAFKA_TIMESTAMP_NOT_AVAILABLE) {
+			const char *tsname = "?";
+			if (tstype == RD_KAFKA_TIMESTAMP_CREATE_TIME)
+				tsname = "create time";
+			else if (tstype == RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME)
+				tsname = "log append time";
+
+			fprintf(stdout, "%% Message timestamp: %s %"PRId64
+				" (%ds ago)\n",
+				tsname, timestamp,
+				!timestamp ? 0 :
+				(int)time(NULL) - (int)(timestamp/1000));
+		}
+
+                if (!rd_kafka_message_headers(rkmessage, &hdrs)) {
+                        size_t idx = 0;
+                        const char *name;
+                        const void *val;
+                        size_t size;
+
+                        fprintf(stdout, "%% Headers:");
+
+                        while (!rd_kafka_header_get_all(hdrs, idx++,
+                                                        &name, &val, &size)) {
+                                fprintf(stdout, "%s%s=",
+                                        idx == 1 ? " " : ", ", name);
+                                if (val)
+                                        fprintf(stdout, "\"%.*s\"",
+                                                (int)size, (const char *)val);
+                                else
+                                        fprintf(stdout, "NULL");
+                        }
+                        fprintf(stdout, "\n");
+                }
+	}
+
+	if (rkmessage->key_len) {
+		if (output == OUTPUT_HEXDUMP)
+			hexdump(stdout, "Message Key",
+				rkmessage->key, rkmessage->key_len);
+		else
+			printf("Key: %.*s\n",
+			       (int)rkmessage->key_len, (char *)rkmessage->key);
+	}
+
+	if (output == OUTPUT_HEXDUMP)
+		hexdump(stdout, "Message Payload",
+			rkmessage->payload, rkmessage->len);
+	else
+		printf("%.*s\n",
+		       (int)rkmessage->len, (char *)rkmessage->payload);
+}
+
+
+static void metadata_print (const char *topic,
+                            const struct rd_kafka_metadata *metadata) {
+        int i, j, k;
+
+        printf("Metadata for %s (from broker %"PRId32": %s):\n",
+               topic ? : "all topics",
+               metadata->orig_broker_id,
+               metadata->orig_broker_name);
+
+
+        /* Iterate brokers */
+        printf(" %i brokers:\n", metadata->broker_cnt);
+        for (i = 0 ; i < metadata->broker_cnt ; i++)
+                printf("  broker %"PRId32" at %s:%i\n",
+                       metadata->brokers[i].id,
+                       metadata->brokers[i].host,
+                       metadata->brokers[i].port);
+
+        /* Iterate topics */
+        printf(" %i topics:\n", metadata->topic_cnt);
+        for (i = 0 ; i < metadata->topic_cnt ; i++) {
+                const struct rd_kafka_metadata_topic *t = &metadata->topics[i];
+                printf("  topic \"%s\" with %i partitions:",
+                       t->topic,
+                       t->partition_cnt);
+                if (t->err) {
+                        printf(" %s", rd_kafka_err2str(t->err));
+                        if (t->err == RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE)
+                                printf(" (try again)");
+                }
+                printf("\n");
+
+                /* Iterate topic's partitions */
+                for (j = 0 ; j < t->partition_cnt ; j++) {
+                        const struct rd_kafka_metadata_partition *p;
+                        p = &t->partitions[j];
+                        printf("    partition %"PRId32", "
+                               "leader %"PRId32", replicas: ",
+                               p->id, p->leader);
+
+                        /* Iterate partition's replicas */
+                        for (k = 0 ; k < p->replica_cnt ; k++)
+                                printf("%s%"PRId32,
+                                       k > 0 ? ",":"", p->replicas[k]);
+
+                        /* Iterate partition's ISRs */
+                        printf(", isrs: ");
+                        for (k = 0 ; k < p->isr_cnt ; k++)
+                                printf("%s%"PRId32,
+                                       k > 0 ? ",":"", p->isrs[k]);
+                        if (p->err)
+                                printf(", %s\n", rd_kafka_err2str(p->err));
+                        else
+                                printf("\n");
+                }
+        }
+}
+
+
+static void sig_usr1 (int sig) {
+	rd_kafka_dump(stdout, rk);
+}
+
+int main (int argc, char **argv) {
+	rd_kafka_topic_t *rkt;
+	char *brokers = "localhost:9092";
+	char mode = 'C';
+	char *topic = NULL;
+	int partition = RD_KAFKA_PARTITION_UA;
+	int opt;
+	rd_kafka_conf_t *conf;
+	rd_kafka_topic_conf_t *topic_conf;
+	char errstr[512];
+	int64_t start_offset = 0;
+        int report_offsets = 0;
+	int do_conf_dump = 0;
+	char tmp[16];
+        int64_t seek_offset = 0;
+        int64_t tmp_offset = 0;
+	int get_wmarks = 0;
+        rd_kafka_headers_t *hdrs = NULL;
+        rd_kafka_resp_err_t err;
+
+	/* Kafka configuration */
+	conf = rd_kafka_conf_new();
+
+        /* Set logger */
+        rd_kafka_conf_set_log_cb(conf, logger);
+
+	/* Quick termination */
+	snprintf(tmp, sizeof(tmp), "%i", SIGIO);
+	rd_kafka_conf_set(conf, "internal.termination.signal", tmp, NULL, 0);
+
+	/* Topic configuration */
+	topic_conf = rd_kafka_topic_conf_new();
+
+	while ((opt = getopt(argc, argv, "PCLt:p:b:z:qd:o:eX:As:H:")) != -1) {
+		switch (opt) {
+		case 'P':
+		case 'C':
+                case 'L':
+			mode = opt;
+			break;
+		case 't':
+			topic = optarg;
+			break;
+		case 'p':
+			partition = atoi(optarg);
+			break;
+		case 'b':
+			brokers = optarg;
+			break;
+		case 'z':
+			if (rd_kafka_conf_set(conf, "compression.codec",
+					      optarg,
+					      errstr, sizeof(errstr)) !=
+			    RD_KAFKA_CONF_OK) {
+				fprintf(stderr, "%% %s\n", errstr);
+				exit(1);
+			}
+			break;
+		case 'o':
+                case 's':
+			if (!strcmp(optarg, "end"))
+				tmp_offset = RD_KAFKA_OFFSET_END;
+			else if (!strcmp(optarg, "beginning"))
+				tmp_offset = RD_KAFKA_OFFSET_BEGINNING;
+			else if (!strcmp(optarg, "stored"))
+				tmp_offset = RD_KAFKA_OFFSET_STORED;
+                        else if (!strcmp(optarg, "report"))
+                                report_offsets = 1;
+			else if (!strcmp(optarg, "wmark"))
+				get_wmarks = 1;
+			else {
+				tmp_offset = strtoll(optarg, NULL, 10);
+
+				if (tmp_offset < 0)
+					tmp_offset = RD_KAFKA_OFFSET_TAIL(-tmp_offset);
+			}
+
+                        if (opt == 'o')
+                                start_offset = tmp_offset;
+                        else if (opt == 's')
+                                seek_offset = tmp_offset;
+			break;
+		case 'e':
+			exit_eof = 1;
+			break;
+		case 'd':
+			if (rd_kafka_conf_set(conf, "debug", optarg,
+					      errstr, sizeof(errstr)) !=
+			    RD_KAFKA_CONF_OK) {
+				fprintf(stderr,
+					"%% Debug configuration failed: "
+					"%s: %s\n",
+					errstr, optarg);
+				exit(1);
+			}
+			break;
+		case 'q':
+			quiet = 1;
+			break;
+		case 'A':
+			output = OUTPUT_RAW;
+			break;
+                case 'H':
+                {
+                        char *name, *val;
+                        size_t name_sz = -1;
+
+                        name = optarg;
+                        val = strchr(name, '=');
+                        if (val) {
+                                name_sz = (size_t)(val-name);
+                                val++; /* past the '=' */
+                        }
+
+                        if (!hdrs)
+                                hdrs = rd_kafka_headers_new(8);
+
+                        err = rd_kafka_header_add(hdrs, name, name_sz, val, -1);
+                        if (err) {
+                                fprintf(stderr,
+                                        "%% Failed to add header %s: %s\n",
+                                        name, rd_kafka_err2str(err));
+                                exit(1);
+                        }
+                }
+                break;
+
+		case 'X':
+		{
+			char *name, *val;
+			rd_kafka_conf_res_t res;
+
+			if (!strcmp(optarg, "list") ||
+			    !strcmp(optarg, "help")) {
+				rd_kafka_conf_properties_show(stdout);
+				exit(0);
+			}
+
+			if (!strcmp(optarg, "dump")) {
+				do_conf_dump = 1;
+				continue;
+			}
+
+			name = optarg;
+			if (!(val = strchr(name, '='))) {
+				char dest[512];
+				size_t dest_size = sizeof(dest);
+				/* Return current value for property. */
+
+				res = RD_KAFKA_CONF_UNKNOWN;
+				if (!strncmp(name, "topic.", strlen("topic.")))
+					res = rd_kafka_topic_conf_get(
+						topic_conf,
+						name+strlen("topic."),
+						dest, &dest_size);
+				if (res == RD_KAFKA_CONF_UNKNOWN)
+					res = rd_kafka_conf_get(
+						conf, name, dest, &dest_size);
+
+				if (res == RD_KAFKA_CONF_OK) {
+					printf("%s = %s\n", name, dest);
+					exit(0);
+				} else {
+					fprintf(stderr,
+						"%% %s property\n",
+						res == RD_KAFKA_CONF_UNKNOWN ?
+						"Unknown" : "Invalid");
+					exit(1);
+				}
+			}
+
+			*val = '\0';
+			val++;
+
+			res = RD_KAFKA_CONF_UNKNOWN;
+			/* Try "topic." prefixed properties on topic
+			 * conf first, and then fall through to global if
+			 * it didn't match a topic configuration property. */
+			if (!strncmp(name, "topic.", strlen("topic.")))
+				res = rd_kafka_topic_conf_set(topic_conf,
+							      name+
+							      strlen("topic."),
+							      val,
+							      errstr,
+							      sizeof(errstr));
+
+			if (res == RD_KAFKA_CONF_UNKNOWN)
+				res = rd_kafka_conf_set(conf, name, val,
+							errstr, sizeof(errstr));
+
+			if (res != RD_KAFKA_CONF_OK) {
+				fprintf(stderr, "%% %s\n", errstr);
+				exit(1);
+			}
+		}
+		break;
+
+		default:
+			goto usage;
+		}
+	}
+
+
+	if (do_conf_dump) {
+		const char **arr;
+		size_t cnt;
+		int pass;
+
+		for (pass = 0 ; pass < 2 ; pass++) {
+			int i;
+
+			if (pass == 0) {
+				arr = rd_kafka_conf_dump(conf, &cnt);
+				printf("# Global config\n");
+			} else {
+				printf("# Topic config\n");
+				arr = rd_kafka_topic_conf_dump(topic_conf,
+							       &cnt);
+			}
+
+			for (i = 0 ; i < (int)cnt ; i += 2)
+				printf("%s = %s\n",
+				       arr[i], arr[i+1]);
+
+			printf("\n");
+
+			rd_kafka_conf_dump_free(arr, cnt);
+		}
+
+		exit(0);
+	}
+
+
+	if (optind != argc || (mode != 'L' && !topic)) {
+	usage:
+		fprintf(stderr,
+			"Usage: %s -C|-P|-L -t <topic> "
+			"[-p <partition>] [-b <host1:port1,host2:port2,..>]\n"
+			"\n"
+			"librdkafka version %s (0x%08x)\n"
+			"\n"
+			" Options:\n"
+			"  -C | -P         Consumer or Producer mode\n"
+                        "  -L              Metadata list mode\n"
+			"  -t <topic>      Topic to fetch / produce\n"
+			"  -p <num>        Partition (random partitioner)\n"
+			"  -b <brokers>    Broker address (localhost:9092)\n"
+			"  -z <codec>      Enable compression:\n"
+			"                  none|gzip|snappy\n"
+			"  -o <offset>     Start offset (consumer):\n"
+			"                  beginning, end, NNNNN or -NNNNN\n"
+			"                  wmark returns the current hi&lo "
+			"watermarks.\n"
+                        "  -o report       Report message offsets (producer)\n"
+			"  -e              Exit consumer when last message\n"
+			"                  in partition has been received.\n"
+			"  -d [facs..]     Enable debugging contexts:\n"
+			"                  %s\n"
+			"  -q              Be quiet\n"
+			"  -A              Raw payload output (consumer)\n"
+                        "  -H <name[=value]> Add header to message (producer)\n"
+			"  -X <prop=name>  Set arbitrary librdkafka "
+			"configuration property\n"
+			"                  Properties prefixed with \"topic.\" "
+			"will be set on topic object.\n"
+			"  -X list         Show full list of supported "
+			"properties.\n"
+			"  -X <prop>       Get single property value\n"
+			"\n"
+			" In Consumer mode:\n"
+			"  writes fetched messages to stdout\n"
+			" In Producer mode:\n"
+			"  reads messages from stdin and sends to broker\n"
+                        " In List mode:\n"
+                        "  queries broker for metadata information, "
+                        "topic is optional.\n"
+			"\n"
+			"\n"
+			"\n",
+			argv[0],
+			rd_kafka_version_str(), rd_kafka_version(),
+			RD_KAFKA_DEBUG_CONTEXTS);
+		exit(1);
+	}
+
+	if ((mode == 'C' && !isatty(STDIN_FILENO)) ||
+	    (mode != 'C' && !isatty(STDOUT_FILENO)))
+		quiet = 1;
+
+
+	signal(SIGINT, stop);
+	signal(SIGUSR1, sig_usr1);
+
+	if (mode == 'P') {
+		/*
+		 * Producer
+		 */
+		char buf[2048];
+		int sendcnt = 0;
+
+		/* Set up a message delivery report callback.
+		 * It will be called once for each message, either on successful
+		 * delivery to broker, or upon failure to deliver to broker. */
+
+                /* If offset reporting (-o report) is enabled, use the
+                 * richer dr_msg_cb instead. */
+                if (report_offsets) {
+                        rd_kafka_topic_conf_set(topic_conf,
+                                                "produce.offset.report",
+                                                "true", errstr, sizeof(errstr));
+                        rd_kafka_conf_set_dr_msg_cb(conf, msg_delivered2);
+                } else
+                        rd_kafka_conf_set_dr_cb(conf, msg_delivered);
+
+		/* Create Kafka handle */
+		if (!(rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf,
+					errstr, sizeof(errstr)))) {
+			fprintf(stderr,
+				"%% Failed to create new producer: %s\n",
+				errstr);
+			exit(1);
+		}
+
+		/* Add brokers */
+		if (rd_kafka_brokers_add(rk, brokers) == 0) {
+			fprintf(stderr, "%% No valid brokers specified\n");
+			exit(1);
+		}
+
+		/* Create topic */
+		rkt = rd_kafka_topic_new(rk, topic, topic_conf);
+                topic_conf = NULL; /* Now owned by topic */
+
+		if (!quiet)
+			fprintf(stderr,
+				"%% Type stuff and hit enter to send\n");
+
+		while (run && fgets(buf, sizeof(buf), stdin)) {
+			size_t len = strlen(buf);
+			if (buf[len-1] == '\n')
+				buf[--len] = '\0';
+
+			/* Send/Produce message. */
+                        if (hdrs) {
+                                rd_kafka_headers_t *hdrs_copy;
+
+                                hdrs_copy = rd_kafka_headers_copy(hdrs);
+
+                                err = rd_kafka_producev(
+                                        rk,
+                                        RD_KAFKA_V_RKT(rkt),
+                                        RD_KAFKA_V_PARTITION(partition),
+                                        RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY),
+                                        RD_KAFKA_V_VALUE(buf, len),
+                                        RD_KAFKA_V_HEADERS(hdrs_copy),
+                                        RD_KAFKA_V_END);
+
+                                if (err)
+                                        rd_kafka_headers_destroy(hdrs_copy);
+
+                        } else {
+                                if (rd_kafka_produce(
+                                            rkt, partition,
+                                            RD_KAFKA_MSG_F_COPY,
+                                            /* Payload and length */
+                                            buf, len,
+                                            /* Optional key and its length */
+                                            NULL, 0,
+                                            /* Message opaque, provided in
+                                             * delivery report callback as
+                                             * msg_opaque. */
+                                            NULL) == -1) {
+                                        err = rd_kafka_last_error();
+                                }
+                        }
+
+                        if (err) {
+                                fprintf(stderr,
+                                        "%% Failed to produce to topic %s "
+					"partition %i: %s\n",
+					rd_kafka_topic_name(rkt), partition,
+					rd_kafka_err2str(err));
+
+				/* Poll to handle delivery reports */
+				rd_kafka_poll(rk, 0);
+				continue;
+			}
+
+			if (!quiet)
+				fprintf(stderr, "%% Sent %zd bytes to topic "
+					"%s partition %i\n",
+				len, rd_kafka_topic_name(rkt), partition);
+			sendcnt++;
+			/* Poll to handle delivery reports */
+			rd_kafka_poll(rk, 0);
+		}
+
+		/* Poll to handle delivery reports */
+		rd_kafka_poll(rk, 0);
+
+		/* Wait for messages to be delivered */
+		while (run && rd_kafka_outq_len(rk) > 0)
+			rd_kafka_poll(rk, 100);
+
+		/* Destroy topic */
+		rd_kafka_topic_destroy(rkt);
+
+		/* Destroy the handle */
+		rd_kafka_destroy(rk);
+
+	} else if (mode == 'C') {
+		/*
+		 * Consumer
+		 */
+
+		/* Create Kafka handle */
+		if (!(rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf,
+					errstr, sizeof(errstr)))) {
+			fprintf(stderr,
+				"%% Failed to create new consumer: %s\n",
+				errstr);
+			exit(1);
+		}
+
+		/* Add brokers */
+		if (rd_kafka_brokers_add(rk, brokers) == 0) {
+			fprintf(stderr, "%% No valid brokers specified\n");
+			exit(1);
+		}
+
+		if (get_wmarks) {
+			int64_t lo, hi;
+                        rd_kafka_resp_err_t err;
+
+			/* Only query for hi&lo partition watermarks */
+
+			if ((err = rd_kafka_query_watermark_offsets(
+				     rk, topic, partition, &lo, &hi, 5000))) {
+				fprintf(stderr, "%% query_watermark_offsets() "
+					"failed: %s\n",
+					rd_kafka_err2str(err));
+				exit(1);
+			}
+
+			printf("%s [%d]: low - high offsets: "
+			       "%"PRId64" - %"PRId64"\n",
+			       topic, partition, lo, hi);
+
+			rd_kafka_destroy(rk);
+			exit(0);
+		}
+
+
+		/* Create topic */
+		rkt = rd_kafka_topic_new(rk, topic, topic_conf);
+                topic_conf = NULL; /* Now owned by topic */
+
+		/* Start consuming */
+		if (rd_kafka_consume_start(rkt, partition, start_offset) == -1){
+			rd_kafka_resp_err_t err = rd_kafka_last_error();
+			fprintf(stderr, "%% Failed to start consuming: %s\n",
+				rd_kafka_err2str(err));
+                        if (err == RD_KAFKA_RESP_ERR__INVALID_ARG)
+                                fprintf(stderr,
+                                        "%% Broker based offset storage "
+                                        "requires a group.id, "
+                                        "add: -X group.id=yourGroup\n");
+			exit(1);
+		}
+
+		while (run) {
+			rd_kafka_message_t *rkmessage;
+                        rd_kafka_resp_err_t err;
+
+                        /* Poll for errors, etc. */
+                        rd_kafka_poll(rk, 0);
+
+			/* Consume single message.
+			 * See rdkafka_performance.c for high speed
+			 * consuming of messages. */
+			rkmessage = rd_kafka_consume(rkt, partition, 1000);
+			if (!rkmessage) /* timeout */
+				continue;
+
+			msg_consume(rkmessage, NULL);
+
+			/* Return message to rdkafka */
+			rd_kafka_message_destroy(rkmessage);
+
+                        if (seek_offset) {
+                                err = rd_kafka_seek(rkt, partition, seek_offset,
+                                                    2000);
+                                if (err)
+                                        printf("Seek failed: %s\n",
+                                               rd_kafka_err2str(err));
+                                else
+                                        printf("Seeked to %"PRId64"\n",
+                                               seek_offset);
+                                seek_offset = 0;
+                        }
+		}
+
+		/* Stop consuming */
+		rd_kafka_consume_stop(rkt, partition);
+
+                while (rd_kafka_outq_len(rk) > 0)
+                        rd_kafka_poll(rk, 10);
+
+		/* Destroy topic */
+		rd_kafka_topic_destroy(rkt);
+
+		/* Destroy handle */
+		rd_kafka_destroy(rk);
+
+        } else if (mode == 'L') {
+                rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;
+
+		/* Create Kafka handle */
+		if (!(rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf,
+					errstr, sizeof(errstr)))) {
+			fprintf(stderr,
+				"%% Failed to create new producer: %s\n",
+				errstr);
+			exit(1);
+		}
+
+		/* Add brokers */
+		if (rd_kafka_brokers_add(rk, brokers) == 0) {
+			fprintf(stderr, "%% No valid brokers specified\n");
+			exit(1);
+		}
+
+                /* Create topic */
+                if (topic) {
+                        rkt = rd_kafka_topic_new(rk, topic, topic_conf);
+                        topic_conf = NULL; /* Now owned by topic */
+                } else
+                        rkt = NULL;
+
+                while (run) {
+                        const struct rd_kafka_metadata *metadata;
+
+                        /* Fetch metadata */
+                        err = rd_kafka_metadata(rk, rkt ? 0 : 1, rkt,
+                                                &metadata, 5000);
+                        if (err != RD_KAFKA_RESP_ERR_NO_ERROR) {
+                                fprintf(stderr,
+                                        "%% Failed to acquire metadata: %s\n",
+                                        rd_kafka_err2str(err));
+                                run = 0;
+                                break;
+                        }
+
+                        metadata_print(topic, metadata);
+
+                        rd_kafka_metadata_destroy(metadata);
+                        run = 0;
+                }
+
+		/* Destroy topic */
+		if (rkt)
+			rd_kafka_topic_destroy(rkt);
+
+		/* Destroy the handle */
+		rd_kafka_destroy(rk);
+
+                if (topic_conf)
+                        rd_kafka_topic_conf_destroy(topic_conf);
+
+
+                /* Exit right away, don't wait for background cleanup, we haven't
+                 * done anything important anyway. */
+                exit(err ? 2 : 0);
+        }
+
+        if (hdrs)
+                rd_kafka_headers_destroy(hdrs);
+
+        if (topic_conf)
+                rd_kafka_topic_conf_destroy(topic_conf);
+
+	/* Let background threads clean up and terminate cleanly. */
+	run = 5;
+	while (run-- > 0 && rd_kafka_wait_destroyed(1000) == -1)
+		printf("Waiting for librdkafka to decommission\n");
+	if (run <= 0)
+		rd_kafka_dump(stdout, rk);
+
+	return 0;
+}
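
The example above covers producer, consumer and metadata modes in full. For quick reference, the producer path it implements reduces to the minimal sketch below (error handling trimmed; the broker list "localhost:9092" and the topic "test" are placeholders for illustration, not values taken from the commit):

    #include <string.h>
    #include <stdio.h>
    #include <librdkafka/rdkafka.h>   /* typical install include path, per the comment in the example */

    static int produce_one(const char *payload) {
            char errstr[512];
            rd_kafka_conf_t *conf = rd_kafka_conf_new();
            rd_kafka_t *rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
            rd_kafka_topic_t *rkt;

            if (!rk)
                    return -1;

            if (rd_kafka_brokers_add(rk, "localhost:9092") == 0) {   /* placeholder broker list */
                    rd_kafka_destroy(rk);
                    return -1;
            }

            rkt = rd_kafka_topic_new(rk, "test", NULL);   /* placeholder topic, default topic conf */

            if (rd_kafka_produce(rkt, RD_KAFKA_PARTITION_UA, RD_KAFKA_MSG_F_COPY,
                                 (void *)payload, strlen(payload),
                                 NULL, 0, NULL) == -1)
                    fprintf(stderr, "produce failed: %s\n",
                            rd_kafka_err2str(rd_kafka_last_error()));

            /* Serve queued events until the message has left the outbound queue. */
            while (rd_kafka_outq_len(rk) > 0)
                    rd_kafka_poll(rk, 100);

            rd_kafka_topic_destroy(rkt);
            rd_kafka_destroy(rk);
            return 0;
    }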

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/examples/rdkafka_example.cpp
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/examples/rdkafka_example.cpp b/thirdparty/librdkafka-0.11.4/examples/rdkafka_example.cpp
new file mode 100644
index 0000000..30d0d0e
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/examples/rdkafka_example.cpp
@@ -0,0 +1,645 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2014, Magnus Edenhill
+ * All rights reserved.
+ * 
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met: 
+ * 
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer. 
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution. 
+ * 
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * Apache Kafka consumer & producer example programs
+ * using the Kafka driver from librdkafka
+ * (https://github.com/edenhill/librdkafka)
+ */
+
+#include <iostream>
+#include <string>
+#include <cstdlib>
+#include <cstdio>
+#include <csignal>
+#include <cstring>
+
+#ifdef _MSC_VER
+#include "../win32/wingetopt.h"
+#elif _AIX
+#include <unistd.h>
+#else
+#include <getopt.h>
+#endif
+
+/*
+ * Typically include path in a real application would be
+ * #include <librdkafka/rdkafkacpp.h>
+ */
+#include "rdkafkacpp.h"
+
+
+static void metadata_print (const std::string &topic,
+                            const RdKafka::Metadata *metadata) {
+  std::cout << "Metadata for " << (topic.empty() ? "" : "all topics")
+           << "(from broker "  << metadata->orig_broker_id()
+           << ":" << metadata->orig_broker_name() << std::endl;
+
+  /* Iterate brokers */
+  std::cout << " " << metadata->brokers()->size() << " brokers:" << std::endl;
+  RdKafka::Metadata::BrokerMetadataIterator ib;
+  for (ib = metadata->brokers()->begin();
+       ib != metadata->brokers()->end();
+       ++ib) {
+    std::cout << "  broker " << (*ib)->id() << " at "
+              << (*ib)->host() << ":" << (*ib)->port() << std::endl;
+  }
+  /* Iterate topics */
+  std::cout << metadata->topics()->size() << " topics:" << std::endl;
+  RdKafka::Metadata::TopicMetadataIterator it;
+  for (it = metadata->topics()->begin();
+       it != metadata->topics()->end();
+       ++it) {
+    std::cout << "  topic \""<< (*it)->topic() << "\" with "
+              << (*it)->partitions()->size() << " partitions:";
+
+    if ((*it)->err() != RdKafka::ERR_NO_ERROR) {
+      std::cout << " " << err2str((*it)->err());
+      if ((*it)->err() == RdKafka::ERR_LEADER_NOT_AVAILABLE)
+        std::cout << " (try again)";
+    }
+    std::cout << std::endl;
+
+    /* Iterate topic's partitions */
+    RdKafka::TopicMetadata::PartitionMetadataIterator ip;
+    for (ip = (*it)->partitions()->begin();
+         ip != (*it)->partitions()->end();
+         ++ip) {
+      std::cout << "    partition " << (*ip)->id()
+                << ", leader " << (*ip)->leader()
+                << ", replicas: ";
+
+      /* Iterate partition's replicas */
+      RdKafka::PartitionMetadata::ReplicasIterator ir;
+      for (ir = (*ip)->replicas()->begin();
+           ir != (*ip)->replicas()->end();
+           ++ir) {
+        std::cout << (ir == (*ip)->replicas()->begin() ? "":",") << *ir;
+      }
+
+      /* Iterate partition's ISRs */
+      std::cout << ", isrs: ";
+      RdKafka::PartitionMetadata::ISRSIterator iis;
+      for (iis = (*ip)->isrs()->begin(); iis != (*ip)->isrs()->end() ; ++iis)
+        std::cout << (iis == (*ip)->isrs()->begin() ? "":",") << *iis;
+
+      if ((*ip)->err() != RdKafka::ERR_NO_ERROR)
+        std::cout << ", " << RdKafka::err2str((*ip)->err()) << std::endl;
+      else
+        std::cout << std::endl;
+    }
+  }
+}
+
+static bool run = true;
+static bool exit_eof = false;
+
+static void sigterm (int sig) {
+  run = false;
+}
+
+
+class ExampleDeliveryReportCb : public RdKafka::DeliveryReportCb {
+ public:
+  void dr_cb (RdKafka::Message &message) {
+    std::cout << "Message delivery for (" << message.len() << " bytes): " <<
+        message.errstr() << std::endl;
+    if (message.key())
+      std::cout << "Key: " << *(message.key()) << ";" << std::endl;
+  }
+};
+
+
+class ExampleEventCb : public RdKafka::EventCb {
+ public:
+  void event_cb (RdKafka::Event &event) {
+    switch (event.type())
+    {
+      case RdKafka::Event::EVENT_ERROR:
+        std::cerr << "ERROR (" << RdKafka::err2str(event.err()) << "): " <<
+            event.str() << std::endl;
+        if (event.err() == RdKafka::ERR__ALL_BROKERS_DOWN)
+          run = false;
+        break;
+
+      case RdKafka::Event::EVENT_STATS:
+        std::cerr << "\"STATS\": " << event.str() << std::endl;
+        break;
+
+      case RdKafka::Event::EVENT_LOG:
+        fprintf(stderr, "LOG-%i-%s: %s\n",
+                event.severity(), event.fac().c_str(), event.str().c_str());
+        break;
+
+      default:
+        std::cerr << "EVENT " << event.type() <<
+            " (" << RdKafka::err2str(event.err()) << "): " <<
+            event.str() << std::endl;
+        break;
+    }
+  }
+};
+
+
+/* Use of this partitioner is pretty pointless since no key is provided
+ * in the produce() call. */
+class MyHashPartitionerCb : public RdKafka::PartitionerCb {
+ public:
+  int32_t partitioner_cb (const RdKafka::Topic *topic, const std::string *key,
+                          int32_t partition_cnt, void *msg_opaque) {
+    return djb_hash(key->c_str(), key->size()) % partition_cnt;
+  }
+ private:
+
+  static inline unsigned int djb_hash (const char *str, size_t len) {
+    unsigned int hash = 5381;
+    for (size_t i = 0 ; i < len ; i++)
+      hash = ((hash << 5) + hash) + str[i];
+    return hash;
+  }
+};
+
+void msg_consume(RdKafka::Message* message, void* opaque) {
+  switch (message->err()) {
+    case RdKafka::ERR__TIMED_OUT:
+      break;
+
+    case RdKafka::ERR_NO_ERROR:
+      /* Real message */
+      std::cout << "Read msg at offset " << message->offset() << std::endl;
+      if (message->key()) {
+        std::cout << "Key: " << *message->key() << std::endl;
+      }
+      printf("%.*s\n",
+        static_cast<int>(message->len()),
+        static_cast<const char *>(message->payload()));
+      break;
+
+    case RdKafka::ERR__PARTITION_EOF:
+      /* Last message */
+      if (exit_eof) {
+        run = false;
+      }
+      break;
+
+    case RdKafka::ERR__UNKNOWN_TOPIC:
+    case RdKafka::ERR__UNKNOWN_PARTITION:
+      std::cerr << "Consume failed: " << message->errstr() << std::endl;
+      run = false;
+      break;
+
+    default:
+      /* Errors */
+      std::cerr << "Consume failed: " << message->errstr() << std::endl;
+      run = false;
+  }
+}
+
+
+class ExampleConsumeCb : public RdKafka::ConsumeCb {
+ public:
+  void consume_cb (RdKafka::Message &msg, void *opaque) {
+    msg_consume(&msg, opaque);
+  }
+};
+
+
+
+int main (int argc, char **argv) {
+  std::string brokers = "localhost";
+  std::string errstr;
+  std::string topic_str;
+  std::string mode;
+  std::string debug;
+  int32_t partition = RdKafka::Topic::PARTITION_UA;
+  int64_t start_offset = RdKafka::Topic::OFFSET_BEGINNING;
+  bool do_conf_dump = false;
+  int opt;
+  MyHashPartitionerCb hash_partitioner;
+  int use_ccb = 0;
+
+  /*
+   * Create configuration objects
+   */
+  RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
+  RdKafka::Conf *tconf = RdKafka::Conf::create(RdKafka::Conf::CONF_TOPIC);
+
+
+  while ((opt = getopt(argc, argv, "PCLt:p:b:z:qd:o:eX:AM:f:")) != -1) {
+    switch (opt) {
+    case 'P':
+    case 'C':
+    case 'L':
+      mode = opt;
+      break;
+    case 't':
+      topic_str = optarg;
+      break;
+    case 'p':
+      if (!strcmp(optarg, "random"))
+        /* default */;
+      else if (!strcmp(optarg, "hash")) {
+        if (tconf->set("partitioner_cb", &hash_partitioner, errstr) !=
+            RdKafka::Conf::CONF_OK) {
+          std::cerr << errstr << std::endl;
+          exit(1);
+        }
+      } else
+        partition = std::atoi(optarg);
+      break;
+    case 'b':
+      brokers = optarg;
+      break;
+    case 'z':
+      if (conf->set("compression.codec", optarg, errstr) !=
+	  RdKafka::Conf::CONF_OK) {
+	std::cerr << errstr << std::endl;
+	exit(1);
+      }
+      break;
+    case 'o':
+      if (!strcmp(optarg, "end"))
+	start_offset = RdKafka::Topic::OFFSET_END;
+      else if (!strcmp(optarg, "beginning"))
+	start_offset = RdKafka::Topic::OFFSET_BEGINNING;
+      else if (!strcmp(optarg, "stored"))
+	start_offset = RdKafka::Topic::OFFSET_STORED;
+      else
+	start_offset = strtoll(optarg, NULL, 10);
+      break;
+    case 'e':
+      exit_eof = true;
+      break;
+    case 'd':
+      debug = optarg;
+      break;
+    case 'M':
+      if (conf->set("statistics.interval.ms", optarg, errstr) !=
+          RdKafka::Conf::CONF_OK) {
+        std::cerr << errstr << std::endl;
+        exit(1);
+      }
+      break;
+    case 'X':
+      {
+	char *name, *val;
+
+	if (!strcmp(optarg, "dump")) {
+	  do_conf_dump = true;
+	  continue;
+	}
+
+	name = optarg;
+	if (!(val = strchr(name, '='))) {
+          std::cerr << "%% Expected -X property=value, not " <<
+              name << std::endl;
+	  exit(1);
+	}
+
+	*val = '\0';
+	val++;
+
+	/* Try "topic." prefixed properties on topic
+	 * conf first, and then fall through to global if
+	 * it didn't match a topic configuration property. */
+        RdKafka::Conf::ConfResult res;
+	if (!strncmp(name, "topic.", strlen("topic.")))
+          res = tconf->set(name+strlen("topic."), val, errstr);
+        else
+	  res = conf->set(name, val, errstr);
+
+	if (res != RdKafka::Conf::CONF_OK) {
+          std::cerr << errstr << std::endl;
+	  exit(1);
+	}
+      }
+      break;
+
+      case 'f':
+        if (!strcmp(optarg, "ccb"))
+          use_ccb = 1;
+        else {
+          std::cerr << "Unknown option: " << optarg << std::endl;
+          exit(1);
+        }
+        break;
+
+    default:
+      goto usage;
+    }
+  }
+
+  if (mode.empty() || (topic_str.empty() && mode != "L") || optind != argc) {
+  usage:
+	  std::string features;
+	  conf->get("builtin.features", features);
+    fprintf(stderr,
+            "Usage: %s [-C|-P] -t <topic> "
+            "[-p <partition>] [-b <host1:port1,host2:port2,..>]\n"
+            "\n"
+            "librdkafka version %s (0x%08x, builtin.features \"%s\")\n"
+            "\n"
+            " Options:\n"
+            "  -C | -P         Consumer or Producer mode\n"
+            "  -L              Metadata list mode\n"
+            "  -t <topic>      Topic to fetch / produce\n"
+            "  -p <num>        Partition (random partitioner)\n"
+            "  -p <func>       Use partitioner:\n"
+            "                  random (default), hash\n"
+            "  -b <brokers>    Broker address (localhost:9092)\n"
+            "  -z <codec>      Enable compression:\n"
+            "                  none|gzip|snappy\n"
+            "  -o <offset>     Start offset (consumer)\n"
+            "  -e              Exit consumer when last message\n"
+            "                  in partition has been received.\n"
+            "  -d [facs..]     Enable debugging contexts:\n"
+            "                  %s\n"
+            "  -M <intervalms> Enable statistics\n"
+            "  -X <prop=name>  Set arbitrary librdkafka "
+            "configuration property\n"
+            "                  Properties prefixed with \"topic.\" "
+            "will be set on topic object.\n"
+            "                  Use '-X list' to see the full list\n"
+            "                  of supported properties.\n"
+            "  -f <flag>       Set option:\n"
+            "                     ccb - use consume_callback\n"
+            "\n"
+            " In Consumer mode:\n"
+            "  writes fetched messages to stdout\n"
+            " In Producer mode:\n"
+            "  reads messages from stdin and sends to broker\n"
+            "\n"
+            "\n"
+            "\n",
+	    argv[0],
+	    RdKafka::version_str().c_str(), RdKafka::version(),
+		features.c_str(),
+	    RdKafka::get_debug_contexts().c_str());
+	exit(1);
+  }
+
+
+  /*
+   * Set configuration properties
+   */
+  conf->set("metadata.broker.list", brokers, errstr);
+
+  if (!debug.empty()) {
+    if (conf->set("debug", debug, errstr) != RdKafka::Conf::CONF_OK) {
+      std::cerr << errstr << std::endl;
+      exit(1);
+    }
+  }
+
+  ExampleEventCb ex_event_cb;
+  conf->set("event_cb", &ex_event_cb, errstr);
+
+  if (do_conf_dump) {
+    int pass;
+
+    for (pass = 0 ; pass < 2 ; pass++) {
+      std::list<std::string> *dump;
+      if (pass == 0) {
+        dump = conf->dump();
+        std::cout << "# Global config" << std::endl;
+      } else {
+        dump = tconf->dump();
+        std::cout << "# Topic config" << std::endl;
+      }
+
+      for (std::list<std::string>::iterator it = dump->begin();
+           it != dump->end(); ) {
+        std::cout << *it << " = ";
+        it++;
+        std::cout << *it << std::endl;
+        it++;
+      }
+      std::cout << std::endl;
+    }
+    exit(0);
+  }
+
+  signal(SIGINT, sigterm);
+  signal(SIGTERM, sigterm);
+
+
+  if (mode == "P") {
+    /*
+     * Producer mode
+     */
+
+    if(topic_str.empty())
+      goto usage;
+
+    ExampleDeliveryReportCb ex_dr_cb;
+
+    /* Set delivery report callback */
+    conf->set("dr_cb", &ex_dr_cb, errstr);
+
+    /*
+     * Create producer using accumulated global configuration.
+     */
+    RdKafka::Producer *producer = RdKafka::Producer::create(conf, errstr);
+    if (!producer) {
+      std::cerr << "Failed to create producer: " << errstr << std::endl;
+      exit(1);
+    }
+
+    std::cout << "% Created producer " << producer->name() << std::endl;
+
+    /*
+     * Create topic handle.
+     */
+    RdKafka::Topic *topic = RdKafka::Topic::create(producer, topic_str,
+						   tconf, errstr);
+    if (!topic) {
+      std::cerr << "Failed to create topic: " << errstr << std::endl;
+      exit(1);
+    }
+
+    /*
+     * Read messages from stdin and produce to broker.
+     */
+    for (std::string line; run && std::getline(std::cin, line);) {
+      if (line.empty()) {
+        producer->poll(0);
+	continue;
+      }
+
+      /*
+       * Produce message
+       */
+      RdKafka::ErrorCode resp =
+	producer->produce(topic, partition,
+			  RdKafka::Producer::RK_MSG_COPY /* Copy payload */,
+			  const_cast<char *>(line.c_str()), line.size(),
+			  NULL, NULL);
+      if (resp != RdKafka::ERR_NO_ERROR)
+	std::cerr << "% Produce failed: " <<
+	  RdKafka::err2str(resp) << std::endl;
+      else
+	std::cerr << "% Produced message (" << line.size() << " bytes)" <<
+	  std::endl;
+
+      producer->poll(0);
+    }
+    run = true;
+
+    while (run && producer->outq_len() > 0) {
+      std::cerr << "Waiting for " << producer->outq_len() << std::endl;
+      producer->poll(1000);
+    }
+
+    delete topic;
+    delete producer;
+
+
+  } else if (mode == "C") {
+    /*
+     * Consumer mode
+     */
+
+    if(topic_str.empty())
+      goto usage;
+
+    /*
+     * Create consumer using accumulated global configuration.
+     */
+    RdKafka::Consumer *consumer = RdKafka::Consumer::create(conf, errstr);
+    if (!consumer) {
+      std::cerr << "Failed to create consumer: " << errstr << std::endl;
+      exit(1);
+    }
+
+    std::cout << "% Created consumer " << consumer->name() << std::endl;
+
+    /*
+     * Create topic handle.
+     */
+    RdKafka::Topic *topic = RdKafka::Topic::create(consumer, topic_str,
+						   tconf, errstr);
+    if (!topic) {
+      std::cerr << "Failed to create topic: " << errstr << std::endl;
+      exit(1);
+    }
+
+    /*
+     * Start consumer for topic+partition at start offset
+     */
+    RdKafka::ErrorCode resp = consumer->start(topic, partition, start_offset);
+    if (resp != RdKafka::ERR_NO_ERROR) {
+      std::cerr << "Failed to start consumer: " <<
+	RdKafka::err2str(resp) << std::endl;
+      exit(1);
+    }
+
+    ExampleConsumeCb ex_consume_cb;
+
+    /*
+     * Consume messages
+     */
+    while (run) {
+      if (use_ccb) {
+        consumer->consume_callback(topic, partition, 1000,
+                                   &ex_consume_cb, &use_ccb);
+      } else {
+        RdKafka::Message *msg = consumer->consume(topic, partition, 1000);
+        msg_consume(msg, NULL);
+        delete msg;
+      }
+      consumer->poll(0);
+    }
+
+    /*
+     * Stop consumer
+     */
+    consumer->stop(topic, partition);
+
+    consumer->poll(1000);
+
+    delete topic;
+    delete consumer;
+  } else {
+    /* Metadata mode */
+
+    /*
+     * Create producer using accumulated global configuration.
+     */
+    RdKafka::Producer *producer = RdKafka::Producer::create(conf, errstr);
+    if (!producer) {
+      std::cerr << "Failed to create producer: " << errstr << std::endl;
+      exit(1);
+    }
+
+    std::cout << "% Created producer " << producer->name() << std::endl;
+
+    /*
+     * Create topic handle.
+     */
+    RdKafka::Topic *topic = NULL;
+    if(!topic_str.empty()) {
+      topic = RdKafka::Topic::create(producer, topic_str, tconf, errstr);
+      if (!topic) {
+        std::cerr << "Failed to create topic: " << errstr << std::endl;
+        exit(1);
+      }
+    }
+
+    while (run) {
+      class RdKafka::Metadata *metadata;
+
+      /* Fetch metadata */
+      RdKafka::ErrorCode err = producer->metadata(topic!=NULL, topic,
+                              &metadata, 5000);
+      if (err != RdKafka::ERR_NO_ERROR) {
+        std::cerr << "%% Failed to acquire metadata: " 
+                  << RdKafka::err2str(err) << std::endl;
+              run = 0;
+              break;
+      }
+
+      metadata_print(topic_str, metadata);
+
+      delete metadata;
+      run = 0;
+    }
+
+  }
+
+
+  /*
+   * Wait for RdKafka to decommission.
+   * This is not strictly needed (when checking outq_len() above), but
+   * allows RdKafka to clean up all its resources before the application
+   * exits so that memory profilers such as valgrind won't complain about
+   * memory leaks.
+   */
+  RdKafka::wait_destroyed(5000);
+
+  return 0;
+}


[12/51] [partial] nifi-minifi-cpp git commit: MINIFICPP-512 - upgrade to librdkafka 0.11.4

Posted by ph...@apache.org.
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/xxhash.h
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/xxhash.h b/thirdparty/librdkafka-0.11.1/src/xxhash.h
deleted file mode 100644
index 870a6d9..0000000
--- a/thirdparty/librdkafka-0.11.1/src/xxhash.h
+++ /dev/null
@@ -1,293 +0,0 @@
-/*
-   xxHash - Extremely Fast Hash algorithm
-   Header File
-   Copyright (C) 2012-2016, Yann Collet.
-
-   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-   Redistribution and use in source and binary forms, with or without
-   modification, are permitted provided that the following conditions are
-   met:
-
-       * Redistributions of source code must retain the above copyright
-   notice, this list of conditions and the following disclaimer.
-       * Redistributions in binary form must reproduce the above
-   copyright notice, this list of conditions and the following disclaimer
-   in the documentation and/or other materials provided with the
-   distribution.
-
-   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-   You can contact the author at :
-   - xxHash source repository : https://github.com/Cyan4973/xxHash
-*/
-
-/* Notice extracted from xxHash homepage :
-
-xxHash is an extremely fast Hash algorithm, running at RAM speed limits.
-It also successfully passes all tests from the SMHasher suite.
-
-Comparison (single thread, Windows Seven 32 bits, using SMHasher on a Core 2 Duo @3GHz)
-
-Name            Speed       Q.Score   Author
-xxHash          5.4 GB/s     10
-CrapWow         3.2 GB/s      2       Andrew
-MurmurHash 3a   2.7 GB/s     10       Austin Appleby
-SpookyHash      2.0 GB/s     10       Bob Jenkins
-SBox            1.4 GB/s      9       Bret Mulvey
-Lookup3         1.2 GB/s      9       Bob Jenkins
-SuperFastHash   1.2 GB/s      1       Paul Hsieh
-CityHash64      1.05 GB/s    10       Pike & Alakuijala
-FNV             0.55 GB/s     5       Fowler, Noll, Vo
-CRC32           0.43 GB/s     9
-MD5-32          0.33 GB/s    10       Ronald L. Rivest
-SHA1-32         0.28 GB/s    10
-
-Q.Score is a measure of quality of the hash function.
-It depends on successfully passing SMHasher test set.
-10 is a perfect score.
-
-A 64-bits version, named XXH64, is available since r35.
-It offers much better speed, but for 64-bits applications only.
-Name     Speed on 64 bits    Speed on 32 bits
-XXH64       13.8 GB/s            1.9 GB/s
-XXH32        6.8 GB/s            6.0 GB/s
-*/
-
-#ifndef XXHASH_H_5627135585666179
-#define XXHASH_H_5627135585666179 1
-
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-
-/* ****************************
-*  Definitions
-******************************/
-#include <stddef.h>   /* size_t */
-typedef enum { XXH_OK=0, XXH_ERROR } XXH_errorcode;
-
-
-/* ****************************
-*  API modifier
-******************************/
-/** XXH_PRIVATE_API
-*   This is useful to include xxhash functions in `static` mode
-*   in order to inline them, and remove their symbol from the public list.
-*   Methodology :
-*     #define XXH_PRIVATE_API
-*     #include "xxhash.h"
-*   `xxhash.c` is automatically included.
-*   It's not useful to compile and link it as a separate module.
-*/
-#ifdef XXH_PRIVATE_API
-#  ifndef XXH_STATIC_LINKING_ONLY
-#    define XXH_STATIC_LINKING_ONLY
-#  endif
-#  if defined(__GNUC__)
-#    define XXH_PUBLIC_API static __inline __attribute__((unused))
-#  elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
-#    define XXH_PUBLIC_API static inline
-#  elif defined(_MSC_VER)
-#    define XXH_PUBLIC_API static __inline
-#  else
-#    define XXH_PUBLIC_API static   /* this version may generate warnings for unused static functions; disable the relevant warning */
-#  endif
-#else
-#  define XXH_PUBLIC_API   /* do nothing */
-#endif /* XXH_PRIVATE_API */
-
-/*!XXH_NAMESPACE, aka Namespace Emulation :
-
-If you want to include _and expose_ xxHash functions from within your own library,
-but also want to avoid symbol collisions with other libraries which may also include xxHash,
-
-you can use XXH_NAMESPACE, to automatically prefix any public symbol from xxhash library
-with the value of XXH_NAMESPACE (therefore, avoid NULL and numeric values).
-
-Note that no change is required within the calling program as long as it includes `xxhash.h` :
-regular symbol name will be automatically translated by this header.
-*/
-#ifdef XXH_NAMESPACE
-#  define XXH_CAT(A,B) A##B
-#  define XXH_NAME2(A,B) XXH_CAT(A,B)
-#  define XXH_versionNumber XXH_NAME2(XXH_NAMESPACE, XXH_versionNumber)
-#  define XXH32 XXH_NAME2(XXH_NAMESPACE, XXH32)
-#  define XXH32_createState XXH_NAME2(XXH_NAMESPACE, XXH32_createState)
-#  define XXH32_freeState XXH_NAME2(XXH_NAMESPACE, XXH32_freeState)
-#  define XXH32_reset XXH_NAME2(XXH_NAMESPACE, XXH32_reset)
-#  define XXH32_update XXH_NAME2(XXH_NAMESPACE, XXH32_update)
-#  define XXH32_digest XXH_NAME2(XXH_NAMESPACE, XXH32_digest)
-#  define XXH32_copyState XXH_NAME2(XXH_NAMESPACE, XXH32_copyState)
-#  define XXH32_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH32_canonicalFromHash)
-#  define XXH32_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH32_hashFromCanonical)
-#  define XXH64 XXH_NAME2(XXH_NAMESPACE, XXH64)
-#  define XXH64_createState XXH_NAME2(XXH_NAMESPACE, XXH64_createState)
-#  define XXH64_freeState XXH_NAME2(XXH_NAMESPACE, XXH64_freeState)
-#  define XXH64_reset XXH_NAME2(XXH_NAMESPACE, XXH64_reset)
-#  define XXH64_update XXH_NAME2(XXH_NAMESPACE, XXH64_update)
-#  define XXH64_digest XXH_NAME2(XXH_NAMESPACE, XXH64_digest)
-#  define XXH64_copyState XXH_NAME2(XXH_NAMESPACE, XXH64_copyState)
-#  define XXH64_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH64_canonicalFromHash)
-#  define XXH64_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH64_hashFromCanonical)
-#endif
-
-
-/* *************************************
-*  Version
-***************************************/
-#define XXH_VERSION_MAJOR    0
-#define XXH_VERSION_MINOR    6
-#define XXH_VERSION_RELEASE  2
-#define XXH_VERSION_NUMBER  (XXH_VERSION_MAJOR *100*100 + XXH_VERSION_MINOR *100 + XXH_VERSION_RELEASE)
-XXH_PUBLIC_API unsigned XXH_versionNumber (void);
-
-
-/*-**********************************************************************
-*  32-bits hash
-************************************************************************/
-typedef unsigned int       XXH32_hash_t;
-
-/*! XXH32() :
-    Calculate the 32-bits hash of sequence "length" bytes stored at memory address "input".
-    The memory between input & input+length must be valid (allocated and read-accessible).
-    "seed" can be used to alter the result predictably.
-    Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark) : 5.4 GB/s */
-XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t length, unsigned int seed);
-
-/*======   Streaming   ======*/
-typedef struct XXH32_state_s XXH32_state_t;   /* incomplete type */
-XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void);
-XXH_PUBLIC_API XXH_errorcode  XXH32_freeState(XXH32_state_t* statePtr);
-XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dst_state, const XXH32_state_t* src_state);
-
-XXH_PUBLIC_API XXH_errorcode XXH32_reset  (XXH32_state_t* statePtr, unsigned int seed);
-XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* statePtr, const void* input, size_t length);
-XXH_PUBLIC_API XXH32_hash_t  XXH32_digest (const XXH32_state_t* statePtr);
-
-/*
-These functions generate the xxHash of an input provided in multiple segments.
-Note that, for small input, they are slower than single-call functions, due to state management.
-For small input, prefer `XXH32()` and `XXH64()` .
-
-XXH state must first be allocated, using XXH*_createState() .
-
-Start a new hash by initializing state with a seed, using XXH*_reset().
-
-Then, feed the hash state by calling XXH*_update() as many times as necessary.
-Obviously, input must be allocated and read accessible.
-The function returns an error code, with 0 meaning OK, and any other value meaning there is an error.
-
-Finally, a hash value can be produced anytime, by using XXH*_digest().
-This function returns the nn-bits hash as an int or long long.
-
-It's still possible to continue inserting input into the hash state after a digest,
-and generate some new hashes later on, by calling again XXH*_digest().
-
-When done, free XXH state space if it was allocated dynamically.
-*/
-
-/*======   Canonical representation   ======*/
-
-typedef struct { unsigned char digest[4]; } XXH32_canonical_t;
-XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash);
-XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src);
-
-/* Default result type for XXH functions are primitive unsigned 32 and 64 bits.
-*  The canonical representation uses human-readable write convention, aka big-endian (large digits first).
-*  These functions allow transformation of hash result into and from its canonical format.
-*  This way, hash values can be written into a file / memory, and remain comparable on different systems and programs.
-*/
-
-
-#ifndef XXH_NO_LONG_LONG
-/*-**********************************************************************
-*  64-bits hash
-************************************************************************/
-typedef unsigned long long XXH64_hash_t;
-
-/*! XXH64() :
-    Calculate the 64-bits hash of sequence of length "len" stored at memory address "input".
-    "seed" can be used to alter the result predictably.
-    This function runs faster on 64-bits systems, but slower on 32-bits systems (see benchmark).
-*/
-XXH_PUBLIC_API XXH64_hash_t XXH64 (const void* input, size_t length, unsigned long long seed);
-
-/*======   Streaming   ======*/
-typedef struct XXH64_state_s XXH64_state_t;   /* incomplete type */
-XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void);
-XXH_PUBLIC_API XXH_errorcode  XXH64_freeState(XXH64_state_t* statePtr);
-XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* dst_state, const XXH64_state_t* src_state);
-
-XXH_PUBLIC_API XXH_errorcode XXH64_reset  (XXH64_state_t* statePtr, unsigned long long seed);
-XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* statePtr, const void* input, size_t length);
-XXH_PUBLIC_API XXH64_hash_t  XXH64_digest (const XXH64_state_t* statePtr);
-
-/*======   Canonical representation   ======*/
-typedef struct { unsigned char digest[8]; } XXH64_canonical_t;
-XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash);
-XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src);
-#endif  /* XXH_NO_LONG_LONG */
-
-
-#ifdef XXH_STATIC_LINKING_ONLY
-
-/* ================================================================================================
-   This section contains definitions which are not guaranteed to remain stable.
-   They may change in future versions, becoming incompatible with a different version of the library.
-   They shall only be used with static linking.
-   Never use these definitions in association with dynamic linking !
-=================================================================================================== */
-
-/* These definitions are only meant to allow allocation of an XXH state
-   statically, on the stack, or inside a struct, for example.
-   Do not access the members directly. */
-
-   struct XXH32_state_s {
-       unsigned total_len_32;
-       unsigned large_len;
-       unsigned v1;
-       unsigned v2;
-       unsigned v3;
-       unsigned v4;
-       unsigned mem32[4];   /* buffer defined as U32 for alignment */
-       unsigned memsize;
-       unsigned reserved;   /* never read nor write, will be removed in a future version */
-   };   /* typedef'd to XXH32_state_t */
-
-#ifndef XXH_NO_LONG_LONG
-   struct XXH64_state_s {
-       unsigned long long total_len;
-       unsigned long long v1;
-       unsigned long long v2;
-       unsigned long long v3;
-       unsigned long long v4;
-       unsigned long long mem64[4];   /* buffer defined as U64 for alignment */
-       unsigned memsize;
-       unsigned reserved[2];          /* never read nor write, will be removed in a future version */
-   };   /* typedef'd to XXH64_state_t */
-#endif
-
-#  ifdef XXH_PRIVATE_API
-#    include "xxhash.c"   /* include xxhash function bodies as `static`, for inlining */
-#  endif
-
-#endif /* XXH_STATIC_LINKING_ONLY */
-
-
-#if defined (__cplusplus)
-}
-#endif
-
-#endif /* XXHASH_H_5627135585666179 */
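For reference, the removed header above documents a create/reset/update/digest streaming API alongside one-shot hashing and a canonical (big-endian) representation. The sketch below is not part of the commit; it is a minimal, hypothetical C usage example based only on the declarations shown above (the program structure and string contents are illustrative):

#include <stdio.h>
#include <string.h>
#include "xxhash.h"   /* hypothetical include path; the header ships in librdkafka's src directory */

int main(void)
{
    const char *part1 = "hello ";
    const char *part2 = "world";
    unsigned long long seed = 0;

    /* One-shot hashing, recommended for small inputs. */
    char whole[32];
    snprintf(whole, sizeof(whole), "%s%s", part1, part2);
    XXH64_hash_t oneshot = XXH64(whole, strlen(whole), seed);

    /* Streaming hashing: allocate a state, reset it with a seed,
     * update it as many times as needed, then digest. */
    XXH64_state_t *state = XXH64_createState();
    if (state == NULL)
        return 1;
    if (XXH64_reset(state, seed) != XXH_OK ||
        XXH64_update(state, part1, strlen(part1)) != XXH_OK ||
        XXH64_update(state, part2, strlen(part2)) != XXH_OK) {
        XXH64_freeState(state);
        return 1;
    }
    XXH64_hash_t streamed = XXH64_digest(state);
    XXH64_freeState(state);

    printf("one-shot and streaming match: %d\n", oneshot == streamed);

    /* The canonical (big-endian) representation survives a round trip,
     * so the stored form is comparable across platforms. */
    XXH64_canonical_t canon;
    XXH64_canonicalFromHash(&canon, streamed);
    printf("canonical round trip ok: %d\n",
           XXH64_hashFromCanonical(&canon) == streamed);

    /* With XXH_STATIC_LINKING_ONLY defined before including xxhash.h,
     * the state could instead live on the stack:
     *   XXH64_state_t st;  XXH64_reset(&st, seed);  ...  XXH64_digest(&st);
     */
    return 0;
}

Both paths hash the same bytes, so the two values agree; the canonical form is the one to persist when hashes must be compared across machines of different endianness.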

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/win32/.gitignore
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/win32/.gitignore b/thirdparty/librdkafka-0.11.1/win32/.gitignore
deleted file mode 100644
index 6b56d66..0000000
--- a/thirdparty/librdkafka-0.11.1/win32/.gitignore
+++ /dev/null
@@ -1,109 +0,0 @@
-## Ignore Visual Studio temporary files, build results, and
-## files generated by popular Visual Studio add-ons.
-
-# User-specific files
-*.suo
-*.user
-*.userosscache
-*.sln.docstates
-
-# User-specific files (MonoDevelop/Xamarin Studio)
-*.userprefs
-
-# Build results
-[Dd]ebug/
-[Dd]ebugPublic/
-[Rr]elease/
-[Rr]eleases/
-x64/
-x86/
-build/
-bld/
-[Bb]in/
-[Oo]bj/
-
-# Visual Studio 2015 cache/options directory
-.vs/
-*.opendb
-
-# MSTest test Results
-[Tt]est[Rr]esult*/
-[Bb]uild[Ll]og.*
-
-# NUNIT
-*.VisualState.xml
-TestResult.xml
-
-# Build Results of an ATL Project
-[Dd]ebugPS/
-[Rr]eleasePS/
-dlldata.c
-
-*_i.c
-*_p.c
-*_i.h
-*.ilk
-*.meta
-*.obj
-*.pch
-*.pdb
-*.pgc
-*.pgd
-*.rsp
-*.sbr
-*.tlb
-*.tli
-*.tlh
-*.tmp
-*.tmp_proj
-*.log
-*.vspscc
-*.vssscc
-.builds
-*.pidb
-*.svclog
-*.scc
-
-# Visual C++ cache files
-ipch/
-*.aps
-*.ncb
-*.opensdf
-*.sdf
-*.cachefile
-
-# Visual Studio profiler
-*.psess
-*.vsp
-*.vspx
-
-# NuGet
-packages/*
-!packages/repositories.config
-
-# Installshield output folder
-[Ee]xpress/
-
-# Others
-*.[Cc]ache
-ClientBin/
-[Ss]tyle[Cc]op.*
-~$*
-*~
-*.dbmdl
-*.dbproj.schemaview
-*.pfx
-*.publishsettings
-node_modules/
-bower_components/
-
-*.filters
-*.tlog
-*.db
-*.opendb
-*.idb
-*.nupkg
-intdir
-outdir
-interim
-

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/win32/README.md
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/win32/README.md b/thirdparty/librdkafka-0.11.1/win32/README.md
deleted file mode 100644
index e4f7556..0000000
--- a/thirdparty/librdkafka-0.11.1/win32/README.md
+++ /dev/null
@@ -1,5 +0,0 @@
-build.bat - Build all combinations of Win32/x64 and Release/Debug using the current msbuild toolset
-build-package.bat - Build NuGet packages (wrapper for package-nuget.ps1)
-package-nuget.ps1 - Build NuGet packages (using build.bat artifacts)
-push-package.bat - Push NuGet packages to NuGet (edit the script to set the version)
-

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/win32/build-package.bat
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/win32/build-package.bat b/thirdparty/librdkafka-0.11.1/win32/build-package.bat
deleted file mode 100644
index 3a2b2a2..0000000
--- a/thirdparty/librdkafka-0.11.1/win32/build-package.bat
+++ /dev/null
@@ -1,3 +0,0 @@
-
-powershell "%CD%\package-nuget.ps1"
-

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/win32/build.bat
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/win32/build.bat b/thirdparty/librdkafka-0.11.1/win32/build.bat
deleted file mode 100644
index cb1870f..0000000
--- a/thirdparty/librdkafka-0.11.1/win32/build.bat
+++ /dev/null
@@ -1,19 +0,0 @@
-@echo off
-
-SET TOOLCHAIN=v140
-
-FOR %%C IN (Debug,Release) DO (
-  FOR %%P IN (Win32,x64) DO (
-     @echo Building %%C %%P
-     msbuild librdkafka.sln /p:Configuration=%%C /p:Platform=%%P /target:Clean
-     msbuild librdkafka.sln /p:Configuration=%%C /p:Platform=%%P || goto :error
-
-
-  )
-)
-
-exit /b 0
-
-:error
-echo "Build failed"
-exit /b 1

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/win32/common.vcxproj
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/win32/common.vcxproj b/thirdparty/librdkafka-0.11.1/win32/common.vcxproj
deleted file mode 100644
index ef5bf83..0000000
--- a/thirdparty/librdkafka-0.11.1/win32/common.vcxproj
+++ /dev/null
@@ -1,76 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
-  <ItemGroup Label="ProjectConfigurations">
-    <ProjectConfiguration Include="Debug|Win32">
-      <Configuration>Debug</Configuration>
-      <Platform>Win32</Platform>
-    </ProjectConfiguration>
-    <ProjectConfiguration Include="Debug|x64">
-      <Configuration>Debug</Configuration>
-      <Platform>x64</Platform>
-    </ProjectConfiguration>
-    <ProjectConfiguration Include="Release|Win32">
-      <Configuration>Release</Configuration>
-      <Platform>Win32</Platform>
-    </ProjectConfiguration>
-    <ProjectConfiguration Include="Release|x64">
-      <Configuration>Release</Configuration>
-      <Platform>x64</Platform>
-    </ProjectConfiguration>
-  </ItemGroup>
-
-  <PropertyGroup>
-    <!-- Assume Visual Studio 2013 / 12.0 as the default -->
-  <VisualStudioVersion Condition="'$(VisualStudioVersion)' == ''">12.0</VisualStudioVersion>
-  </PropertyGroup>
-  <!-- Visual Studio 2013 (12.0) -->
-  <PropertyGroup Condition="'$(VisualStudioVersion)' == '12.0'">
-    <PlatformToolset>v120</PlatformToolset>
-  </PropertyGroup>
-  <!-- Visual Studio 2015 (14.0) -->
-  <PropertyGroup Condition="'$(VisualStudioVersion)' == '14.0'">
-    <PlatformToolset>v140</PlatformToolset>
-  </PropertyGroup>
-  <PropertyGroup Condition="'$(Configuration)'=='Debug'" Label="Configuration">
-    <UseDebugLibraries>true</UseDebugLibraries>
-  </PropertyGroup>
-  <PropertyGroup Condition="'$(Configuration)'=='Release'" Label="Configuration">
-    <UseDebugLibraries>false</UseDebugLibraries>
-  </PropertyGroup>
-  <PropertyGroup Label="Configuration">
-    <CharacterSet>Unicode</CharacterSet>
-  </PropertyGroup>
-  <PropertyGroup>
-    <BuildOutputDir>$(SolutionDir)\outdir\$(PlatformToolSet)\$(Platform)\$(Configuration)\</BuildOutputDir>
-    <BuildIntDir>interim\$(PlatformToolSet)\$(Platform)\$(Configuration)\</BuildIntDir>
-  </PropertyGroup>
-
-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
-  <ImportGroup Label="ExtensionSettings">
-  </ImportGroup>
-  <ImportGroup Label="PropertySheets">
-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
-  </ImportGroup>
-
-
-  <PropertyGroup Label="Configuration">
-    <WholeProgramOptimization>true</WholeProgramOptimization>
-  </PropertyGroup>
-
-  <PropertyGroup>
-    <OutDir>$(BuildOutputDir)</OutDir>
-    <IntDir>$(BuildIntDir)</IntDir>
-  </PropertyGroup>
-
-  <PropertyGroup Condition="'$(Configuration)'=='Release'" Label="Configuration">
-    <LinkIncremental>false</LinkIncremental>
-    <UseDebugLibraries>false</UseDebugLibraries>
-  </PropertyGroup>
-
-  <PropertyGroup Condition="'$(Configuration)'=='Debug'" Label="Configuration">
-    <LinkIncremental>true</LinkIncremental>
-    <UseDebugLibraries>true</UseDebugLibraries>
-  </PropertyGroup>
-
-</Project>

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/win32/interceptor_test/interceptor_test.vcxproj
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/win32/interceptor_test/interceptor_test.vcxproj b/thirdparty/librdkafka-0.11.1/win32/interceptor_test/interceptor_test.vcxproj
deleted file mode 100644
index bf1676b..0000000
--- a/thirdparty/librdkafka-0.11.1/win32/interceptor_test/interceptor_test.vcxproj
+++ /dev/null
@@ -1,87 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<Project DefaultTargets="Build" ToolsVersion="12.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
-  <PropertyGroup Label="Globals">
-    <ProjectGuid>{492CF5A9-EBF5-494E-8F71-B9B262C4D220}</ProjectGuid>
-    <Keyword>Win32Proj</Keyword>
-    <RootNamespace>interceptor_test</RootNamespace>
-    <ProjectName>interceptor_test</ProjectName>
-    <WindowsTargetPlatformVersion>8.1</WindowsTargetPlatformVersion>
-  </PropertyGroup>
-  <PropertyGroup Label="Configuration">
-    <ConfigurationType>DynamicLibrary</ConfigurationType>
-  </PropertyGroup>
-  <Import Project="$(SolutionDir)common.vcxproj" />
-  <PropertyGroup Label="UserMacros" />
-  <ItemDefinitionGroup>
-    <Link>
-      <SubSystem>Windows</SubSystem>
-      <GenerateDebugInformation>true</GenerateDebugInformation>
-      <AdditionalDependencies>librdkafka.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
-      <AdditionalLibraryDirectories>$(BuildOutputDir)</AdditionalLibraryDirectories>
-    </Link>
-  </ItemDefinitionGroup>
-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
-    <ClCompile>
-      <PrecompiledHeader>
-      </PrecompiledHeader>
-      <WarningLevel>Level3</WarningLevel>
-      <Optimization>Disabled</Optimization>
-      <PreprocessorDefinitions>WIN32;_DEBUG;_WINDOWS;_USRDLL;%(PreprocessorDefinitions)</PreprocessorDefinitions>
-      <SDLCheck>true</SDLCheck>
-      <AdditionalOptions>/J %(AdditionalOptions)</AdditionalOptions>
-      <AdditionalIncludeDirectories>$(SolutionDir)\..\src;$(SolutionDir)\..\src-cpp</AdditionalIncludeDirectories>
-    </ClCompile>
-  </ItemDefinitionGroup>
-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
-    <ClCompile>
-      <PrecompiledHeader>
-      </PrecompiledHeader>
-      <WarningLevel>Level3</WarningLevel>
-      <Optimization>Disabled</Optimization>
-      <PreprocessorDefinitions>WIN32;_DEBUG;_WINDOWS;_USRDLL;%(PreprocessorDefinitions)</PreprocessorDefinitions>
-      <SDLCheck>true</SDLCheck>
-      <AdditionalOptions>/J %(AdditionalOptions)</AdditionalOptions>
-      <AdditionalIncludeDirectories>$(SolutionDir)\..\src;$(SolutionDir)\..\src-cpp</AdditionalIncludeDirectories>
-    </ClCompile>
-  </ItemDefinitionGroup>
-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
-    <ClCompile>
-      <WarningLevel>Level3</WarningLevel>
-      <PrecompiledHeader>
-      </PrecompiledHeader>
-      <Optimization>MaxSpeed</Optimization>
-      <FunctionLevelLinking>true</FunctionLevelLinking>
-      <IntrinsicFunctions>true</IntrinsicFunctions>
-      <PreprocessorDefinitions>WIN32;NDEBUG;_WINDOWS;_USRDLL;%(PreprocessorDefinitions)</PreprocessorDefinitions>
-      <SDLCheck>true</SDLCheck>
-      <AdditionalIncludeDirectories>$(SolutionDir)\..\src;$(SolutionDir)\..\src-cpp</AdditionalIncludeDirectories>
-    </ClCompile>
-    <Link>
-      <EnableCOMDATFolding>true</EnableCOMDATFolding>
-      <OptimizeReferences>true</OptimizeReferences>
-    </Link>
-  </ItemDefinitionGroup>
-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
-    <ClCompile>
-      <WarningLevel>Level3</WarningLevel>
-      <PrecompiledHeader>
-      </PrecompiledHeader>
-      <Optimization>MaxSpeed</Optimization>
-      <FunctionLevelLinking>true</FunctionLevelLinking>
-      <IntrinsicFunctions>true</IntrinsicFunctions>
-      <PreprocessorDefinitions>WIN32;NDEBUG;_WINDOWS;_USRDLL;%(PreprocessorDefinitions)</PreprocessorDefinitions>
-      <SDLCheck>true</SDLCheck>
-      <AdditionalIncludeDirectories>$(SolutionDir)\..\src;$(SolutionDir)\..\src-cpp</AdditionalIncludeDirectories>
-    </ClCompile>
-    <Link>
-      <GenerateDebugInformation>true</GenerateDebugInformation>
-      <OptimizeReferences>true</OptimizeReferences>
-    </Link>
-  </ItemDefinitionGroup>
-  <ItemGroup>
-    <ClCompile Include="..\..\tests\interceptor_test\interceptor_test.c" />
-  </ItemGroup>
-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
-  <ImportGroup Label="ExtensionTargets">
-  </ImportGroup>
-</Project>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/win32/librdkafka.autopkg.template
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/win32/librdkafka.autopkg.template b/thirdparty/librdkafka-0.11.1/win32/librdkafka.autopkg.template
deleted file mode 100644
index eeeab06..0000000
--- a/thirdparty/librdkafka-0.11.1/win32/librdkafka.autopkg.template
+++ /dev/null
@@ -1,55 +0,0 @@
-configurations {
-    Toolset { 
-        key : "PlatformToolset"; 
-        choices: { v120, v140 };  
- 
-        // Explicitly Not including pivot variants:  "WindowsKernelModeDriver8.0", "WindowsApplicationForDrivers8.0", "WindowsUserModeDriver8.0" 
-
-        // We're normalizing out the concept of the v140 platform -- Overloading the $(PlatformToolset) variable for additional pivots was a dumb idea.
-        v140.condition = "( $(PlatformToolset.ToLower().IndexOf('v140')) > -1 Or '$(PlatformToolset.ToLower())' == 'windowskernelmodedriver8.0' Or '$(PlatformToolset.ToLower())' == 'windowsapplicationfordrivers8.0' Or '$(PlatformToolset.ToLower())' == 'windowsusermodedriver8.0' )";
-    };
- };
-
-nuget {
- nuspec {
-        id = librdkafka;
-	    // "@version" is replaced by the current Appveyor build number in the
-        // pre-deployment script.
-        version : @version;
-        title: "librdkafka";
-        authors: {Magnus Edenhill, edenhill};
-        owners: {Magnus Edenhill, edenhill};
-        licenseUrl: "https://github.com/edenhill/librdkafka/blob/master/LICENSES.txt";
-        projectUrl: "https://github.com/edenhill/librdkafka";
-        requireLicenseAcceptance: false;
-        summary: "The Apache Kafka C/C++ client library";
-		description:"The Apache Kafka C/C++ client library";
-        releaseNotes: "Release of librdkafka";
-        copyright: "Copyright 2016";
-        tags: { native, kafka, librdkafka, C, C++ };
- };
-
- files {
-	#defines {
-	  TOPDIR = ..\;
-    };
-	nestedInclude: {
-		#destination = ${d_include}librdkafka;
-		${TOPDIR}src\rdkafka.h, ${TOPDIR}src-cpp\rdkafkacpp.h
-	};
-	docs: { ${TOPDIR}README.md, ${TOPDIR}CONFIGURATION.md, ${TOPDIR}LICENSES.txt };
-
-        ("v120,v140", "Win32,x64", "Release,Debug") => {
-           [${0},${1},${2}] {
-		lib: { outdir\${0}\${1}\${2}\librdkafka*.lib };
-		symbols: { outdir\${0}\${1}\${2}\librdkafka*.pdb };
-		bin: { outdir\${0}\${1}\${2}\*.dll };
-           };
-	};
-
- };
-
- targets {
-	Defines += HAS_LIBRDKAFKA;
- };
-};
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/win32/librdkafka.master.testing.targets
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/win32/librdkafka.master.testing.targets b/thirdparty/librdkafka-0.11.1/win32/librdkafka.master.testing.targets
deleted file mode 100644
index bccf4db..0000000
--- a/thirdparty/librdkafka-0.11.1/win32/librdkafka.master.testing.targets
+++ /dev/null
@@ -1,13 +0,0 @@
-<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
-  <ItemDefinitionGroup>
-    <Link>
-      <AdditionalDependencies>$(MSBuildThisFileDirectory)..\..\package-win\runtimes\$(Configuration)\win7-$(Platform)\native\librdkafka.lib;%(AdditionalDependencies)</AdditionalDependencies>
-    </Link>
-    <ClCompile>
-      <AdditionalIncludeDirectories>$(MSBuildThisFileDirectory)include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
-    </ClCompile>
-  </ItemDefinitionGroup>
-  <ItemGroup>
-    <ReferenceCopyLocalPaths Include="$(MSBuildThisFileDirectory)..\..\package-win\runtimes\$(Configuration)\win7-$(Platform)\librdkafka.dll" />
-  </ItemGroup>
-</Project>

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/win32/librdkafka.sln
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/win32/librdkafka.sln b/thirdparty/librdkafka-0.11.1/win32/librdkafka.sln
deleted file mode 100644
index 820db38..0000000
--- a/thirdparty/librdkafka-0.11.1/win32/librdkafka.sln
+++ /dev/null
@@ -1,176 +0,0 @@
-Microsoft Visual Studio Solution File, Format Version 12.00
-# Visual Studio 14
-VisualStudioVersion = 14.0.25420.1
-MinimumVisualStudioVersion = 10.0.40219.1
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "librdkafka", "librdkafka.vcxproj", "{4BEBB59C-477B-4F7A-8AE8-4228D0861E54}"
-EndProject
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "librdkafkacpp", "librdkafkacpp\librdkafkacpp.vcxproj", "{E9641737-EE62-4EC8-88C8-792D2E3CE32D}"
-	ProjectSection(ProjectDependencies) = postProject
-		{4BEBB59C-477B-4F7A-8AE8-4228D0861E54} = {4BEBB59C-477B-4F7A-8AE8-4228D0861E54}
-	EndProjectSection
-EndProject
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "tests", "tests\tests.vcxproj", "{BE4E1264-5D13-423D-8191-71F7041459E7}"
-	ProjectSection(ProjectDependencies) = postProject
-		{E9641737-EE62-4EC8-88C8-792D2E3CE32D} = {E9641737-EE62-4EC8-88C8-792D2E3CE32D}
-		{4BEBB59C-477B-4F7A-8AE8-4228D0861E54} = {4BEBB59C-477B-4F7A-8AE8-4228D0861E54}
-	EndProjectSection
-EndProject
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "rdkafka_example", "rdkafka_example\rdkafka_example.vcxproj", "{84585784-5BDC-43BE-B714-23EA2E7AEA5B}"
-	ProjectSection(ProjectDependencies) = postProject
-		{E9641737-EE62-4EC8-88C8-792D2E3CE32D} = {E9641737-EE62-4EC8-88C8-792D2E3CE32D}
-	EndProjectSection
-EndProject
-Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Solution Items", "Solution Items", "{AE17F6C0-6C4D-4E92-A04D-48214C70D1AC}"
-	ProjectSection(SolutionItems) = preProject
-		librdkafka.autopkg = librdkafka.autopkg
-		librdkafka.nuspec = librdkafka.nuspec
-		librdkafka.testing.targets = librdkafka.testing.targets
-	EndProjectSection
-EndProject
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "rdkafka_consumer_example_cpp", "rdkafka_consumer_example_cpp\rdkafka_consumer_example_cpp.vcxproj", "{88B682AB-5082-49D5-A672-9904C5F43ABB}"
-	ProjectSection(ProjectDependencies) = postProject
-		{E9641737-EE62-4EC8-88C8-792D2E3CE32D} = {E9641737-EE62-4EC8-88C8-792D2E3CE32D}
-	EndProjectSection
-EndProject
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "rdkafka_performance", "rdkafka_performance\rdkafka_performance.vcxproj", "{82A67CAA-44B5-4F7D-BAC4-D126CC81FBEC}"
-	ProjectSection(ProjectDependencies) = postProject
-		{4BEBB59C-477B-4F7A-8AE8-4228D0861E54} = {4BEBB59C-477B-4F7A-8AE8-4228D0861E54}
-	EndProjectSection
-EndProject
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "interceptor_test", "interceptor_test\interceptor_test.vcxproj", "{492CF5A9-EBF5-494E-8F71-B9B262C4D220}"
-	ProjectSection(ProjectDependencies) = postProject
-		{4BEBB59C-477B-4F7A-8AE8-4228D0861E54} = {4BEBB59C-477B-4F7A-8AE8-4228D0861E54}
-	EndProjectSection
-EndProject
-Global
-	GlobalSection(Performance) = preSolution
-		HasPerformanceSessions = true
-	EndGlobalSection
-	GlobalSection(SolutionConfigurationPlatforms) = preSolution
-		Debug|Any CPU = Debug|Any CPU
-		Debug|Mixed Platforms = Debug|Mixed Platforms
-		Debug|Win32 = Debug|Win32
-		Debug|x64 = Debug|x64
-		Debug|x86 = Debug|x86
-		Release|Any CPU = Release|Any CPU
-		Release|Mixed Platforms = Release|Mixed Platforms
-		Release|Win32 = Release|Win32
-		Release|x64 = Release|x64
-		Release|x86 = Release|x86
-	EndGlobalSection
-	GlobalSection(ProjectConfigurationPlatforms) = postSolution
-		{4BEBB59C-477B-4F7A-8AE8-4228D0861E54}.Debug|Any CPU.ActiveCfg = Debug|Win32
-		{4BEBB59C-477B-4F7A-8AE8-4228D0861E54}.Debug|Mixed Platforms.ActiveCfg = Debug|Win32
-		{4BEBB59C-477B-4F7A-8AE8-4228D0861E54}.Debug|Mixed Platforms.Build.0 = Debug|Win32
-		{4BEBB59C-477B-4F7A-8AE8-4228D0861E54}.Debug|Win32.ActiveCfg = Debug|Win32
-		{4BEBB59C-477B-4F7A-8AE8-4228D0861E54}.Debug|Win32.Build.0 = Debug|Win32
-		{4BEBB59C-477B-4F7A-8AE8-4228D0861E54}.Debug|x64.ActiveCfg = Debug|x64
-		{4BEBB59C-477B-4F7A-8AE8-4228D0861E54}.Debug|x64.Build.0 = Debug|x64
-		{4BEBB59C-477B-4F7A-8AE8-4228D0861E54}.Debug|x86.ActiveCfg = Debug|Win32
-		{4BEBB59C-477B-4F7A-8AE8-4228D0861E54}.Release|Any CPU.ActiveCfg = Release|Win32
-		{4BEBB59C-477B-4F7A-8AE8-4228D0861E54}.Release|Mixed Platforms.ActiveCfg = Release|Win32
-		{4BEBB59C-477B-4F7A-8AE8-4228D0861E54}.Release|Mixed Platforms.Build.0 = Release|Win32
-		{4BEBB59C-477B-4F7A-8AE8-4228D0861E54}.Release|Win32.ActiveCfg = Release|Win32
-		{4BEBB59C-477B-4F7A-8AE8-4228D0861E54}.Release|Win32.Build.0 = Release|Win32
-		{4BEBB59C-477B-4F7A-8AE8-4228D0861E54}.Release|x64.ActiveCfg = Release|x64
-		{4BEBB59C-477B-4F7A-8AE8-4228D0861E54}.Release|x64.Build.0 = Release|x64
-		{4BEBB59C-477B-4F7A-8AE8-4228D0861E54}.Release|x86.ActiveCfg = Release|Win32
-		{E9641737-EE62-4EC8-88C8-792D2E3CE32D}.Debug|Any CPU.ActiveCfg = Debug|Win32
-		{E9641737-EE62-4EC8-88C8-792D2E3CE32D}.Debug|Mixed Platforms.ActiveCfg = Debug|Win32
-		{E9641737-EE62-4EC8-88C8-792D2E3CE32D}.Debug|Mixed Platforms.Build.0 = Debug|Win32
-		{E9641737-EE62-4EC8-88C8-792D2E3CE32D}.Debug|Win32.ActiveCfg = Debug|Win32
-		{E9641737-EE62-4EC8-88C8-792D2E3CE32D}.Debug|Win32.Build.0 = Debug|Win32
-		{E9641737-EE62-4EC8-88C8-792D2E3CE32D}.Debug|x64.ActiveCfg = Debug|x64
-		{E9641737-EE62-4EC8-88C8-792D2E3CE32D}.Debug|x64.Build.0 = Debug|x64
-		{E9641737-EE62-4EC8-88C8-792D2E3CE32D}.Debug|x86.ActiveCfg = Debug|Win32
-		{E9641737-EE62-4EC8-88C8-792D2E3CE32D}.Release|Any CPU.ActiveCfg = Release|Win32
-		{E9641737-EE62-4EC8-88C8-792D2E3CE32D}.Release|Mixed Platforms.ActiveCfg = Release|Win32
-		{E9641737-EE62-4EC8-88C8-792D2E3CE32D}.Release|Mixed Platforms.Build.0 = Release|Win32
-		{E9641737-EE62-4EC8-88C8-792D2E3CE32D}.Release|Win32.ActiveCfg = Release|Win32
-		{E9641737-EE62-4EC8-88C8-792D2E3CE32D}.Release|Win32.Build.0 = Release|Win32
-		{E9641737-EE62-4EC8-88C8-792D2E3CE32D}.Release|x64.ActiveCfg = Release|x64
-		{E9641737-EE62-4EC8-88C8-792D2E3CE32D}.Release|x64.Build.0 = Release|x64
-		{E9641737-EE62-4EC8-88C8-792D2E3CE32D}.Release|x86.ActiveCfg = Release|Win32
-		{BE4E1264-5D13-423D-8191-71F7041459E7}.Debug|Any CPU.ActiveCfg = Debug|Win32
-		{BE4E1264-5D13-423D-8191-71F7041459E7}.Debug|Mixed Platforms.ActiveCfg = Debug|Win32
-		{BE4E1264-5D13-423D-8191-71F7041459E7}.Debug|Mixed Platforms.Build.0 = Debug|Win32
-		{BE4E1264-5D13-423D-8191-71F7041459E7}.Debug|Win32.ActiveCfg = Debug|Win32
-		{BE4E1264-5D13-423D-8191-71F7041459E7}.Debug|Win32.Build.0 = Debug|Win32
-		{BE4E1264-5D13-423D-8191-71F7041459E7}.Debug|x64.ActiveCfg = Debug|x64
-		{BE4E1264-5D13-423D-8191-71F7041459E7}.Debug|x64.Build.0 = Debug|x64
-		{BE4E1264-5D13-423D-8191-71F7041459E7}.Debug|x86.ActiveCfg = Debug|Win32
-		{BE4E1264-5D13-423D-8191-71F7041459E7}.Release|Any CPU.ActiveCfg = Release|Win32
-		{BE4E1264-5D13-423D-8191-71F7041459E7}.Release|Mixed Platforms.ActiveCfg = Release|Win32
-		{BE4E1264-5D13-423D-8191-71F7041459E7}.Release|Mixed Platforms.Build.0 = Release|Win32
-		{BE4E1264-5D13-423D-8191-71F7041459E7}.Release|Win32.ActiveCfg = Release|Win32
-		{BE4E1264-5D13-423D-8191-71F7041459E7}.Release|Win32.Build.0 = Release|Win32
-		{BE4E1264-5D13-423D-8191-71F7041459E7}.Release|x64.ActiveCfg = Release|x64
-		{BE4E1264-5D13-423D-8191-71F7041459E7}.Release|x64.Build.0 = Release|x64
-		{BE4E1264-5D13-423D-8191-71F7041459E7}.Release|x86.ActiveCfg = Release|Win32
-		{84585784-5BDC-43BE-B714-23EA2E7AEA5B}.Debug|Any CPU.ActiveCfg = Debug|Win32
-		{84585784-5BDC-43BE-B714-23EA2E7AEA5B}.Debug|Mixed Platforms.ActiveCfg = Debug|Win32
-		{84585784-5BDC-43BE-B714-23EA2E7AEA5B}.Debug|Mixed Platforms.Build.0 = Debug|Win32
-		{84585784-5BDC-43BE-B714-23EA2E7AEA5B}.Debug|Win32.ActiveCfg = Debug|Win32
-		{84585784-5BDC-43BE-B714-23EA2E7AEA5B}.Debug|Win32.Build.0 = Debug|Win32
-		{84585784-5BDC-43BE-B714-23EA2E7AEA5B}.Debug|x64.ActiveCfg = Debug|x64
-		{84585784-5BDC-43BE-B714-23EA2E7AEA5B}.Debug|x64.Build.0 = Debug|x64
-		{84585784-5BDC-43BE-B714-23EA2E7AEA5B}.Debug|x86.ActiveCfg = Debug|Win32
-		{84585784-5BDC-43BE-B714-23EA2E7AEA5B}.Release|Any CPU.ActiveCfg = Release|Win32
-		{84585784-5BDC-43BE-B714-23EA2E7AEA5B}.Release|Mixed Platforms.ActiveCfg = Release|Win32
-		{84585784-5BDC-43BE-B714-23EA2E7AEA5B}.Release|Mixed Platforms.Build.0 = Release|Win32
-		{84585784-5BDC-43BE-B714-23EA2E7AEA5B}.Release|Win32.ActiveCfg = Release|Win32
-		{84585784-5BDC-43BE-B714-23EA2E7AEA5B}.Release|Win32.Build.0 = Release|Win32
-		{84585784-5BDC-43BE-B714-23EA2E7AEA5B}.Release|x64.ActiveCfg = Release|x64
-		{84585784-5BDC-43BE-B714-23EA2E7AEA5B}.Release|x64.Build.0 = Release|x64
-		{84585784-5BDC-43BE-B714-23EA2E7AEA5B}.Release|x86.ActiveCfg = Release|Win32
-		{88B682AB-5082-49D5-A672-9904C5F43ABB}.Debug|Any CPU.ActiveCfg = Debug|Win32
-		{88B682AB-5082-49D5-A672-9904C5F43ABB}.Debug|Mixed Platforms.ActiveCfg = Debug|Win32
-		{88B682AB-5082-49D5-A672-9904C5F43ABB}.Debug|Mixed Platforms.Build.0 = Debug|Win32
-		{88B682AB-5082-49D5-A672-9904C5F43ABB}.Debug|Win32.ActiveCfg = Debug|Win32
-		{88B682AB-5082-49D5-A672-9904C5F43ABB}.Debug|Win32.Build.0 = Debug|Win32
-		{88B682AB-5082-49D5-A672-9904C5F43ABB}.Debug|x64.ActiveCfg = Debug|x64
-		{88B682AB-5082-49D5-A672-9904C5F43ABB}.Debug|x64.Build.0 = Debug|x64
-		{88B682AB-5082-49D5-A672-9904C5F43ABB}.Debug|x86.ActiveCfg = Debug|Win32
-		{88B682AB-5082-49D5-A672-9904C5F43ABB}.Release|Any CPU.ActiveCfg = Release|Win32
-		{88B682AB-5082-49D5-A672-9904C5F43ABB}.Release|Mixed Platforms.ActiveCfg = Release|Win32
-		{88B682AB-5082-49D5-A672-9904C5F43ABB}.Release|Mixed Platforms.Build.0 = Release|Win32
-		{88B682AB-5082-49D5-A672-9904C5F43ABB}.Release|Win32.ActiveCfg = Release|Win32
-		{88B682AB-5082-49D5-A672-9904C5F43ABB}.Release|Win32.Build.0 = Release|Win32
-		{88B682AB-5082-49D5-A672-9904C5F43ABB}.Release|x64.ActiveCfg = Release|x64
-		{88B682AB-5082-49D5-A672-9904C5F43ABB}.Release|x64.Build.0 = Release|x64
-		{88B682AB-5082-49D5-A672-9904C5F43ABB}.Release|x86.ActiveCfg = Release|Win32
-		{82A67CAA-44B5-4F7D-BAC4-D126CC81FBEC}.Debug|Any CPU.ActiveCfg = Debug|Win32
-		{82A67CAA-44B5-4F7D-BAC4-D126CC81FBEC}.Debug|Mixed Platforms.ActiveCfg = Debug|Win32
-		{82A67CAA-44B5-4F7D-BAC4-D126CC81FBEC}.Debug|Mixed Platforms.Build.0 = Debug|Win32
-		{82A67CAA-44B5-4F7D-BAC4-D126CC81FBEC}.Debug|Win32.ActiveCfg = Debug|Win32
-		{82A67CAA-44B5-4F7D-BAC4-D126CC81FBEC}.Debug|Win32.Build.0 = Debug|Win32
-		{82A67CAA-44B5-4F7D-BAC4-D126CC81FBEC}.Debug|x64.ActiveCfg = Debug|x64
-		{82A67CAA-44B5-4F7D-BAC4-D126CC81FBEC}.Debug|x64.Build.0 = Debug|x64
-		{82A67CAA-44B5-4F7D-BAC4-D126CC81FBEC}.Debug|x86.ActiveCfg = Debug|Win32
-		{82A67CAA-44B5-4F7D-BAC4-D126CC81FBEC}.Debug|x86.Build.0 = Debug|Win32
-		{82A67CAA-44B5-4F7D-BAC4-D126CC81FBEC}.Release|Any CPU.ActiveCfg = Release|Win32
-		{82A67CAA-44B5-4F7D-BAC4-D126CC81FBEC}.Release|Mixed Platforms.ActiveCfg = Release|Win32
-		{82A67CAA-44B5-4F7D-BAC4-D126CC81FBEC}.Release|Mixed Platforms.Build.0 = Release|Win32
-		{82A67CAA-44B5-4F7D-BAC4-D126CC81FBEC}.Release|Win32.ActiveCfg = Release|Win32
-		{82A67CAA-44B5-4F7D-BAC4-D126CC81FBEC}.Release|Win32.Build.0 = Release|Win32
-		{82A67CAA-44B5-4F7D-BAC4-D126CC81FBEC}.Release|x64.ActiveCfg = Release|x64
-		{82A67CAA-44B5-4F7D-BAC4-D126CC81FBEC}.Release|x64.Build.0 = Release|x64
-		{82A67CAA-44B5-4F7D-BAC4-D126CC81FBEC}.Release|x86.ActiveCfg = Release|Win32
-		{82A67CAA-44B5-4F7D-BAC4-D126CC81FBEC}.Release|x86.Build.0 = Release|Win32
-		{492CF5A9-EBF5-494E-8F71-B9B262C4D220}.Debug|Any CPU.ActiveCfg = Debug|Win32
-		{492CF5A9-EBF5-494E-8F71-B9B262C4D220}.Debug|Mixed Platforms.ActiveCfg = Debug|Win32
-		{492CF5A9-EBF5-494E-8F71-B9B262C4D220}.Debug|Mixed Platforms.Build.0 = Debug|Win32
-		{492CF5A9-EBF5-494E-8F71-B9B262C4D220}.Debug|Win32.ActiveCfg = Debug|Win32
-		{492CF5A9-EBF5-494E-8F71-B9B262C4D220}.Debug|x64.ActiveCfg = Debug|x64
-		{492CF5A9-EBF5-494E-8F71-B9B262C4D220}.Debug|x86.ActiveCfg = Debug|Win32
-		{492CF5A9-EBF5-494E-8F71-B9B262C4D220}.Release|Any CPU.ActiveCfg = Release|Win32
-		{492CF5A9-EBF5-494E-8F71-B9B262C4D220}.Release|Mixed Platforms.ActiveCfg = Release|Win32
-		{492CF5A9-EBF5-494E-8F71-B9B262C4D220}.Release|Mixed Platforms.Build.0 = Release|Win32
-		{492CF5A9-EBF5-494E-8F71-B9B262C4D220}.Release|Win32.ActiveCfg = Release|Win32
-		{492CF5A9-EBF5-494E-8F71-B9B262C4D220}.Release|x64.ActiveCfg = Release|x64
-		{492CF5A9-EBF5-494E-8F71-B9B262C4D220}.Release|x86.ActiveCfg = Release|Win32
-	EndGlobalSection
-	GlobalSection(SolutionProperties) = preSolution
-		HideSolutionNode = FALSE
-	EndGlobalSection
-EndGlobal

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/win32/librdkafka.vcxproj
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/win32/librdkafka.vcxproj b/thirdparty/librdkafka-0.11.1/win32/librdkafka.vcxproj
deleted file mode 100644
index 1c0d844..0000000
--- a/thirdparty/librdkafka-0.11.1/win32/librdkafka.vcxproj
+++ /dev/null
@@ -1,229 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<Project DefaultTargets="Build" ToolsVersion="12.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
-  <PropertyGroup Label="Globals">
-    <ProjectGuid>{4BEBB59C-477B-4F7A-8AE8-4228D0861E54}</ProjectGuid>
-    <Keyword>Win32Proj</Keyword>
-    <RootNamespace>librdkafka</RootNamespace>
-    <WindowsTargetPlatformVersion>8.1</WindowsTargetPlatformVersion>
-  </PropertyGroup>
-  <PropertyGroup Label="Configuration">
-    <ConfigurationType>DynamicLibrary</ConfigurationType>
-  </PropertyGroup>
-  <Import Project="$(SolutionDir)common.vcxproj" />
-  <PropertyGroup Label="UserMacros" />
-  <PropertyGroup Condition="'$(Platform)'=='Win32'">
-    <IncludePath>$(VC_IncludePath);$(WindowsSDK_IncludePath);C:\OpenSSL-Win32\include</IncludePath>
-    <LibraryPath>$(VC_LibraryPath_x86);$(WindowsSDK_LibraryPath_x86);C:\OpenSSL-Win32\lib\VC\static</LibraryPath>
-  </PropertyGroup>
-  <PropertyGroup Condition="'$(Platform)'=='x64'">
-    <IncludePath>$(VC_IncludePath);$(WindowsSDK_IncludePath);C:\OpenSSL-Win64\include</IncludePath>
-    <LibraryPath>$(VC_LibraryPath_x64);$(WindowsSDK_LibraryPath_x64);C:\OpenSSL-Win64\lib\VC\static</LibraryPath>
-  </PropertyGroup>
-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
-    <ClCompile>
-      <PrecompiledHeader>NotUsing</PrecompiledHeader>
-      <WarningLevel>Level3</WarningLevel>
-      <PreprocessorDefinitions>WIN32;_DEBUG;_WINDOWS;_USRDLL;LIBRDKAFKA_EXPORTS;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
-      <InlineFunctionExpansion>Default</InlineFunctionExpansion>
-      <IntrinsicFunctions>true</IntrinsicFunctions>
-      <FavorSizeOrSpeed>Speed</FavorSizeOrSpeed>
-      <AdditionalOptions>/J %(AdditionalOptions)</AdditionalOptions>
-      <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
-    </ClCompile>
-    <Link>
-      <SubSystem>Windows</SubSystem>
-      <GenerateDebugInformation>true</GenerateDebugInformation>
-      <AdditionalDependencies>kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies);libeay32MT.lib;ssleay32MT.lib</AdditionalDependencies>
-    </Link>
-  </ItemDefinitionGroup>
-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
-    <ClCompile>
-      <PrecompiledHeader>NotUsing</PrecompiledHeader>
-      <WarningLevel>Level3</WarningLevel>
-      <PreprocessorDefinitions>WIN32;_DEBUG;_WINDOWS;_USRDLL;LIBRDKAFKA_EXPORTS;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
-      <InlineFunctionExpansion>Default</InlineFunctionExpansion>
-      <IntrinsicFunctions>true</IntrinsicFunctions>
-      <FavorSizeOrSpeed>Speed</FavorSizeOrSpeed>
-      <AdditionalOptions>/J %(AdditionalOptions)</AdditionalOptions>
-      <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
-    </ClCompile>
-    <Link>
-      <SubSystem>Windows</SubSystem>
-      <GenerateDebugInformation>true</GenerateDebugInformation>
-      <AdditionalDependencies>kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies);libeay32MT.lib;ssleay32MT.lib</AdditionalDependencies>
-    </Link>
-  </ItemDefinitionGroup>
-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
-    <ClCompile>
-      <WarningLevel>Level3</WarningLevel>
-      <PrecompiledHeader>
-      </PrecompiledHeader>
-      <Optimization>MaxSpeed</Optimization>
-      <FunctionLevelLinking>true</FunctionLevelLinking>
-      <IntrinsicFunctions>true</IntrinsicFunctions>
-      <PreprocessorDefinitions>WIN32;NDEBUG;_WINDOWS;_USRDLL;LIBRDKAFKA_EXPORTS;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
-      <RuntimeLibrary>MultiThreaded</RuntimeLibrary>
-    </ClCompile>
-    <Link>
-      <SubSystem>Windows</SubSystem>
-      <GenerateDebugInformation>true</GenerateDebugInformation>
-      <EnableCOMDATFolding>true</EnableCOMDATFolding>
-      <OptimizeReferences>true</OptimizeReferences>
-      <AdditionalDependencies>kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies);libeay32MT.lib;ssleay32MT.lib</AdditionalDependencies>
-    </Link>
-  </ItemDefinitionGroup>
-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
-    <ClCompile>
-      <WarningLevel>Level3</WarningLevel>
-      <PrecompiledHeader>
-      </PrecompiledHeader>
-      <Optimization>MaxSpeed</Optimization>
-      <FunctionLevelLinking>true</FunctionLevelLinking>
-      <IntrinsicFunctions>true</IntrinsicFunctions>
-      <PreprocessorDefinitions>WIN32;NDEBUG;_WINDOWS;_USRDLL;LIBRDKAFKA_EXPORTS;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
-      <RuntimeLibrary>MultiThreaded</RuntimeLibrary>
-    </ClCompile>
-    <Link>
-      <SubSystem>Windows</SubSystem>
-      <GenerateDebugInformation>true</GenerateDebugInformation>
-      <EnableCOMDATFolding>true</EnableCOMDATFolding>
-      <OptimizeReferences>true</OptimizeReferences>
-      <AdditionalDependencies>kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies);libeay32MT.lib;ssleay32MT.lib</AdditionalDependencies>
-    </Link>
-  </ItemDefinitionGroup>
-  <ItemGroup>
-    <ClInclude Include="..\src\crc32c.h" />
-    <ClInclude Include="..\src\queue.h" />
-    <ClInclude Include="..\src\rdatomic.h" />
-    <ClInclude Include="..\src\rdavg.h" />
-    <ClInclude Include="..\src\rdbuf.h" />
-    <ClInclude Include="..\src\rdendian.h" />
-    <ClInclude Include="..\src\rdgz.h" />
-    <ClInclude Include="..\src\rdinterval.h" />
-    <ClInclude Include="..\src\rdkafka_assignor.h" />
-    <ClInclude Include="..\src\rdkafka_buf.h" />
-    <ClInclude Include="..\src\rdkafka_cgrp.h" />
-    <ClInclude Include="..\src\rdkafka_conf.h" />
-    <ClInclude Include="..\src\rdkafka_event.h" />
-    <ClInclude Include="..\src\rdkafka_feature.h" />
-    <ClInclude Include="..\src\rdkafka_lz4.h" />
-    <ClInclude Include="..\src\rdkafka_msgset.h" />
-    <ClInclude Include="..\src\rdkafka_op.h" />
-    <ClInclude Include="..\src\rdkafka_partition.h" />
-    <ClInclude Include="..\src\rdkafka_pattern.h" />
-    <ClInclude Include="..\src\rdkafka_queue.h" />
-    <ClInclude Include="..\src\rdkafka_request.h" />
-    <ClInclude Include="..\src\rdkafka_sasl.h" />
-    <ClInclude Include="..\src\rdkafka_sasl_int.h" />
-    <ClInclude Include="..\src\rdkafka_subscription.h" />
-    <ClInclude Include="..\src\rdkafka_transport_int.h" />
-    <ClInclude Include="..\src\rdlist.h" />
-    <ClInclude Include="..\src\rdposix.h" />
-    <ClInclude Include="..\src\rd.h" />
-    <ClInclude Include="..\src\rdaddr.h" />
-    <ClInclude Include="..\src\rdcrc32.h" />
-    <ClInclude Include="..\src\rdkafka.h" />
-    <ClInclude Include="..\src\rdkafka_broker.h" />
-    <ClInclude Include="..\src\rdkafka_int.h" />
-    <ClInclude Include="..\src\rdkafka_msg.h" />
-    <ClInclude Include="..\src\rdkafka_offset.h" />
-    <ClInclude Include="..\src\rdkafka_proto.h" />
-    <ClInclude Include="..\src\rdkafka_timer.h" />
-    <ClInclude Include="..\src\rdkafka_topic.h" />
-    <ClInclude Include="..\src\rdkafka_transport.h" />
-    <ClInclude Include="..\src\rdkafka_metadata.h" />
-    <ClInclude Include="..\src\rdkafka_interceptor.h" />
-    <ClInclude Include="..\src\rdkafka_plugin.h" />
-    <ClInclude Include="..\src\rdlog.h" />
-    <ClInclude Include="..\src\rdstring.h" />
-    <ClInclude Include="..\src\rdrand.h" />
-    <ClInclude Include="..\src\rdsysqueue.h" />
-    <ClInclude Include="..\src\rdtime.h" />
-    <ClInclude Include="..\src\rdtypes.h" />
-    <ClInclude Include="..\src\rdregex.h" />
-    <ClInclude Include="..\src\rdunittest.h" />
-    <ClInclude Include="..\src\rdvarint.h" />
-    <ClInclude Include="..\src\snappy.h" />
-    <ClInclude Include="..\src\snappy_compat.h" />
-    <ClInclude Include="..\src\tinycthread.h" />
-    <ClInclude Include="..\src\rdwin32.h" />
-    <ClInclude Include="..\src\win32_config.h" />
-    <ClInclude Include="..\src\regexp.h" />
-    <ClInclude Include="..\src\rdavl.h" />
-    <ClInclude Include="..\src\rdports.h" />
-    <ClInclude Include="..\src\rddl.h" />
-  </ItemGroup>
-  <ItemGroup>
-    <ClCompile Include="..\src\crc32c.c" />
-    <ClCompile Include="..\src\rdaddr.c" />
-    <ClCompile Include="..\src\rdbuf.c" />
-    <ClCompile Include="..\src\rdcrc32.c" />
-    <ClCompile Include="..\src\rdgz.c" />
-    <ClCompile Include="..\src\rdkafka.c" />
-    <ClCompile Include="..\src\rdkafka_assignor.c" />
-    <ClCompile Include="..\src\rdkafka_broker.c" />
-    <ClCompile Include="..\src\rdkafka_cgrp.c" />
-    <ClCompile Include="..\src\rdkafka_conf.c" />
-    <ClCompile Include="..\src\rdkafka_event.c" />
-    <ClCompile Include="..\src\rdkafka_lz4.c" />
-    <ClCompile Include="..\src\rdkafka_msg.c" />
-    <ClCompile Include="..\src\rdkafka_msgset_reader.c" />
-    <ClCompile Include="..\src\rdkafka_msgset_writer.c" />
-    <ClCompile Include="..\src\rdkafka_offset.c" />
-    <ClCompile Include="..\src\rdkafka_op.c" />
-    <ClCompile Include="..\src\rdkafka_partition.c" />
-    <ClCompile Include="..\src\rdkafka_pattern.c" />
-    <ClCompile Include="..\src\rdkafka_queue.c" />
-    <ClCompile Include="..\src\rdkafka_range_assignor.c" />
-    <ClCompile Include="..\src\rdkafka_roundrobin_assignor.c" />
-    <ClCompile Include="..\src\rdkafka_request.c" />
-    <ClCompile Include="..\src\rdkafka_sasl.c" />
-    <ClCompile Include="..\src\rdkafka_sasl_win32.c" />
-    <ClCompile Include="..\src\rdkafka_sasl_plain.c" />
-    <ClCompile Include="..\src\rdkafka_sasl_scram.c" />
-    <ClCompile Include="..\src\rdkafka_subscription.c" />
-    <ClCompile Include="..\src\rdkafka_timer.c" />
-    <ClCompile Include="..\src\rdkafka_topic.c" />
-    <ClCompile Include="..\src\rdkafka_transport.c" />
-    <ClCompile Include="..\src\rdkafka_buf.c" />
-    <ClCompile Include="..\src\rdkafka_feature.c" />
-    <ClCompile Include="..\src\rdkafka_metadata.c" />
-    <ClCompile Include="..\src\rdkafka_metadata_cache.c" />
-    <ClCompile Include="..\src\rdkafka_interceptor.c" />
-    <ClCompile Include="..\src\rdkafka_plugin.c" />
-    <ClCompile Include="..\src\rdlist.c" />
-    <ClCompile Include="..\src\rdlog.c" />
-    <ClCompile Include="..\src\rdstring.c" />
-    <ClCompile Include="..\src\rdrand.c" />
-    <ClCompile Include="..\src\rdregex.c" />
-    <ClCompile Include="..\src\rdunittest.c" />
-    <ClCompile Include="..\src\rdvarint.c" />
-    <ClCompile Include="..\src\snappy.c" />
-    <ClCompile Include="..\src\tinycthread.c" />
-    <ClCompile Include="..\src\regexp.c" />
-    <ClCompile Include="..\src\rdports.c" />
-    <ClCompile Include="..\src\rdavl.c" />
-    <ClCompile Include="..\src\xxhash.c" />
-    <ClCompile Include="..\src\lz4.c" />
-    <ClCompile Include="..\src\lz4frame.c" />
-    <ClCompile Include="..\src\lz4hc.c" />
-    <ClCompile Include="..\src\rddl.c" />
-  </ItemGroup>
-  <ItemGroup>
-    <Text Include="..\LICENSE..txt" />
-  </ItemGroup>
-  <ItemGroup>
-    <None Include="..\README.win32" />
-    <None Include="packages.config" />
-  </ItemGroup>
-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
-  <ImportGroup Label="ExtensionTargets">
-    <Import Project="packages\zlib.$(PlatformToolset).windesktop.msvcstl.dyn.rt-dyn.1.2.8.8\build\native\zlib.$(PlatformToolset).windesktop.msvcstl.dyn.rt-dyn.targets" Condition="Exists('packages\zlib.$(PlatformToolset).windesktop.msvcstl.dyn.rt-dyn.1.2.8.8\build\native\zlib.$(PlatformToolset).windesktop.msvcstl.dyn.rt-dyn.targets')" />
-  </ImportGroup>
-  <Target Name="EnsureNuGetPackageBuildImports" BeforeTargets="PrepareForBuild">
-    <PropertyGroup>
-      <ErrorText>This project references NuGet package(s) that are missing on this computer. Enable NuGet Package Restore to download them.  For more information, see http://go.microsoft.com/fwlink/?LinkID=322105. The missing file is {0}.</ErrorText>
-    </PropertyGroup>
-    <Error Condition="!Exists('packages\zlib.$(PlatformToolset).windesktop.msvcstl.dyn.rt-dyn.1.2.8.8\build\native\zlib.$(PlatformToolset).windesktop.msvcstl.dyn.rt-dyn.targets')" Text="$([System.String]::Format('$(ErrorText)', 'packages\zlib.$(PlatformToolset).windesktop.msvcstl.dyn.rt-dyn.1.2.8.8\build\native\zlib.$(PlatformToolset).windesktop.msvcstl.dyn.rt-dyn.targets'))" />
-  </Target>
-</Project>

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/win32/librdkafkacpp/librdkafkacpp.vcxproj
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/win32/librdkafkacpp/librdkafkacpp.vcxproj b/thirdparty/librdkafka-0.11.1/win32/librdkafkacpp/librdkafkacpp.vcxproj
deleted file mode 100644
index 789c0d1..0000000
--- a/thirdparty/librdkafka-0.11.1/win32/librdkafkacpp/librdkafkacpp.vcxproj
+++ /dev/null
@@ -1,103 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<Project DefaultTargets="Build" ToolsVersion="12.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
-  <PropertyGroup Label="Globals">
-    <ProjectGuid>{E9641737-EE62-4EC8-88C8-792D2E3CE32D}</ProjectGuid>
-    <Keyword>Win32Proj</Keyword>
-    <RootNamespace>librdkafkacpp</RootNamespace>
-    <ProjectName>librdkafkacpp</ProjectName>
-    <WindowsTargetPlatformVersion>8.1</WindowsTargetPlatformVersion>
-  </PropertyGroup>
-  <PropertyGroup Label="Configuration">
-    <ConfigurationType>DynamicLibrary</ConfigurationType>
-  </PropertyGroup>
-  <Import Project="$(SolutionDir)common.vcxproj"/>
-  <PropertyGroup Label="UserMacros" />
-
-  <ItemDefinitionGroup>
-    <Link>
-      <SubSystem>Windows</SubSystem>
-      <GenerateDebugInformation>true</GenerateDebugInformation>
-     <AdditionalDependencies>librdkafka.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
-      <AdditionalLibraryDirectories>$(BuildOutputDir)</AdditionalLibraryDirectories>
-    </Link>
-  </ItemDefinitionGroup>
-
-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
-    <ClCompile>
-      <PrecompiledHeader>
-      </PrecompiledHeader>
-      <WarningLevel>Level3</WarningLevel>
-      <Optimization>Disabled</Optimization>
-      <PreprocessorDefinitions>WIN32;_DEBUG;_WINDOWS;_USRDLL;LIBRDKAFKACPP_EXPORTS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
-      <SDLCheck>true</SDLCheck>
-      <AdditionalOptions>/J %(AdditionalOptions)</AdditionalOptions>
-      <AdditionalIncludeDirectories>
-      </AdditionalIncludeDirectories>
-    </ClCompile>
-  </ItemDefinitionGroup>
-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
-    <ClCompile>
-      <PrecompiledHeader>
-      </PrecompiledHeader>
-      <WarningLevel>Level3</WarningLevel>
-      <Optimization>Disabled</Optimization>
-      <PreprocessorDefinitions>WIN32;_DEBUG;_WINDOWS;_USRDLL;LIBRDKAFKACPP_EXPORTS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
-      <SDLCheck>true</SDLCheck>
-      <AdditionalOptions>/J %(AdditionalOptions)</AdditionalOptions>
-      <AdditionalIncludeDirectories>
-      </AdditionalIncludeDirectories>
-    </ClCompile>
-  </ItemDefinitionGroup>
-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
-    <ClCompile>
-      <WarningLevel>Level3</WarningLevel>
-      <PrecompiledHeader>
-      </PrecompiledHeader>
-      <Optimization>MaxSpeed</Optimization>
-      <FunctionLevelLinking>true</FunctionLevelLinking>
-      <IntrinsicFunctions>true</IntrinsicFunctions>
-      <PreprocessorDefinitions>WIN32;NDEBUG;_WINDOWS;_USRDLL;LIBRDKAFKACPP_EXPORTS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
-      <SDLCheck>true</SDLCheck>
-    </ClCompile>
-    <Link>
-      <EnableCOMDATFolding>true</EnableCOMDATFolding>
-      <OptimizeReferences>true</OptimizeReferences>
-    </Link>
-  </ItemDefinitionGroup>
-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
-    <ClCompile>
-      <WarningLevel>Level3</WarningLevel>
-      <PrecompiledHeader>
-      </PrecompiledHeader>
-      <Optimization>MaxSpeed</Optimization>
-      <FunctionLevelLinking>true</FunctionLevelLinking>
-      <IntrinsicFunctions>true</IntrinsicFunctions>
-      <PreprocessorDefinitions>WIN32;NDEBUG;_WINDOWS;_USRDLL;LIBRDKAFKACPP_EXPORTS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
-      <SDLCheck>true</SDLCheck>
-    </ClCompile>
-    <Link>
-      <GenerateDebugInformation>true</GenerateDebugInformation>
-      <OptimizeReferences>true</OptimizeReferences>
-    </Link>
-  </ItemDefinitionGroup>
-  <ItemGroup>
-    <ClCompile Include="..\..\src-cpp\ConfImpl.cpp" />
-    <ClCompile Include="..\..\src-cpp\ConsumerImpl.cpp" />
-    <ClCompile Include="..\..\src-cpp\HandleImpl.cpp" />
-    <ClCompile Include="..\..\src-cpp\KafkaConsumerImpl.cpp" />
-    <ClCompile Include="..\..\src-cpp\MessageImpl.cpp" />
-    <ClCompile Include="..\..\src-cpp\MetadataImpl.cpp" />
-    <ClCompile Include="..\..\src-cpp\ProducerImpl.cpp" />
-    <ClCompile Include="..\..\src-cpp\QueueImpl.cpp" />
-    <ClCompile Include="..\..\src-cpp\RdKafka.cpp" />
-    <ClCompile Include="..\..\src-cpp\TopicImpl.cpp" />
-    <ClCompile Include="..\..\src-cpp\TopicPartitionImpl.cpp" />
-  </ItemGroup>
-  <ItemGroup>
-    <ClInclude Include="..\..\src-cpp\rdkafkacpp.h" />
-    <ClInclude Include="..\..\src-cpp\rdkafkacpp_int.h" />
-  </ItemGroup>
-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
-  <ImportGroup Label="ExtensionTargets">
-  </ImportGroup>
-</Project>

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/win32/package-nuget.ps1
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/win32/package-nuget.ps1 b/thirdparty/librdkafka-0.11.1/win32/package-nuget.ps1
deleted file mode 100644
index c2cb50a..0000000
--- a/thirdparty/librdkafka-0.11.1/win32/package-nuget.ps1
+++ /dev/null
@@ -1,21 +0,0 @@
-<#
-.SYNOPSIS
-
-   Create NuGet package using CoApp
-
-
-.DESCRIPTION
-
-   A full build must be completed, to populate output directories, before
-
-   running this script.
-
-   Use build.bat to build
-
-
-   Requires CoApp
-#>
-
-
-
-Write-NuGetPackage librdkafka.autopkg

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/win32/packages.config
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/win32/packages.config b/thirdparty/librdkafka-0.11.1/win32/packages.config
deleted file mode 100644
index a12ef74..0000000
--- a/thirdparty/librdkafka-0.11.1/win32/packages.config
+++ /dev/null
@@ -1,6 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<packages>
-  <package id="zlib" version="1.2.8.8" targetFramework="Native" />
-  <package id="zlib.v120.windesktop.msvcstl.dyn.rt-dyn" version="1.2.8.8" targetFramework="Native" />
-  <package id="zlib.v140.windesktop.msvcstl.dyn.rt-dyn" version="1.2.8.8" targetFramework="Native" />
-</packages>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/win32/packages/repositories.config
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/win32/packages/repositories.config b/thirdparty/librdkafka-0.11.1/win32/packages/repositories.config
deleted file mode 100644
index 0dec135..0000000
--- a/thirdparty/librdkafka-0.11.1/win32/packages/repositories.config
+++ /dev/null
@@ -1,4 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<repositories>
-  <repository path="..\packages.config" />
-</repositories>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/win32/push-package.bat
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/win32/push-package.bat b/thirdparty/librdkafka-0.11.1/win32/push-package.bat
deleted file mode 100644
index aa6e75f..0000000
--- a/thirdparty/librdkafka-0.11.1/win32/push-package.bat
+++ /dev/null
@@ -1,4 +0,0 @@
-set pkgversion=0.9.3-pre-wip1
-nuget push librdkafka.%pkgversion%.nupkg -Source https://www.nuget.org/api/v2/package
-nuget push librdkafka.redist.%pkgversion%.nupkg -Source https://www.nuget.org/api/v2/package
-nuget push librdkafka.symbols.%pkgversion%.nupkg -Source https://www.nuget.org/api/v2/package

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/win32/rdkafka_consumer_example_cpp/rdkafka_consumer_example_cpp.vcxproj
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/win32/rdkafka_consumer_example_cpp/rdkafka_consumer_example_cpp.vcxproj b/thirdparty/librdkafka-0.11.1/win32/rdkafka_consumer_example_cpp/rdkafka_consumer_example_cpp.vcxproj
deleted file mode 100644
index 06863d4..0000000
--- a/thirdparty/librdkafka-0.11.1/win32/rdkafka_consumer_example_cpp/rdkafka_consumer_example_cpp.vcxproj
+++ /dev/null
@@ -1,67 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<Project DefaultTargets="Build" ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
-  <PropertyGroup Label="Globals">
-    <ProjectGuid>{88B682AB-5082-49D5-A672-9904C5F43ABB}</ProjectGuid>
-    <Keyword>Win32Proj</Keyword>
-    <RootNamespace>rdkafka_consumer_example_cpp</RootNamespace>
-    <WindowsTargetPlatformVersion>8.1</WindowsTargetPlatformVersion>
-  </PropertyGroup>
-  <Import Project="$(SolutionDir)common.vcxproj"/>
-  <PropertyGroup Label="Configuration">
-    <ConfigurationType>Application</ConfigurationType>
-  </PropertyGroup>
-  <PropertyGroup Label="UserMacros" />
-  <ItemDefinitionGroup>
-    <Link>
-      <SubSystem>Console</SubSystem>
-<AdditionalDependencies>librdkafka.lib;librdkafkacpp.lib;ws2_32.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
-      <AdditionalLibraryDirectories>$(BuildOutputDir)</AdditionalLibraryDirectories>
-    </Link>
-  </ItemDefinitionGroup>
-
-  <ItemDefinitionGroup>
-    <ClCompile>
-      <WarningLevel>Level3</WarningLevel>
-      <Optimization>Enabled</Optimization>
-      <PreprocessorDefinitions>WIN32;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
-      <SDLCheck>true</SDLCheck>
-      <AdditionalIncludeDirectories>$(SolutionDir)..\src-cpp</AdditionalIncludeDirectories>
-    </ClCompile>
-    <Link>
-      <SubSystem>Console</SubSystem>
-    </Link>
-  </ItemDefinitionGroup>
-
-  <ItemDefinitionGroup Condition="'$(Configuration)'=='Debug'">
-    <ClCompile>
-      <Optimization>Disabled</Optimization>
-      <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
-    </ClCompile>
-    <Link>
-      <SubSystem>Console</SubSystem>
-      <GenerateDebugInformation>true</GenerateDebugInformation>
-    </Link>
-  </ItemDefinitionGroup>
-  <ItemDefinitionGroup Condition="'$(Configuration)'=='Release'">
-    <ClCompile>
-      <Optimization>MaxSpeed</Optimization>
-      <FunctionLevelLinking>true</FunctionLevelLinking>
-      <IntrinsicFunctions>true</IntrinsicFunctions>
-    </ClCompile>
-    <Link>
-      <EnableCOMDATFolding>true</EnableCOMDATFolding>
-      <OptimizeReferences>true</OptimizeReferences>
-      <GenerateDebugInformation>true</GenerateDebugInformation>
-    </Link>
-  </ItemDefinitionGroup>
-  <ItemGroup>
-    <ClCompile Include="..\..\examples\rdkafka_consumer_example.cpp" />
-    <ClCompile Include="..\wingetopt.c" />
-  </ItemGroup>
-  <ItemGroup>
-    <ClInclude Include="..\wingetopt.h" />
-  </ItemGroup>
-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
-  <ImportGroup Label="ExtensionTargets">
-  </ImportGroup>
-</Project>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/win32/rdkafka_example/rdkafka_example.vcxproj
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/win32/rdkafka_example/rdkafka_example.vcxproj b/thirdparty/librdkafka-0.11.1/win32/rdkafka_example/rdkafka_example.vcxproj
deleted file mode 100644
index 36b13c0..0000000
--- a/thirdparty/librdkafka-0.11.1/win32/rdkafka_example/rdkafka_example.vcxproj
+++ /dev/null
@@ -1,97 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<Project DefaultTargets="Build" ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
-  <PropertyGroup Label="Globals">
-    <ProjectGuid>{84585784-5BDC-43BE-B714-23EA2E7AEA5B}</ProjectGuid>
-    <Keyword>Win32Proj</Keyword>
-    <RootNamespace>rdkafka_example</RootNamespace>
-    <WindowsTargetPlatformVersion>8.1</WindowsTargetPlatformVersion>
-  </PropertyGroup>
-  <Import Project="$(SolutionDir)common.vcxproj"/>
-  <PropertyGroup Label="Configuration">
-    <ConfigurationType>Application</ConfigurationType>
-  </PropertyGroup>
-  <ItemDefinitionGroup>
-    <Link>
-      <SubSystem>Console</SubSystem>
-<AdditionalDependencies>librdkafkacpp.lib;ws2_32.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
-      <AdditionalLibraryDirectories>$(BuildOutputDir)</AdditionalLibraryDirectories>
-    </Link>
-  </ItemDefinitionGroup>
-
-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
-    <ClCompile>
-      <PrecompiledHeader>
-      </PrecompiledHeader>
-      <WarningLevel>Level3</WarningLevel>
-      <Optimization>Disabled</Optimization>
-      <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
-      <SDLCheck>true</SDLCheck>
-      <AdditionalIncludeDirectories>$(SolutionDir)/../src-cpp</AdditionalIncludeDirectories>
-    </ClCompile>
-    <Link>
-      <GenerateDebugInformation>true</GenerateDebugInformation>
-    </Link>
-  </ItemDefinitionGroup>
-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
-    <ClCompile>
-      <PrecompiledHeader>
-      </PrecompiledHeader>
-      <WarningLevel>Level3</WarningLevel>
-      <Optimization>Disabled</Optimization>
-      <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
-      <SDLCheck>true</SDLCheck>
-      <AdditionalIncludeDirectories>$(SolutionDir)/../src-cpp</AdditionalIncludeDirectories>
-    </ClCompile>
-    <Link>
-      <GenerateDebugInformation>true</GenerateDebugInformation>
-    </Link>
-  </ItemDefinitionGroup>
-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
-    <ClCompile>
-      <WarningLevel>Level3</WarningLevel>
-      <PrecompiledHeader>
-      </PrecompiledHeader>
-      <Optimization>MaxSpeed</Optimization>
-      <FunctionLevelLinking>true</FunctionLevelLinking>
-      <IntrinsicFunctions>true</IntrinsicFunctions>
-      <PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
-      <SDLCheck>true</SDLCheck>
-      <AdditionalIncludeDirectories>$(SolutionDir)/../src-cpp</AdditionalIncludeDirectories>
-    </ClCompile>
-    <Link>
-      <SubSystem>Console</SubSystem>
-      <GenerateDebugInformation>true</GenerateDebugInformation>
-      <EnableCOMDATFolding>true</EnableCOMDATFolding>
-      <OptimizeReferences>true</OptimizeReferences>
-    </Link>
-  </ItemDefinitionGroup>
-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
-    <ClCompile>
-      <WarningLevel>Level3</WarningLevel>
-      <PrecompiledHeader>
-      </PrecompiledHeader>
-      <Optimization>MaxSpeed</Optimization>
-      <FunctionLevelLinking>true</FunctionLevelLinking>
-      <IntrinsicFunctions>true</IntrinsicFunctions>
-      <PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
-      <SDLCheck>true</SDLCheck>
-      <AdditionalIncludeDirectories>$(SolutionDir)/../src-cpp</AdditionalIncludeDirectories>
-    </ClCompile>
-    <Link>
-      <SubSystem>Console</SubSystem>
-      <GenerateDebugInformation>true</GenerateDebugInformation>
-      <EnableCOMDATFolding>true</EnableCOMDATFolding>
-      <OptimizeReferences>true</OptimizeReferences>
-    </Link>
-  </ItemDefinitionGroup>
-  <ItemGroup>
-    <ClCompile Include="..\..\examples\rdkafka_example.cpp" />
-    <ClCompile Include="..\wingetopt.c" />
-  </ItemGroup>
-  <ItemGroup>
-    <ClInclude Include="..\wingetopt.h" />
-  </ItemGroup>
-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
-  <ImportGroup Label="ExtensionTargets">
-  </ImportGroup>
-</Project>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/win32/rdkafka_performance/rdkafka_performance.vcxproj
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/win32/rdkafka_performance/rdkafka_performance.vcxproj b/thirdparty/librdkafka-0.11.1/win32/rdkafka_performance/rdkafka_performance.vcxproj
deleted file mode 100644
index 6c6b184..0000000
--- a/thirdparty/librdkafka-0.11.1/win32/rdkafka_performance/rdkafka_performance.vcxproj
+++ /dev/null
@@ -1,97 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<Project DefaultTargets="Build" ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
-  <PropertyGroup Label="Globals">
-    <ProjectGuid>{82A67CAA-44B5-4F7D-BAC4-D126CC81FBEC}</ProjectGuid>
-    <Keyword>Win32Proj</Keyword>
-    <RootNamespace>rdkafka_performance</RootNamespace>
-    <WindowsTargetPlatformVersion>8.1</WindowsTargetPlatformVersion>
-  </PropertyGroup>
-  <Import Project="$(SolutionDir)common.vcxproj" />
-  <PropertyGroup Label="Configuration">
-    <ConfigurationType>Application</ConfigurationType>
-  </PropertyGroup>
-  <ItemDefinitionGroup>
-    <Link>
-      <SubSystem>Console</SubSystem>
-      <AdditionalDependencies>librdkafka.lib;ws2_32.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
-      <AdditionalLibraryDirectories>$(BuildOutputDir)</AdditionalLibraryDirectories>
-    </Link>
-  </ItemDefinitionGroup>
-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
-    <ClCompile>
-      <PrecompiledHeader>
-      </PrecompiledHeader>
-      <WarningLevel>Level3</WarningLevel>
-      <Optimization>Disabled</Optimization>
-      <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
-      <SDLCheck>true</SDLCheck>
-      <AdditionalIncludeDirectories>$(SolutionDir)/../src</AdditionalIncludeDirectories>
-    </ClCompile>
-    <Link>
-      <GenerateDebugInformation>true</GenerateDebugInformation>
-    </Link>
-  </ItemDefinitionGroup>
-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
-    <ClCompile>
-      <PrecompiledHeader>
-      </PrecompiledHeader>
-      <WarningLevel>Level3</WarningLevel>
-      <Optimization>Disabled</Optimization>
-      <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
-      <SDLCheck>true</SDLCheck>
-      <AdditionalIncludeDirectories>$(SolutionDir)/../src</AdditionalIncludeDirectories>
-    </ClCompile>
-    <Link>
-      <GenerateDebugInformation>true</GenerateDebugInformation>
-    </Link>
-  </ItemDefinitionGroup>
-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
-    <ClCompile>
-      <WarningLevel>Level3</WarningLevel>
-      <PrecompiledHeader>
-      </PrecompiledHeader>
-      <Optimization>MaxSpeed</Optimization>
-      <FunctionLevelLinking>true</FunctionLevelLinking>
-      <IntrinsicFunctions>true</IntrinsicFunctions>
-      <PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
-      <SDLCheck>true</SDLCheck>
-      <AdditionalIncludeDirectories>$(SolutionDir)/../src</AdditionalIncludeDirectories>
-    </ClCompile>
-    <Link>
-      <SubSystem>Console</SubSystem>
-      <GenerateDebugInformation>true</GenerateDebugInformation>
-      <EnableCOMDATFolding>true</EnableCOMDATFolding>
-      <OptimizeReferences>true</OptimizeReferences>
-    </Link>
-  </ItemDefinitionGroup>
-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
-    <ClCompile>
-      <WarningLevel>Level3</WarningLevel>
-      <PrecompiledHeader>
-      </PrecompiledHeader>
-      <Optimization>MaxSpeed</Optimization>
-      <FunctionLevelLinking>true</FunctionLevelLinking>
-      <IntrinsicFunctions>true</IntrinsicFunctions>
-      <PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
-      <SDLCheck>true</SDLCheck>
-      <AdditionalIncludeDirectories>$(SolutionDir)/../src</AdditionalIncludeDirectories>
-    </ClCompile>
-    <Link>
-      <SubSystem>Console</SubSystem>
-      <GenerateDebugInformation>true</GenerateDebugInformation>
-      <EnableCOMDATFolding>true</EnableCOMDATFolding>
-      <OptimizeReferences>true</OptimizeReferences>
-    </Link>
-  </ItemDefinitionGroup>
-  <ItemGroup>
-    <ClCompile Include="..\..\examples\rdkafka_performance.c" />
-    <ClCompile Include="..\wingetopt.c" />
-  </ItemGroup>
-  <ItemGroup>
-    <ClInclude Include="..\wingetopt.h" />
-    <ClInclude Include="..\wintime.h" />
-  </ItemGroup>
-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
-  <ImportGroup Label="ExtensionTargets">
-  </ImportGroup>
-</Project>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/win32/tests/.gitignore
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/win32/tests/.gitignore b/thirdparty/librdkafka-0.11.1/win32/tests/.gitignore
deleted file mode 100644
index a212801..0000000
--- a/thirdparty/librdkafka-0.11.1/win32/tests/.gitignore
+++ /dev/null
@@ -1,3 +0,0 @@
-test.conf
-*.json
-

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/win32/tests/test.conf.example
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/win32/tests/test.conf.example b/thirdparty/librdkafka-0.11.1/win32/tests/test.conf.example
deleted file mode 100644
index ef0b547..0000000
--- a/thirdparty/librdkafka-0.11.1/win32/tests/test.conf.example
+++ /dev/null
@@ -1,25 +0,0 @@
-# Copy this file to test.conf and set up according to your configuration.
-
-#
-# Test configuration
-#
-# For slow connections: multiply test timeouts by this much (float)
-#test.timeout.multiplier=3.5
-
-# Test topic names are constructed by:
-#  <prefix>_<suffix>, where default topic prefix is "rdkafkatest".
-# suffix is specified by the tests.
-#test.topic.prefix=bib
-
-# Make topic names random:
-#  <prefix>_<randomnumber>_<suffix>
-#test.topic.random=true
-
-
-# Bootstrap broker(s)
-metadata.broker.list=localhost:9092
-
-# Debugging
-#debug=metadata,topic,msg,broker
-
-# Any other librdkafka configuration property.

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/win32/tests/tests.vcxproj
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/win32/tests/tests.vcxproj b/thirdparty/librdkafka-0.11.1/win32/tests/tests.vcxproj
deleted file mode 100644
index 9a7eacc..0000000
--- a/thirdparty/librdkafka-0.11.1/win32/tests/tests.vcxproj
+++ /dev/null
@@ -1,171 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<Project DefaultTargets="Build" ToolsVersion="12.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
-  <PropertyGroup Label="Globals">
-    <ProjectGuid>{BE4E1264-5D13-423D-8191-71F7041459E7}</ProjectGuid>
-    <Keyword>Win32Proj</Keyword>
-    <RootNamespace>tests</RootNamespace>
-    <WindowsTargetPlatformVersion>8.1</WindowsTargetPlatformVersion>
-  </PropertyGroup>
-  <Import Project="$(SolutionDir)common.vcxproj"/>
-  <PropertyGroup Label="Configuration">
-    <ConfigurationType>Application</ConfigurationType>
-  </PropertyGroup>
-  <PropertyGroup Label="UserMacros">
-    <NuGetPackageImportStamp>8e214174</NuGetPackageImportStamp>
-  </PropertyGroup>
-  <ItemDefinitionGroup>
-    <Link>
-      <SubSystem>Console</SubSystem>
-<AdditionalDependencies>librdkafka.lib;librdkafkacpp.lib;ws2_32.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
-      <AdditionalLibraryDirectories>$(BuildOutputDir)</AdditionalLibraryDirectories>
-    </Link>
-  </ItemDefinitionGroup>
-
-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
-    <ClCompile>
-      <PrecompiledHeader>
-      </PrecompiledHeader>
-      <WarningLevel>Level3</WarningLevel>
-      <Optimization>Disabled</Optimization>
-      <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;_LIB;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
-      <SDLCheck>true</SDLCheck>
-      <AdditionalIncludeDirectories>$(SolutionDir)\..\src;$(SolutionDir)\..\src-cpp</AdditionalIncludeDirectories>
-      <ShowIncludes>false</ShowIncludes>
-    </ClCompile>
-    <Link>
-      <SubSystem>Console</SubSystem>
-      <GenerateDebugInformation>true</GenerateDebugInformation>
-    </Link>
-  </ItemDefinitionGroup>
-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
-    <ClCompile>
-      <PrecompiledHeader>
-      </PrecompiledHeader>
-      <WarningLevel>Level3</WarningLevel>
-      <Optimization>Disabled</Optimization>
-      <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;_LIB;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
-      <SDLCheck>true</SDLCheck>
-      <AdditionalIncludeDirectories>$(SolutionDir)\..\src;$(SolutionDir)\..\src-cpp</AdditionalIncludeDirectories>
-      <ShowIncludes>false</ShowIncludes>
-    </ClCompile>
-    <Link>
-      <SubSystem>Console</SubSystem>
-      <GenerateDebugInformation>true</GenerateDebugInformation>
-    </Link>
-  </ItemDefinitionGroup>
-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
-    <ClCompile>
-      <WarningLevel>Level3</WarningLevel>
-      <PrecompiledHeader>
-      </PrecompiledHeader>
-      <Optimization>MaxSpeed</Optimization>
-      <FunctionLevelLinking>true</FunctionLevelLinking>
-      <IntrinsicFunctions>true</IntrinsicFunctions>
-      <PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;_LIB;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
-      <SDLCheck>true</SDLCheck>
-      <AdditionalIncludeDirectories>$(SolutionDir)\..\src;$(SolutionDir)\..\src-cpp</AdditionalIncludeDirectories>
-    </ClCompile>
-    <Link>
-      <SubSystem>Console</SubSystem>
-      <GenerateDebugInformation>true</GenerateDebugInformation>
-      <EnableCOMDATFolding>true</EnableCOMDATFolding>
-      <OptimizeReferences>true</OptimizeReferences>
-    </Link>
-  </ItemDefinitionGroup>
-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
-    <ClCompile>
-      <WarningLevel>Level3</WarningLevel>
-      <PrecompiledHeader>
-      </PrecompiledHeader>
-      <Optimization>MaxSpeed</Optimization>
-      <FunctionLevelLinking>true</FunctionLevelLinking>
-      <IntrinsicFunctions>true</IntrinsicFunctions>
-      <PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;_LIB;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
-      <SDLCheck>true</SDLCheck>
-      <AdditionalIncludeDirectories>$(SolutionDir)\..\src;$(SolutionDir)\..\src-cpp</AdditionalIncludeDirectories>
-    </ClCompile>
-    <Link>
-      <SubSystem>Console</SubSystem>
-      <GenerateDebugInformation>true</GenerateDebugInformation>
-      <EnableCOMDATFolding>true</EnableCOMDATFolding>
-      <OptimizeReferences>true</OptimizeReferences>
-    </Link>
-  </ItemDefinitionGroup>
-  <ItemGroup>
-    <ClCompile Include="..\..\tests\0000-unittests.c" />
-    <ClCompile Include="..\..\tests\0001-multiobj.c" />
-    <ClCompile Include="..\..\tests\0002-unkpart.c" />
-    <ClCompile Include="..\..\tests\0003-msgmaxsize.c" />
-    <ClCompile Include="..\..\tests\0004-conf.c" />
-    <ClCompile Include="..\..\tests\0005-order.c" />
-    <ClCompile Include="..\..\tests\0006-symbols.c" />
-    <ClCompile Include="..\..\tests\0007-autotopic.c" />
-    <ClCompile Include="..\..\tests\0008-reqacks.c" />
-    <ClCompile Include="..\..\tests\0011-produce_batch.c" />
-    <ClCompile Include="..\..\tests\0012-produce_consume.c" />
-    <ClCompile Include="..\..\tests\0013-null-msgs.c" />
-    <ClCompile Include="..\..\tests\0014-reconsume-191.c" />
-    <ClCompile Include="..\..\tests\0015-offset_seeks.c" />
-    <ClCompile Include="..\..\tests\0017-compression.c" />
-    <ClCompile Include="..\..\tests\0018-cgrp_term.c" />
-    <ClCompile Include="..\..\tests\0019-list_groups.c" />
-    <ClCompile Include="..\..\tests\0020-destroy_hang.c" />
-    <ClCompile Include="..\..\tests\0021-rkt_destroy.c" />
-    <ClCompile Include="..\..\tests\0022-consume_batch.c" />
-    <ClCompile Include="..\..\tests\0025-timers.c" />
-    <ClCompile Include="..\..\tests\0026-consume_pause.c" />
-    <ClCompile Include="..\..\tests\0028-long_topicnames.c" />
-    <ClCompile Include="..\..\tests\0029-assign_offset.c" />
-    <ClCompile Include="..\..\tests\0030-offset_commit.c" />
-    <ClCompile Include="..\..\tests\0031-get_offsets.c" />
-    <ClCompile Include="..\..\tests\0033-regex_subscribe.c" />
-    <ClCompile Include="..\..\tests\0034-offset_reset.c" />
-    <ClCompile Include="..\..\tests\0035-api_version.c" />
-    <ClCompile Include="..\..\tests\0036-partial_fetch.c" />
-    <ClCompile Include="..\..\tests\0037-destroy_hang_local.c" />
-    <ClCompile Include="..\..\tests\0038-performance.c" />
-    <ClCompile Include="..\..\tests\0039-event.c" />
-    <ClCompile Include="..\..\tests\0040-io_event.c" />
-    <ClCompile Include="..\..\tests\0041-fetch_max_bytes.c" />
-    <ClCompile Include="..\..\tests\0042-many_topics.c" />
-    <ClCompile Include="..\..\tests\0043-no_connection.c" />
-    <ClCompile Include="..\..\tests\0044-partition_cnt.c" />
-    <ClCompile Include="..\..\tests\0045-subscribe_update.c" />
-    <ClCompile Include="..\..\tests\0046-rkt_cache.c" />
-    <ClCompile Include="..\..\tests\0047-partial_buf_tmout.c" />
-    <ClCompile Include="..\..\tests\0048-partitioner.c" />
-    <ClCompile Include="..\..\tests\0050-subscribe_adds.c" />
-    <ClCompile Include="..\..\tests\0051-assign_adds.c" />
-    <ClCompile Include="..\..\tests\0052-msg_timestamps.c" />
-    <ClCompile Include="..\..\tests\0053-stats_cb.cpp" />
-    <ClCompile Include="..\..\tests\0054-offset_time.cpp" />
-    <ClCompile Include="..\..\tests\0055-producer_latency.c" />
-    <ClCompile Include="..\..\tests\0056-balanced_group_mt.c" />
-    <ClCompile Include="..\..\tests\0057-invalid_topic.cpp" />
-    <ClCompile Include="..\..\tests\0058-log.cpp" />
-    <ClCompile Include="..\..\tests\0059-bsearch.cpp" />
-    <ClCompile Include="..\..\tests\0060-op_prio.cpp" />
-    <ClCompile Include="..\..\tests\0061-consumer_lag.cpp" />
-    <ClCompile Include="..\..\tests\0062-stats_event.c" />
-    <ClCompile Include="..\..\tests\0063-clusterid.cpp" />
-    <ClCompile Include="..\..\tests\0064-interceptors.c" />
-    <ClCompile Include="..\..\tests\0065-yield.cpp" />
-    <ClCompile Include="..\..\tests\0066-plugins.cpp" />
-    <ClCompile Include="..\..\tests\0067-empty_topic.cpp" />
-    <ClCompile Include="..\..\tests\0068-produce_timeout.c" />
-    <ClCompile Include="..\..\tests\0069-consumer_add_parts.c" />
-    <ClCompile Include="..\..\tests\0070-null_empty.cpp" />
-    <ClCompile Include="..\..\tests\8000-idle.cpp" />
-    <ClCompile Include="..\..\tests\test.c" />
-    <ClCompile Include="..\..\tests\testcpp.cpp" />
-    <ClCompile Include="..\..\src\tinycthread.c" />
-    <ClCompile Include="..\..\src\rdlist.c" />
-  </ItemGroup>
-  <ItemGroup>
-    <ClInclude Include="..\..\tests\test.h" />
-    <ClInclude Include="..\..\tests\testcpp.h" />
-    <ClInclude Include="..\..\tests\testshared.h" />
-  </ItemGroup>
-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
-  <ImportGroup Label="ExtensionTargets" />
-</Project>


[40/51] [partial] nifi-minifi-cpp git commit: MINIFICPP-512 - upgrade to librdkafka 0.11.4

Posted by ph...@apache.org.
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/lz4frame.c
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/lz4frame.c b/thirdparty/librdkafka-0.11.1/src/lz4frame.c
deleted file mode 100644
index e04fe83..0000000
--- a/thirdparty/librdkafka-0.11.1/src/lz4frame.c
+++ /dev/null
@@ -1,1440 +0,0 @@
-/*
-LZ4 auto-framing library
-Copyright (C) 2011-2016, Yann Collet.
-
-BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
-* Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
-* Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-You can contact the author at :
-- LZ4 homepage : http://www.lz4.org
-- LZ4 source repository : https://github.com/lz4/lz4
-*/
-
-/* LZ4F is a stand-alone API to create LZ4-compressed Frames
-*  in full conformance with specification v1.5.0
-*  All related operations, including memory management, are handled by the library.
-* */
-
-
-/*-************************************
-*  Compiler Options
-**************************************/
-#ifdef _MSC_VER    /* Visual Studio */
-#  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */
-#endif
-
-
-/*-************************************
-*  Memory routines
-**************************************/
-#include <stdlib.h>   /* malloc, calloc, free */
-#define ALLOCATOR(s)   calloc(1,s)
-#define FREEMEM        free
-#include <string.h>   /* memset, memcpy, memmove */
-#define MEM_INIT       memset
-
-
-/*-************************************
-*  Includes
-**************************************/
-#include "lz4frame_static.h"
-#include "lz4.h"
-#include "lz4hc.h"
-#define XXH_STATIC_LINKING_ONLY
-#include "xxhash.h"
-
-
-/*-************************************
-*  Common Utils
-**************************************/
-#define LZ4_STATIC_ASSERT(c)    { enum { LZ4_static_assert = 1/(int)(!!(c)) }; }   /* use only *after* variable declarations */
-
-
-/*-************************************
-*  Basic Types
-**************************************/
-#if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
-# include <stdint.h>
-  typedef  uint8_t BYTE;
-  typedef uint16_t U16;
-  typedef uint32_t U32;
-  typedef  int32_t S32;
-  typedef uint64_t U64;
-#else
-  typedef unsigned char       BYTE;
-  typedef unsigned short      U16;
-  typedef unsigned int        U32;
-  typedef   signed int        S32;
-  typedef unsigned long long  U64;
-#endif
-
-
-/* unoptimized version; solves endianness & alignment issues */
-static U32 LZ4F_readLE32 (const void* src)
-{
-    const BYTE* const srcPtr = (const BYTE*)src;
-    U32 value32 = srcPtr[0];
-    value32 += (srcPtr[1]<<8);
-    value32 += (srcPtr[2]<<16);
-    value32 += ((U32)srcPtr[3])<<24;
-    return value32;
-}
-
-static void LZ4F_writeLE32 (void* dst, U32 value32)
-{
-    BYTE* const dstPtr = (BYTE*)dst;
-    dstPtr[0] = (BYTE)value32;
-    dstPtr[1] = (BYTE)(value32 >> 8);
-    dstPtr[2] = (BYTE)(value32 >> 16);
-    dstPtr[3] = (BYTE)(value32 >> 24);
-}
-
-static U64 LZ4F_readLE64 (const void* src)
-{
-    const BYTE* const srcPtr = (const BYTE*)src;
-    U64 value64 = srcPtr[0];
-    value64 += ((U64)srcPtr[1]<<8);
-    value64 += ((U64)srcPtr[2]<<16);
-    value64 += ((U64)srcPtr[3]<<24);
-    value64 += ((U64)srcPtr[4]<<32);
-    value64 += ((U64)srcPtr[5]<<40);
-    value64 += ((U64)srcPtr[6]<<48);
-    value64 += ((U64)srcPtr[7]<<56);
-    return value64;
-}
-
-static void LZ4F_writeLE64 (void* dst, U64 value64)
-{
-    BYTE* const dstPtr = (BYTE*)dst;
-    dstPtr[0] = (BYTE)value64;
-    dstPtr[1] = (BYTE)(value64 >> 8);
-    dstPtr[2] = (BYTE)(value64 >> 16);
-    dstPtr[3] = (BYTE)(value64 >> 24);
-    dstPtr[4] = (BYTE)(value64 >> 32);
-    dstPtr[5] = (BYTE)(value64 >> 40);
-    dstPtr[6] = (BYTE)(value64 >> 48);
-    dstPtr[7] = (BYTE)(value64 >> 56);
-}
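
/* A tiny illustration of the portable helpers above (a sketch only, not exercised by the
 * library itself; the helper name is this sketch's own): byte order and alignment never
 * matter, so the bytes { 0x01, 0x02, 0x03, 0x04 } always decode to 0x04030201 and
 * re-encode to the same four bytes on any host. Uses memcmp from <string.h>, which is
 * already included above. */
static int LZ4F_example_le32_roundtrip(void)
{
    BYTE const in[4] = { 0x01, 0x02, 0x03, 0x04 };
    BYTE out[4];
    U32 const v = LZ4F_readLE32(in);        /* 0x04030201 regardless of host endianness */
    LZ4F_writeLE32(out, v);
    return (v == 0x04030201U) && (memcmp(in, out, sizeof(out)) == 0);
}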
-
-
-/*-************************************
-*  Constants
-**************************************/
-#define KB *(1<<10)
-#define MB *(1<<20)
-#define GB *(1<<30)
-
-#define _1BIT  0x01
-#define _2BITS 0x03
-#define _3BITS 0x07
-#define _4BITS 0x0F
-#define _8BITS 0xFF
-
-#define LZ4F_MAGIC_SKIPPABLE_START 0x184D2A50U
-#define LZ4F_MAGICNUMBER 0x184D2204U
-#define LZ4F_BLOCKUNCOMPRESSED_FLAG 0x80000000U
-#define LZ4F_BLOCKSIZEID_DEFAULT LZ4F_max64KB
-
-static const size_t minFHSize = 7;
-static const size_t maxFHSize = LZ4F_HEADER_SIZE_MAX;   /* 15 */
-static const size_t BHSize = 4;
-
-
-/*-************************************
-*  Structures and local types
-**************************************/
-typedef struct LZ4F_cctx_s
-{
-    LZ4F_preferences_t prefs;
-    U32    version;
-    U32    cStage;
-    size_t maxBlockSize;
-    size_t maxBufferSize;
-    BYTE*  tmpBuff;
-    BYTE*  tmpIn;
-    size_t tmpInSize;
-    U64    totalInSize;
-    XXH32_state_t xxh;
-    void*  lz4CtxPtr;
-    U32    lz4CtxLevel;     /* 0: unallocated;  1: LZ4_stream_t;  3: LZ4_streamHC_t */
-} LZ4F_cctx_t;
-
-
-/*-************************************
-*  Error management
-**************************************/
-#define LZ4F_GENERATE_STRING(STRING) #STRING,
-static const char* LZ4F_errorStrings[] = { LZ4F_LIST_ERRORS(LZ4F_GENERATE_STRING) };
-
-
-unsigned LZ4F_isError(LZ4F_errorCode_t code)
-{
-    return (code > (LZ4F_errorCode_t)(-LZ4F_ERROR_maxCode));
-}
-
-const char* LZ4F_getErrorName(LZ4F_errorCode_t code)
-{
-    static const char* codeError = "Unspecified error code";
-    if (LZ4F_isError(code)) return LZ4F_errorStrings[-(int)(code)];
-    return codeError;
-}
-
-LZ4F_errorCodes LZ4F_getErrorCode(size_t functionResult)
-{
-    if (!LZ4F_isError(functionResult)) return LZ4F_OK_NoError;
-    return (LZ4F_errorCodes)(-(ptrdiff_t)functionResult);
-}
-
-static LZ4F_errorCode_t err0r(LZ4F_errorCodes code)
-{
-    LZ4_STATIC_ASSERT(sizeof(ptrdiff_t) >= sizeof(size_t));    /* A compilation error here means sizeof(ptrdiff_t) is not large enough */
-    return (LZ4F_errorCode_t)-(ptrdiff_t)code;
-}
-
-unsigned LZ4F_getVersion(void) { return LZ4F_VERSION; }
-
-
-/*-************************************
-*  Private functions
-**************************************/
-#define MIN(a,b)   ( (a) < (b) ? (a) : (b) )
-
-static size_t LZ4F_getBlockSize(unsigned blockSizeID)
-{
-    static const size_t blockSizes[4] = { 64 KB, 256 KB, 1 MB, 4 MB };
-
-    if (blockSizeID == 0) blockSizeID = LZ4F_BLOCKSIZEID_DEFAULT;
-    blockSizeID -= 4;
-    if (blockSizeID > 3) return err0r(LZ4F_ERROR_maxBlockSize_invalid);
-    return blockSizes[blockSizeID];
-}
-
-static BYTE LZ4F_headerChecksum (const void* header, size_t length)
-{
-    U32 const xxh = XXH32(header, length, 0);
-    return (BYTE)(xxh >> 8);
-}
-
-
-/*-************************************
-*  Simple-pass compression functions
-**************************************/
-static LZ4F_blockSizeID_t LZ4F_optimalBSID(const LZ4F_blockSizeID_t requestedBSID, const size_t srcSize)
-{
-    LZ4F_blockSizeID_t proposedBSID = LZ4F_max64KB;
-    size_t maxBlockSize = 64 KB;
-    while (requestedBSID > proposedBSID) {
-        if (srcSize <= maxBlockSize)
-            return proposedBSID;
-        proposedBSID = (LZ4F_blockSizeID_t)((int)proposedBSID + 1);
-        maxBlockSize <<= 2;
-    }
-    return requestedBSID;
-}
-
-/* LZ4F_compressBound() :
- * Provides dstCapacity given a srcSize to guarantee operation success in worst case situations.
- * prefsPtr is optional : you can provide NULL as argument, preferences will be set to cover worst case scenario.
- * Result is always the same for a srcSize and prefsPtr, so it can be trusted to size reusable buffers.
- * When srcSize==0, LZ4F_compressBound() provides an upper bound for LZ4F_flush() and LZ4F_compressEnd() operations.
- */
-static size_t LZ4F_compressBound_internal(size_t srcSize, const LZ4F_preferences_t* preferencesPtr, size_t alreadyBuffered)
-{
-    LZ4F_preferences_t prefsNull;
-    memset(&prefsNull, 0, sizeof(prefsNull));
-    prefsNull.frameInfo.contentChecksumFlag = LZ4F_contentChecksumEnabled;   /* worst case */
-    {   const LZ4F_preferences_t* const prefsPtr = (preferencesPtr==NULL) ? &prefsNull : preferencesPtr;
-        U32 const flush = prefsPtr->autoFlush | (srcSize==0);
-        LZ4F_blockSizeID_t const bid = prefsPtr->frameInfo.blockSizeID;
-        size_t const blockSize = LZ4F_getBlockSize(bid);
-        size_t const maxBuffered = blockSize - 1;
-        size_t const bufferedSize = MIN(alreadyBuffered, maxBuffered);
-        size_t const maxSrcSize = srcSize + bufferedSize;
-        unsigned const nbFullBlocks = (unsigned)(maxSrcSize / blockSize);
-        size_t const partialBlockSize = (srcSize - (srcSize==0)) & (blockSize-1);   /* 0 => -1 == MAX => blockSize-1 */
-        size_t const lastBlockSize = flush ? partialBlockSize : 0;
-        unsigned const nbBlocks = nbFullBlocks + (lastBlockSize>0);
-
-        size_t const blockHeaderSize = 4;   /* default, without block CRC option (which cannot be generated with current API) */
-        size_t const frameEnd = 4 + (prefsPtr->frameInfo.contentChecksumFlag*4);
-
-        return (blockHeaderSize * nbBlocks) + (blockSize * nbFullBlocks) + lastBlockSize + frameEnd;;
-    }
-}
-
-size_t LZ4F_compressFrameBound(size_t srcSize, const LZ4F_preferences_t* preferencesPtr)
-{
-    LZ4F_preferences_t prefs;
-    size_t const headerSize = maxFHSize;      /* max header size, including magic number and frame content size */
-
-    if (preferencesPtr!=NULL) prefs = *preferencesPtr;
-    else memset(&prefs, 0, sizeof(prefs));
-    prefs.autoFlush = 1;
-
-    return headerSize + LZ4F_compressBound_internal(srcSize, &prefs, 0);;
-}
-
-
-/*! LZ4F_compressFrame() :
-* Compress an entire srcBuffer into a valid LZ4 frame, as defined by specification v1.5.0, in a single step.
-* The most important rule is that dstBuffer MUST be large enough (dstMaxSize) to ensure compression completion even in worst case.
-* You can get the minimum value of dstMaxSize by using LZ4F_compressFrameBound()
-* If this condition is not respected, LZ4F_compressFrame() will fail (result is an errorCode)
-* The LZ4F_preferences_t structure is optional : you can provide NULL as argument. All preferences will then be set to default.
-* The result of the function is the number of bytes written into dstBuffer.
-* The function outputs an error code if it fails (can be tested using LZ4F_isError())
-*/
-size_t LZ4F_compressFrame(void* dstBuffer, size_t dstCapacity, const void* srcBuffer, size_t srcSize, const LZ4F_preferences_t* preferencesPtr)
-{
-    LZ4F_cctx_t cctxI;
-    LZ4_stream_t lz4ctx;
-    LZ4F_preferences_t prefs;
-    LZ4F_compressOptions_t options;
-    BYTE* const dstStart = (BYTE*) dstBuffer;
-    BYTE* dstPtr = dstStart;
-    BYTE* const dstEnd = dstStart + dstCapacity;
-
-    memset(&cctxI, 0, sizeof(cctxI));   /* works because no allocation */
-    memset(&options, 0, sizeof(options));
-
-    cctxI.version = LZ4F_VERSION;
-    cctxI.maxBufferSize = 5 MB;   /* mess with real buffer size to prevent allocation; works because autoflush==1 & stableSrc==1 */
-
-    if (preferencesPtr!=NULL)
-        prefs = *preferencesPtr;
-    else
-        memset(&prefs, 0, sizeof(prefs));
-    if (prefs.frameInfo.contentSize != 0)
-        prefs.frameInfo.contentSize = (U64)srcSize;   /* auto-correct content size if selected (!=0) */
-
-    if (prefs.compressionLevel < LZ4HC_CLEVEL_MIN) {
-        cctxI.lz4CtxPtr = &lz4ctx;
-        cctxI.lz4CtxLevel = 1;
-    }
-
-    prefs.frameInfo.blockSizeID = LZ4F_optimalBSID(prefs.frameInfo.blockSizeID, srcSize);
-    prefs.autoFlush = 1;
-    if (srcSize <= LZ4F_getBlockSize(prefs.frameInfo.blockSizeID))
-        prefs.frameInfo.blockMode = LZ4F_blockIndependent;   /* no need for linked blocks */
-
-    options.stableSrc = 1;
-
-    if (dstCapacity < LZ4F_compressFrameBound(srcSize, &prefs))
-        return err0r(LZ4F_ERROR_dstMaxSize_tooSmall);
-
-    { size_t const headerSize = LZ4F_compressBegin(&cctxI, dstBuffer, dstCapacity, &prefs);  /* write header */
-      if (LZ4F_isError(headerSize)) return headerSize;
-      dstPtr += headerSize;   /* header size */ }
-
-    { size_t const cSize = LZ4F_compressUpdate(&cctxI, dstPtr, dstEnd-dstPtr, srcBuffer, srcSize, &options);
-      if (LZ4F_isError(cSize)) return cSize;
-      dstPtr += cSize; }
-
-    { size_t const tailSize = LZ4F_compressEnd(&cctxI, dstPtr, dstEnd-dstPtr, &options);   /* flush last block, and generate suffix */
-      if (LZ4F_isError(tailSize)) return tailSize;
-      dstPtr += tailSize; }
-
-    if (prefs.compressionLevel >= LZ4HC_CLEVEL_MIN)   /* no allocation done with lz4 fast */
-        FREEMEM(cctxI.lz4CtxPtr);
-
-    return (dstPtr - dstStart);
-}
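
/* A minimal single-shot usage sketch for LZ4F_compressFrame() (illustrative only; the
 * helper name and the heap-based buffer handling are this sketch's own choices, and error
 * handling is abbreviated): size the destination with LZ4F_compressFrameBound(), then
 * compress in one call with default preferences (NULL). */
static size_t LZ4F_example_compressWholeBuffer(const void* src, size_t srcSize,
                                               void** dstBufPtr, size_t* dstSizePtr)
{
    size_t const bound = LZ4F_compressFrameBound(srcSize, NULL);   /* worst-case frame size */
    void* const dst = ALLOCATOR(bound);
    if (dst == NULL) return err0r(LZ4F_ERROR_allocation_failed);
    {   size_t const written = LZ4F_compressFrame(dst, bound, src, srcSize, NULL);
        if (LZ4F_isError(written)) { FREEMEM(dst); return written; }
        *dstBufPtr = dst;
        *dstSizePtr = written;
        return written;   /* number of bytes written into *dstBufPtr */
    }
}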
-
-
-/*-*********************************
-*  Advanced compression functions
-***********************************/
-
-/*! LZ4F_createCompressionContext() :
- * The first thing to do is to create a compressionContext object, which will be used in all compression operations.
- * This is achieved using LZ4F_createCompressionContext(), which takes as argument a version and an LZ4F_preferences_t structure.
- * The version provided MUST be LZ4F_VERSION. It is intended to track potential version differences between different binaries.
- * The function will provide a pointer to an allocated LZ4F_compressionContext_t object.
- * If the result LZ4F_errorCode_t is not OK_NoError, there was an error during context creation.
- * Object can release its memory using LZ4F_freeCompressionContext();
- */
-LZ4F_errorCode_t LZ4F_createCompressionContext(LZ4F_compressionContext_t* LZ4F_compressionContextPtr, unsigned version)
-{
-    LZ4F_cctx_t* const cctxPtr = (LZ4F_cctx_t*)ALLOCATOR(sizeof(LZ4F_cctx_t));
-    if (cctxPtr==NULL) return err0r(LZ4F_ERROR_allocation_failed);
-
-    cctxPtr->version = version;
-    cctxPtr->cStage = 0;   /* Next stage : write header */
-
-    *LZ4F_compressionContextPtr = (LZ4F_compressionContext_t)cctxPtr;
-
-    return LZ4F_OK_NoError;
-}
-
-
-LZ4F_errorCode_t LZ4F_freeCompressionContext(LZ4F_compressionContext_t LZ4F_compressionContext)
-{
-    LZ4F_cctx_t* const cctxPtr = (LZ4F_cctx_t*)LZ4F_compressionContext;
-
-    if (cctxPtr != NULL) {  /* null pointers can be safely provided to this function, like free() */
-       FREEMEM(cctxPtr->lz4CtxPtr);
-       FREEMEM(cctxPtr->tmpBuff);
-       FREEMEM(LZ4F_compressionContext);
-    }
-
-    return LZ4F_OK_NoError;
-}
-
-
-/*! LZ4F_compressBegin() :
- * will write the frame header into dstBuffer.
- * dstBuffer must be large enough to accommodate a header (dstCapacity). Maximum header size is LZ4F_HEADER_SIZE_MAX bytes.
- * @return : number of bytes written into dstBuffer for the header
- *           or an error code (can be tested using LZ4F_isError())
- */
-size_t LZ4F_compressBegin(LZ4F_cctx* cctxPtr, void* dstBuffer, size_t dstCapacity, const LZ4F_preferences_t* preferencesPtr)
-{
-    LZ4F_preferences_t prefNull;
-    BYTE* const dstStart = (BYTE*)dstBuffer;
-    BYTE* dstPtr = dstStart;
-    BYTE* headerStart;
-    size_t requiredBuffSize;
-
-    if (dstCapacity < maxFHSize) return err0r(LZ4F_ERROR_dstMaxSize_tooSmall);
-    if (cctxPtr->cStage != 0) return err0r(LZ4F_ERROR_GENERIC);
-    memset(&prefNull, 0, sizeof(prefNull));
-    if (preferencesPtr == NULL) preferencesPtr = &prefNull;
-    cctxPtr->prefs = *preferencesPtr;
-
-    /* ctx Management */
-    {   U32 const tableID = (cctxPtr->prefs.compressionLevel < LZ4HC_CLEVEL_MIN) ? 1 : 2;  /* 0:nothing ; 1:LZ4 table ; 2:HC tables */
-        if (cctxPtr->lz4CtxLevel < tableID) {
-            FREEMEM(cctxPtr->lz4CtxPtr);
-            if (cctxPtr->prefs.compressionLevel < LZ4HC_CLEVEL_MIN)
-                cctxPtr->lz4CtxPtr = (void*)LZ4_createStream();
-            else
-                cctxPtr->lz4CtxPtr = (void*)LZ4_createStreamHC();
-            cctxPtr->lz4CtxLevel = tableID;
-        }
-    }
-
-    /* Buffer Management */
-    if (cctxPtr->prefs.frameInfo.blockSizeID == 0) cctxPtr->prefs.frameInfo.blockSizeID = LZ4F_BLOCKSIZEID_DEFAULT;
-    cctxPtr->maxBlockSize = LZ4F_getBlockSize(cctxPtr->prefs.frameInfo.blockSizeID);
-
-    requiredBuffSize = cctxPtr->maxBlockSize + ((cctxPtr->prefs.frameInfo.blockMode == LZ4F_blockLinked) * 128 KB);
-    if (preferencesPtr->autoFlush)
-        requiredBuffSize = (cctxPtr->prefs.frameInfo.blockMode == LZ4F_blockLinked) * 64 KB;   /* just needs dict */
-
-    if (cctxPtr->maxBufferSize < requiredBuffSize) {
-        cctxPtr->maxBufferSize = requiredBuffSize;
-        FREEMEM(cctxPtr->tmpBuff);
-        cctxPtr->tmpBuff = (BYTE*)ALLOCATOR(requiredBuffSize);
-        if (cctxPtr->tmpBuff == NULL) return err0r(LZ4F_ERROR_allocation_failed);
-    }
-    cctxPtr->tmpIn = cctxPtr->tmpBuff;
-    cctxPtr->tmpInSize = 0;
-    XXH32_reset(&(cctxPtr->xxh), 0);
-    if (cctxPtr->prefs.compressionLevel < LZ4HC_CLEVEL_MIN)
-        LZ4_resetStream((LZ4_stream_t*)(cctxPtr->lz4CtxPtr));
-    else
-        LZ4_resetStreamHC((LZ4_streamHC_t*)(cctxPtr->lz4CtxPtr), cctxPtr->prefs.compressionLevel);
-
-    /* Magic Number */
-    LZ4F_writeLE32(dstPtr, LZ4F_MAGICNUMBER);
-    dstPtr += 4;
-    headerStart = dstPtr;
-
-    /* FLG Byte */
-    *dstPtr++ = (BYTE)(((1 & _2BITS) << 6)    /* Version('01') */
-        + ((cctxPtr->prefs.frameInfo.blockMode & _1BIT ) << 5)    /* Block mode */
-        + ((cctxPtr->prefs.frameInfo.contentChecksumFlag & _1BIT ) << 2)   /* Frame checksum */
-        + ((cctxPtr->prefs.frameInfo.contentSize > 0) << 3));   /* Frame content size */
-    /* BD Byte */
-    *dstPtr++ = (BYTE)((cctxPtr->prefs.frameInfo.blockSizeID & _3BITS) << 4);
-    /* Optional Frame content size field */
-    if (cctxPtr->prefs.frameInfo.contentSize) {
-        LZ4F_writeLE64(dstPtr, cctxPtr->prefs.frameInfo.contentSize);
-        dstPtr += 8;
-        cctxPtr->totalInSize = 0;
-    }
-    /* CRC Byte */
-    *dstPtr = LZ4F_headerChecksum(headerStart, dstPtr - headerStart);
-    dstPtr++;
-
-    cctxPtr->cStage = 1;   /* header written, now request input data block */
-
-    return (dstPtr - dstStart);
-}
-
-
-/* LZ4F_compressBound() :
- *      @ return size of Dst buffer given a srcSize to handle worst case situations.
- *      The LZ4F_frameInfo_t structure is optional : if NULL, preferences will be set to cover worst case situations.
- *      This function cannot fail.
- */
-size_t LZ4F_compressBound(size_t srcSize, const LZ4F_preferences_t* preferencesPtr)
-{
-    return LZ4F_compressBound_internal(srcSize, preferencesPtr, (size_t)-1);
-}
-
-
-typedef int (*compressFunc_t)(void* ctx, const char* src, char* dst, int srcSize, int dstSize, int level);
-
-static size_t LZ4F_compressBlock(void* dst, const void* src, size_t srcSize, compressFunc_t compress, void* lz4ctx, int level)
-{
-    /* compress a single block */
-    BYTE* const cSizePtr = (BYTE*)dst;
-    U32 cSize = (U32)compress(lz4ctx, (const char*)src, (char*)(cSizePtr+4), (int)(srcSize), (int)(srcSize-1), level);
-    LZ4F_writeLE32(cSizePtr, cSize);
-    if (cSize == 0) {  /* compression failed */
-        cSize = (U32)srcSize;
-        LZ4F_writeLE32(cSizePtr, cSize | LZ4F_BLOCKUNCOMPRESSED_FLAG);
-        memcpy(cSizePtr+4, src, srcSize);
-    }
-    return cSize + 4;
-}
-
-
-static int LZ4F_localLZ4_compress_limitedOutput_withState(void* ctx, const char* src, char* dst, int srcSize, int dstCapacity, int level)
-{
-    (void) level;
-    return LZ4_compress_fast_extState(ctx, src, dst, srcSize, dstCapacity, 1);
-}
-
-static int LZ4F_localLZ4_compress_limitedOutput_continue(void* ctx, const char* src, char* dst, int srcSize, int dstCapacity, int level)
-{
-    (void) level;
-    return LZ4_compress_fast_continue((LZ4_stream_t*)ctx, src, dst, srcSize, dstCapacity, 1);
-}
-
-static int LZ4F_localLZ4_compressHC_limitedOutput_continue(void* ctx, const char* src, char* dst, int srcSize, int dstSize, int level)
-{
-    (void) level;
-    return LZ4_compress_HC_continue((LZ4_streamHC_t*)ctx, src, dst, srcSize, dstSize);
-}
-
-static compressFunc_t LZ4F_selectCompression(LZ4F_blockMode_t blockMode, int level)
-{
-    if (level < LZ4HC_CLEVEL_MIN) {
-        if (blockMode == LZ4F_blockIndependent) return LZ4F_localLZ4_compress_limitedOutput_withState;
-        return LZ4F_localLZ4_compress_limitedOutput_continue;
-    }
-    if (blockMode == LZ4F_blockIndependent) return LZ4_compress_HC_extStateHC;
-    return LZ4F_localLZ4_compressHC_limitedOutput_continue;
-}
-
-static int LZ4F_localSaveDict(LZ4F_cctx_t* cctxPtr)
-{
-    if (cctxPtr->prefs.compressionLevel < LZ4HC_CLEVEL_MIN)
-        return LZ4_saveDict ((LZ4_stream_t*)(cctxPtr->lz4CtxPtr), (char*)(cctxPtr->tmpBuff), 64 KB);
-    return LZ4_saveDictHC ((LZ4_streamHC_t*)(cctxPtr->lz4CtxPtr), (char*)(cctxPtr->tmpBuff), 64 KB);
-}
-
-typedef enum { notDone, fromTmpBuffer, fromSrcBuffer } LZ4F_lastBlockStatus;
-
-/*! LZ4F_compressUpdate() :
-* LZ4F_compressUpdate() can be called repeatedly to compress as much data as necessary.
-* The most important rule is that dstBuffer MUST be large enough (dstCapacity) to ensure compression completion even in worst case.
-* If this condition is not respected, LZ4F_compress() will fail (result is an errorCode)
-* You can get the minimum value of dstCapacity by using LZ4F_compressBound()
-* The LZ4F_compressOptions_t structure is optional : you can provide NULL as argument.
-* The result of the function is the number of bytes written into dstBuffer : it can be zero, meaning input data was just buffered.
-* The function outputs an error code if it fails (can be tested using LZ4F_isError())
-*/
-size_t LZ4F_compressUpdate(LZ4F_cctx* cctxPtr, void* dstBuffer, size_t dstCapacity, const void* srcBuffer, size_t srcSize, const LZ4F_compressOptions_t* compressOptionsPtr)
-{
-    LZ4F_compressOptions_t cOptionsNull;
-    size_t const blockSize = cctxPtr->maxBlockSize;
-    const BYTE* srcPtr = (const BYTE*)srcBuffer;
-    const BYTE* const srcEnd = srcPtr + srcSize;
-    BYTE* const dstStart = (BYTE*)dstBuffer;
-    BYTE* dstPtr = dstStart;
-    LZ4F_lastBlockStatus lastBlockCompressed = notDone;
-    compressFunc_t const compress = LZ4F_selectCompression(cctxPtr->prefs.frameInfo.blockMode, cctxPtr->prefs.compressionLevel);
-
-
-    if (cctxPtr->cStage != 1) return err0r(LZ4F_ERROR_GENERIC);
-    if (dstCapacity < LZ4F_compressBound_internal(srcSize, &(cctxPtr->prefs), cctxPtr->tmpInSize)) return err0r(LZ4F_ERROR_dstMaxSize_tooSmall);
-    memset(&cOptionsNull, 0, sizeof(cOptionsNull));
-    if (compressOptionsPtr == NULL) compressOptionsPtr = &cOptionsNull;
-
-    /* complete tmp buffer */
-    if (cctxPtr->tmpInSize > 0) {   /* some data already within tmp buffer */
-        size_t const sizeToCopy = blockSize - cctxPtr->tmpInSize;
-        if (sizeToCopy > srcSize) {
-            /* add src to tmpIn buffer */
-            memcpy(cctxPtr->tmpIn + cctxPtr->tmpInSize, srcBuffer, srcSize);
-            srcPtr = srcEnd;
-            cctxPtr->tmpInSize += srcSize;
-            /* still needs some CRC */
-        } else {
-            /* complete tmpIn block and then compress it */
-            lastBlockCompressed = fromTmpBuffer;
-            memcpy(cctxPtr->tmpIn + cctxPtr->tmpInSize, srcBuffer, sizeToCopy);
-            srcPtr += sizeToCopy;
-
-            dstPtr += LZ4F_compressBlock(dstPtr, cctxPtr->tmpIn, blockSize, compress, cctxPtr->lz4CtxPtr, cctxPtr->prefs.compressionLevel);
-
-            if (cctxPtr->prefs.frameInfo.blockMode==LZ4F_blockLinked) cctxPtr->tmpIn += blockSize;
-            cctxPtr->tmpInSize = 0;
-        }
-    }
-
-    while ((size_t)(srcEnd - srcPtr) >= blockSize) {
-        /* compress full block */
-        lastBlockCompressed = fromSrcBuffer;
-        dstPtr += LZ4F_compressBlock(dstPtr, srcPtr, blockSize, compress, cctxPtr->lz4CtxPtr, cctxPtr->prefs.compressionLevel);
-        srcPtr += blockSize;
-    }
-
-    if ((cctxPtr->prefs.autoFlush) && (srcPtr < srcEnd)) {
-        /* compress remaining input < blockSize */
-        lastBlockCompressed = fromSrcBuffer;
-        dstPtr += LZ4F_compressBlock(dstPtr, srcPtr, srcEnd - srcPtr, compress, cctxPtr->lz4CtxPtr, cctxPtr->prefs.compressionLevel);
-        srcPtr  = srcEnd;
-    }
-
-    /* preserve dictionary if necessary */
-    if ((cctxPtr->prefs.frameInfo.blockMode==LZ4F_blockLinked) && (lastBlockCompressed==fromSrcBuffer)) {
-        if (compressOptionsPtr->stableSrc) {
-            cctxPtr->tmpIn = cctxPtr->tmpBuff;
-        } else {
-            int realDictSize = LZ4F_localSaveDict(cctxPtr);
-            if (realDictSize==0) return err0r(LZ4F_ERROR_GENERIC);
-            cctxPtr->tmpIn = cctxPtr->tmpBuff + realDictSize;
-        }
-    }
-
-    /* keep tmpIn within limits */
-    if ((cctxPtr->tmpIn + blockSize) > (cctxPtr->tmpBuff + cctxPtr->maxBufferSize)   /* necessarily LZ4F_blockLinked && lastBlockCompressed==fromTmpBuffer */
-        && !(cctxPtr->prefs.autoFlush))
-    {
-        int realDictSize = LZ4F_localSaveDict(cctxPtr);
-        cctxPtr->tmpIn = cctxPtr->tmpBuff + realDictSize;
-    }
-
-    /* some input data left, necessarily < blockSize */
-    if (srcPtr < srcEnd) {
-        /* fill tmp buffer */
-        size_t const sizeToCopy = srcEnd - srcPtr;
-        memcpy(cctxPtr->tmpIn, srcPtr, sizeToCopy);
-        cctxPtr->tmpInSize = sizeToCopy;
-    }
-
-    if (cctxPtr->prefs.frameInfo.contentChecksumFlag == LZ4F_contentChecksumEnabled)
-        XXH32_update(&(cctxPtr->xxh), srcBuffer, srcSize);
-
-    cctxPtr->totalInSize += srcSize;
-    return dstPtr - dstStart;
-}
-
-
-/*! LZ4F_flush() :
-* Should you need to create compressed data immediately, without waiting for a block to be filled,
-* you can call LZ4F_flush(), which will immediately compress any remaining data stored within compressionContext.
-* The result of the function is the number of bytes written into dstBuffer
-* (it can be zero, this means there was no data left within compressionContext)
-* The function outputs an error code if it fails (can be tested using LZ4F_isError())
-* The LZ4F_compressOptions_t structure is optional : you can provide NULL as argument.
-*/
-size_t LZ4F_flush(LZ4F_cctx* cctxPtr, void* dstBuffer, size_t dstCapacity, const LZ4F_compressOptions_t* compressOptionsPtr)
-{
-    BYTE* const dstStart = (BYTE*)dstBuffer;
-    BYTE* dstPtr = dstStart;
-    compressFunc_t compress;
-
-    if (cctxPtr->tmpInSize == 0) return 0;   /* nothing to flush */
-    if (cctxPtr->cStage != 1) return err0r(LZ4F_ERROR_GENERIC);
-    if (dstCapacity < (cctxPtr->tmpInSize + 4)) return err0r(LZ4F_ERROR_dstMaxSize_tooSmall);   /* +4 : block header(4)  */
-    (void)compressOptionsPtr;   /* not yet useful */
-
-    /* select compression function */
-    compress = LZ4F_selectCompression(cctxPtr->prefs.frameInfo.blockMode, cctxPtr->prefs.compressionLevel);
-
-    /* compress tmp buffer */
-    dstPtr += LZ4F_compressBlock(dstPtr, cctxPtr->tmpIn, cctxPtr->tmpInSize, compress, cctxPtr->lz4CtxPtr, cctxPtr->prefs.compressionLevel);
-    if (cctxPtr->prefs.frameInfo.blockMode==LZ4F_blockLinked) cctxPtr->tmpIn += cctxPtr->tmpInSize;
-    cctxPtr->tmpInSize = 0;
-
-    /* keep tmpIn within limits */
-    if ((cctxPtr->tmpIn + cctxPtr->maxBlockSize) > (cctxPtr->tmpBuff + cctxPtr->maxBufferSize)) {  /* necessarily LZ4F_blockLinked */
-        int realDictSize = LZ4F_localSaveDict(cctxPtr);
-        cctxPtr->tmpIn = cctxPtr->tmpBuff + realDictSize;
-    }
-
-    return dstPtr - dstStart;
-}
-
-
-/*! LZ4F_compressEnd() :
-* When you want to properly finish the compressed frame, just call LZ4F_compressEnd().
-* It will flush whatever data remained within compressionContext (like LZ4F_flush())
-* but also properly finalize the frame, with an endMark and a checksum.
-* The result of the function is the number of bytes written into dstBuffer (necessarily >= 4 (endMark size))
-* The function outputs an error code if it fails (can be tested using LZ4F_isError())
-* The LZ4F_compressOptions_t structure is optional : you can provide NULL as argument.
-* compressionContext can then be used again, starting with LZ4F_compressBegin(). The preferences will remain the same.
-*/
-size_t LZ4F_compressEnd(LZ4F_cctx* cctxPtr, void* dstBuffer, size_t dstMaxSize, const LZ4F_compressOptions_t* compressOptionsPtr)
-{
-    BYTE* const dstStart = (BYTE*)dstBuffer;
-    BYTE* dstPtr = dstStart;
-
-    size_t const flushSize = LZ4F_flush(cctxPtr, dstBuffer, dstMaxSize, compressOptionsPtr);
-    if (LZ4F_isError(flushSize)) return flushSize;
-    dstPtr += flushSize;
-
-    LZ4F_writeLE32(dstPtr, 0);
-    dstPtr+=4;   /* endMark */
-
-    if (cctxPtr->prefs.frameInfo.contentChecksumFlag == LZ4F_contentChecksumEnabled) {
-        U32 const xxh = XXH32_digest(&(cctxPtr->xxh));
-        LZ4F_writeLE32(dstPtr, xxh);
-        dstPtr+=4;   /* content Checksum */
-    }
-
-    cctxPtr->cStage = 0;   /* state is now re-usable (with identical preferences) */
-    cctxPtr->maxBufferSize = 0;  /* reuse HC context */
-
-    if (cctxPtr->prefs.frameInfo.contentSize) {
-        if (cctxPtr->prefs.frameInfo.contentSize != cctxPtr->totalInSize)
-            return err0r(LZ4F_ERROR_frameSize_wrong);
-    }
-
-    return dstPtr - dstStart;
-}
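
/* A minimal streaming sketch tying the calls above together (illustrative only; it assumes
 * <stdio.h> for the FILE-based I/O, which lz4frame.c itself does not include, and the
 * helper name and chunk size are this sketch's own choices): create a context, write the
 * frame header with LZ4F_compressBegin(), feed input through LZ4F_compressUpdate() with a
 * destination sized by LZ4F_compressBound(), then finalize with LZ4F_compressEnd(). */
static int LZ4F_example_compressStream(FILE* fin, FILE* fout)
{
    enum { EXAMPLE_CHUNK = 64 KB };
    LZ4F_cctx* cctx = NULL;
    char* src = NULL;
    char* dst = NULL;
    size_t const dstCapacity = LZ4F_compressBound(EXAMPLE_CHUNK, NULL);   /* covers header, any block, and the frame end */
    int result = -1;

    if (LZ4F_isError(LZ4F_createCompressionContext(&cctx, LZ4F_VERSION))) return -1;
    src = (char*)ALLOCATOR(EXAMPLE_CHUNK);
    dst = (char*)ALLOCATOR(dstCapacity);
    if (src==NULL || dst==NULL) goto _cleanup;

    {   size_t const headerSize = LZ4F_compressBegin(cctx, dst, dstCapacity, NULL);   /* frame header */
        if (LZ4F_isError(headerSize)) goto _cleanup;
        fwrite(dst, 1, headerSize, fout);
    }
    for (;;) {
        size_t const readSize = fread(src, 1, EXAMPLE_CHUNK, fin);
        size_t cSize;
        if (readSize == 0) break;
        cSize = LZ4F_compressUpdate(cctx, dst, dstCapacity, src, readSize, NULL);
        if (LZ4F_isError(cSize)) goto _cleanup;
        fwrite(dst, 1, cSize, fout);   /* cSize may be 0 when input was only buffered */
    }
    {   size_t const tailSize = LZ4F_compressEnd(cctx, dst, dstCapacity, NULL);   /* end mark (+ content checksum) */
        if (LZ4F_isError(tailSize)) goto _cleanup;
        fwrite(dst, 1, tailSize, fout);
    }
    result = 0;

_cleanup:
    FREEMEM(src);
    FREEMEM(dst);
    LZ4F_freeCompressionContext(cctx);
    return result;
}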
-
-
-/*-***************************************************
-*   Frame Decompression
-*****************************************************/
-
-struct LZ4F_dctx_s {
-    LZ4F_frameInfo_t frameInfo;
-    U32    version;
-    U32    dStage;
-    U64    frameRemainingSize;
-    size_t maxBlockSize;
-    size_t maxBufferSize;
-    BYTE*  tmpIn;
-    size_t tmpInSize;
-    size_t tmpInTarget;
-    BYTE*  tmpOutBuffer;
-    const BYTE*  dict;
-    size_t dictSize;
-    BYTE*  tmpOut;
-    size_t tmpOutSize;
-    size_t tmpOutStart;
-    XXH32_state_t xxh;
-    BYTE   header[16];
-};  /* typedef'd to LZ4F_dctx in lz4frame.h */
-
-
-/*! LZ4F_createDecompressionContext() :
-*   Create a decompressionContext object, which will track all decompression operations.
-*   Provides a pointer to a fully allocated and initialized LZ4F_decompressionContext object.
-*   Object can later be released using LZ4F_freeDecompressionContext().
-*   @return : if != 0, there was an error during context creation.
-*/
-LZ4F_errorCode_t LZ4F_createDecompressionContext(LZ4F_dctx** LZ4F_decompressionContextPtr, unsigned versionNumber)
-{
-    LZ4F_dctx* const dctxPtr = (LZ4F_dctx*)ALLOCATOR(sizeof(LZ4F_dctx));
-    if (dctxPtr==NULL) return err0r(LZ4F_ERROR_GENERIC);
-
-    dctxPtr->version = versionNumber;
-    *LZ4F_decompressionContextPtr = dctxPtr;
-    return LZ4F_OK_NoError;
-}
-
-LZ4F_errorCode_t LZ4F_freeDecompressionContext(LZ4F_dctx* dctxPtr)
-{
-    LZ4F_errorCode_t result = LZ4F_OK_NoError;
-    if (dctxPtr != NULL) {   /* can accept NULL input, like free() */
-      result = (LZ4F_errorCode_t)dctxPtr->dStage;
-      FREEMEM(dctxPtr->tmpIn);
-      FREEMEM(dctxPtr->tmpOutBuffer);
-      FREEMEM(dctxPtr);
-    }
-    return result;
-}
-
-
-/*==---   Streaming Decompression operations   ---==*/
-
-typedef enum {
-    dstage_getHeader=0, dstage_storeHeader,
-    dstage_init,
-    dstage_getCBlockSize, dstage_storeCBlockSize,
-    dstage_copyDirect,
-    dstage_getCBlock, dstage_storeCBlock,
-    dstage_decodeCBlock, dstage_decodeCBlock_intoDst,
-    dstage_decodeCBlock_intoTmp, dstage_flushOut,
-    dstage_getSuffix, dstage_storeSuffix,
-    dstage_getSFrameSize, dstage_storeSFrameSize,
-    dstage_skipSkippable
-} dStage_t;
-
-LZ4F_errorCode_t LZ4F_resetDecompressionContext(LZ4F_dctx* dctx)
-{
-    dctx->dStage = dstage_getHeader;
-    return 0;
-}
-
-
-/*! LZ4F_headerSize() :
-*   @return : size of frame header
-*             or an error code, which can be tested using LZ4F_isError()
-*/
-static size_t LZ4F_headerSize(const void* src, size_t srcSize)
-{
-    /* minimal srcSize to determine header size */
-    if (srcSize < 5) return err0r(LZ4F_ERROR_frameHeader_incomplete);
-
-    /* special case : skippable frames */
-    if ((LZ4F_readLE32(src) & 0xFFFFFFF0U) == LZ4F_MAGIC_SKIPPABLE_START) return 8;
-
-    /* control magic number */
-    if (LZ4F_readLE32(src) != LZ4F_MAGICNUMBER) return err0r(LZ4F_ERROR_frameType_unknown);
-
-    /* Frame Header Size */
-    {   BYTE const FLG = ((const BYTE*)src)[4];
-        U32 const contentSizeFlag = (FLG>>3) & _1BIT;
-        return contentSizeFlag ? maxFHSize : minFHSize;
-    }
-}
-
-
-/*! LZ4F_decodeHeader() :
-   input   : `src` points at the **beginning of the frame**
-   output  : set internal values of dctx, such as
-             dctxPtr->frameInfo and dctxPtr->dStage.
-             Also allocates internal buffers.
-   @return : nb Bytes read from src (necessarily <= srcSize)
-             or an error code (testable with LZ4F_isError())
-*/
-static size_t LZ4F_decodeHeader(LZ4F_dctx* dctxPtr, const void* src, size_t srcSize)
-{
-    unsigned blockMode, contentSizeFlag, contentChecksumFlag, blockSizeID;
-    size_t frameHeaderSize;
-    const BYTE* srcPtr = (const BYTE*)src;
-
-    /* need to decode header to get frameInfo */
-    if (srcSize < minFHSize) return err0r(LZ4F_ERROR_frameHeader_incomplete);   /* minimal frame header size */
-    memset(&(dctxPtr->frameInfo), 0, sizeof(dctxPtr->frameInfo));
-
-    /* special case : skippable frames */
-    if ((LZ4F_readLE32(srcPtr) & 0xFFFFFFF0U) == LZ4F_MAGIC_SKIPPABLE_START) {
-        dctxPtr->frameInfo.frameType = LZ4F_skippableFrame;
-        if (src == (void*)(dctxPtr->header)) {
-            dctxPtr->tmpInSize = srcSize;
-            dctxPtr->tmpInTarget = 8;
-            dctxPtr->dStage = dstage_storeSFrameSize;
-            return srcSize;
-        } else {
-            dctxPtr->dStage = dstage_getSFrameSize;
-            return 4;
-        }
-    }
-
-    /* control magic number */
-    if (LZ4F_readLE32(srcPtr) != LZ4F_MAGICNUMBER) return err0r(LZ4F_ERROR_frameType_unknown);
-    dctxPtr->frameInfo.frameType = LZ4F_frame;
-
-    /* Flags */
-    {   U32 const FLG = srcPtr[4];
-        U32 const version = (FLG>>6) & _2BITS;
-        U32 const blockChecksumFlag = (FLG>>4) & _1BIT;
-        blockMode = (FLG>>5) & _1BIT;
-        contentSizeFlag = (FLG>>3) & _1BIT;
-        contentChecksumFlag = (FLG>>2) & _1BIT;
-        /* validate */
-        if (((FLG>>0)&_2BITS) != 0) return err0r(LZ4F_ERROR_reservedFlag_set); /* Reserved bits */
-        if (version != 1) return err0r(LZ4F_ERROR_headerVersion_wrong);        /* Version Number, only supported value */
-        if (blockChecksumFlag != 0) return err0r(LZ4F_ERROR_blockChecksum_unsupported); /* Not supported for the time being */
-    }
-
-    /* Frame Header Size */
-    frameHeaderSize = contentSizeFlag ? maxFHSize : minFHSize;
-
-    if (srcSize < frameHeaderSize) {
-        /* not enough input to fully decode frame header */
-        if (srcPtr != dctxPtr->header)
-            memcpy(dctxPtr->header, srcPtr, srcSize);
-        dctxPtr->tmpInSize = srcSize;
-        dctxPtr->tmpInTarget = frameHeaderSize;
-        dctxPtr->dStage = dstage_storeHeader;
-        return srcSize;
-    }
-
-    {   U32 const BD = srcPtr[5];
-        blockSizeID = (BD>>4) & _3BITS;
-        /* validate */
-        if (((BD>>7)&_1BIT) != 0) return err0r(LZ4F_ERROR_reservedFlag_set);   /* Reserved bit */
-        if (blockSizeID < 4) return err0r(LZ4F_ERROR_maxBlockSize_invalid);    /* 4-7 only supported values for the time being */
-        if (((BD>>0)&_4BITS) != 0) return err0r(LZ4F_ERROR_reservedFlag_set);  /* Reserved bits */
-    }
-
-    /* check header */
-    { BYTE const HC = LZ4F_headerChecksum(srcPtr+4, frameHeaderSize-5);
-      if (HC != srcPtr[frameHeaderSize-1]) return err0r(LZ4F_ERROR_headerChecksum_invalid); }
-
-    /* save */
-    dctxPtr->frameInfo.blockMode = (LZ4F_blockMode_t)blockMode;
-    dctxPtr->frameInfo.contentChecksumFlag = (LZ4F_contentChecksum_t)contentChecksumFlag;
-    dctxPtr->frameInfo.blockSizeID = (LZ4F_blockSizeID_t)blockSizeID;
-    dctxPtr->maxBlockSize = LZ4F_getBlockSize(blockSizeID);
-    if (contentSizeFlag)
-        dctxPtr->frameRemainingSize = dctxPtr->frameInfo.contentSize = LZ4F_readLE64(srcPtr+6);
-
-    dctxPtr->dStage = dstage_init;
-
-    return frameHeaderSize;
-}
-
-
-/*! LZ4F_getFrameInfo() :
- * This function extracts frame parameters (such as max blockSize, frame checksum, etc.).
- * Its usage is optional. The objective is to provide relevant information for allocation purposes.
- * This function works in 2 situations :
- *   - At the beginning of a new frame, in which case it will decode this information from `srcBuffer`, and start the decoding process.
- *     Amount of input data provided must be large enough to successfully decode the frame header.
- *     A header size is variable, but is guaranteed to be <= LZ4F_HEADER_SIZE_MAX bytes. It's possible to provide more input data than this minimum.
- *   - After decoding has been started. In which case, no input is read, frame parameters are extracted from dctx.
- * The number of bytes consumed from srcBuffer will be updated within *srcSizePtr (necessarily <= original value).
- * Decompression must resume from (srcBuffer + *srcSizePtr).
- * @return : a hint about how many srcSize bytes LZ4F_decompress() expects for next call,
- *           or an error code which can be tested using LZ4F_isError()
- * note 1 : in case of error, dctx is not modified. Decoding operations can resume from where they stopped.
- * note 2 : frame parameters are *copied into* an already allocated LZ4F_frameInfo_t structure.
- */
-LZ4F_errorCode_t LZ4F_getFrameInfo(LZ4F_dctx* dctxPtr, LZ4F_frameInfo_t* frameInfoPtr,
-                                   const void* srcBuffer, size_t* srcSizePtr)
-{
-    if (dctxPtr->dStage > dstage_storeHeader) {  /* assumption :  dstage_* header enum at beginning of range */
-        /* frameInfo already decoded */
-        size_t o=0, i=0;
-        *srcSizePtr = 0;
-        *frameInfoPtr = dctxPtr->frameInfo;
-        return LZ4F_decompress(dctxPtr, NULL, &o, NULL, &i, NULL);  /* returns : recommended nb of bytes for LZ4F_decompress() */
-    } else {
-        if (dctxPtr->dStage == dstage_storeHeader) {
-            /* frame decoding already started, in the middle of header => automatic fail */
-            *srcSizePtr = 0;
-            return err0r(LZ4F_ERROR_frameDecoding_alreadyStarted);
-        } else {
-            size_t decodeResult;
-            size_t const hSize = LZ4F_headerSize(srcBuffer, *srcSizePtr);
-            if (LZ4F_isError(hSize)) { *srcSizePtr=0; return hSize; }
-            if (*srcSizePtr < hSize) { *srcSizePtr=0; return err0r(LZ4F_ERROR_frameHeader_incomplete); }
-
-            decodeResult = LZ4F_decodeHeader(dctxPtr, srcBuffer, hSize);
-            if (LZ4F_isError(decodeResult)) {
-                *srcSizePtr = 0;
-            } else {
-                *srcSizePtr = decodeResult;
-                decodeResult = BHSize;   /* block header size */
-            }
-            *frameInfoPtr = dctxPtr->frameInfo;
-            return decodeResult;
-    }   }
-}
-
-
-/* trivial redirector, for common prototype */
-static int LZ4F_decompress_safe (const char* source, char* dest, int compressedSize, int maxDecompressedSize, const char* dictStart, int dictSize)
-{
-    (void)dictStart; (void)dictSize;
-    return LZ4_decompress_safe (source, dest, compressedSize, maxDecompressedSize);
-}
-
-
-static void LZ4F_updateDict(LZ4F_dctx* dctxPtr, const BYTE* dstPtr, size_t dstSize, const BYTE* dstPtr0, unsigned withinTmp)
-{
-    if (dctxPtr->dictSize==0)
-        dctxPtr->dict = (const BYTE*)dstPtr;   /* priority to dictionary continuity */
-
-    if (dctxPtr->dict + dctxPtr->dictSize == dstPtr) {  /* dictionary continuity */
-        dctxPtr->dictSize += dstSize;
-        return;
-    }
-
-    if (dstPtr - dstPtr0 + dstSize >= 64 KB) {  /* dstBuffer large enough to become dictionary */
-        dctxPtr->dict = (const BYTE*)dstPtr0;
-        dctxPtr->dictSize = dstPtr - dstPtr0 + dstSize;
-        return;
-    }
-
-    if ((withinTmp) && (dctxPtr->dict == dctxPtr->tmpOutBuffer)) {
-        /* assumption : dctxPtr->dict + dctxPtr->dictSize == dctxPtr->tmpOut + dctxPtr->tmpOutStart */
-        dctxPtr->dictSize += dstSize;
-        return;
-    }
-
-    if (withinTmp) { /* copy relevant dict portion in front of tmpOut within tmpOutBuffer */
-        size_t const preserveSize = dctxPtr->tmpOut - dctxPtr->tmpOutBuffer;
-        size_t copySize = 64 KB - dctxPtr->tmpOutSize;
-        const BYTE* const oldDictEnd = dctxPtr->dict + dctxPtr->dictSize - dctxPtr->tmpOutStart;
-        if (dctxPtr->tmpOutSize > 64 KB) copySize = 0;
-        if (copySize > preserveSize) copySize = preserveSize;
-
-        memcpy(dctxPtr->tmpOutBuffer + preserveSize - copySize, oldDictEnd - copySize, copySize);
-
-        dctxPtr->dict = dctxPtr->tmpOutBuffer;
-        dctxPtr->dictSize = preserveSize + dctxPtr->tmpOutStart + dstSize;
-        return;
-    }
-
-    if (dctxPtr->dict == dctxPtr->tmpOutBuffer) {    /* copy dst into tmp to complete dict */
-        if (dctxPtr->dictSize + dstSize > dctxPtr->maxBufferSize) {  /* tmp buffer not large enough */
-            size_t const preserveSize = 64 KB - dstSize;   /* note : dstSize < 64 KB */
-            memcpy(dctxPtr->tmpOutBuffer, dctxPtr->dict + dctxPtr->dictSize - preserveSize, preserveSize);
-            dctxPtr->dictSize = preserveSize;
-        }
-        memcpy(dctxPtr->tmpOutBuffer + dctxPtr->dictSize, dstPtr, dstSize);
-        dctxPtr->dictSize += dstSize;
-        return;
-    }
-
-    /* join dict & dest into tmp */
-    {   size_t preserveSize = 64 KB - dstSize;   /* note : dstSize < 64 KB */
-        if (preserveSize > dctxPtr->dictSize) preserveSize = dctxPtr->dictSize;
-        memcpy(dctxPtr->tmpOutBuffer, dctxPtr->dict + dctxPtr->dictSize - preserveSize, preserveSize);
-        memcpy(dctxPtr->tmpOutBuffer + preserveSize, dstPtr, dstSize);
-        dctxPtr->dict = dctxPtr->tmpOutBuffer;
-        dctxPtr->dictSize = preserveSize + dstSize;
-    }
-}
-
-
-
-/*! LZ4F_decompress() :
-* Call this function repetitively to regenerate data compressed within srcBuffer.
-* The function will attempt to decode up to *srcSizePtr bytes from srcBuffer, into dstBuffer of capacity *dstSizePtr.
-*
-* The number of bytes regenerated into dstBuffer will be provided within *dstSizePtr (necessarily <= original value).
-*
-* The number of bytes effectively read from srcBuffer will be provided within *srcSizePtr (necessarily <= original value).
-* If the number of bytes read is < number of bytes provided, then the decompression operation is not complete.
-* Remaining data will have to be presented again in a subsequent invocation.
-*
-* The function result is a hint of the best srcSize to use for the next call to LZ4F_decompress.
-* Basically, it's the size of the current (or remaining) compressed block + header of next block.
-* Respecting the hint provides some boost to performance, since it allows less buffer shuffling.
-* Note that this is just a hint; it's always possible to provide any srcSize value.
-* When a frame is fully decoded, @return will be 0.
-* If decompression failed, @return is an error code which can be tested using LZ4F_isError().
-*/
-size_t LZ4F_decompress(LZ4F_dctx* dctxPtr,
-                       void* dstBuffer, size_t* dstSizePtr,
-                       const void* srcBuffer, size_t* srcSizePtr,
-                       const LZ4F_decompressOptions_t* decompressOptionsPtr)
-{
-    LZ4F_decompressOptions_t optionsNull;
-    const BYTE* const srcStart = (const BYTE*)srcBuffer;
-    const BYTE* const srcEnd = srcStart + *srcSizePtr;
-    const BYTE* srcPtr = srcStart;
-    BYTE* const dstStart = (BYTE*)dstBuffer;
-    BYTE* const dstEnd = dstStart + *dstSizePtr;
-    BYTE* dstPtr = dstStart;
-    const BYTE* selectedIn = NULL;
-    unsigned doAnotherStage = 1;
-    size_t nextSrcSizeHint = 1;
-
-
-    memset(&optionsNull, 0, sizeof(optionsNull));
-    if (decompressOptionsPtr==NULL) decompressOptionsPtr = &optionsNull;
-    *srcSizePtr = 0;
-    *dstSizePtr = 0;
-
-    /* behaves like a state machine */
-
-    while (doAnotherStage) {
-
-        switch(dctxPtr->dStage)
-        {
-
-        case dstage_getHeader:
-            if ((size_t)(srcEnd-srcPtr) >= maxFHSize) {  /* enough to decode - shortcut */
-                LZ4F_errorCode_t const hSize = LZ4F_decodeHeader(dctxPtr, srcPtr, srcEnd-srcPtr);  /* will change dStage appropriately */
-                if (LZ4F_isError(hSize)) return hSize;
-                srcPtr += hSize;
-                break;
-            }
-            dctxPtr->tmpInSize = 0;
-            if (srcEnd-srcPtr == 0) return minFHSize;   /* 0-size input */
-            dctxPtr->tmpInTarget = minFHSize;   /* minimum to attempt decode */
-            dctxPtr->dStage = dstage_storeHeader;
-            /* pass-through */
-
-        case dstage_storeHeader:
-            {   size_t sizeToCopy = dctxPtr->tmpInTarget - dctxPtr->tmpInSize;
-                if (sizeToCopy > (size_t)(srcEnd - srcPtr)) sizeToCopy =  srcEnd - srcPtr;
-                memcpy(dctxPtr->header + dctxPtr->tmpInSize, srcPtr, sizeToCopy);
-                dctxPtr->tmpInSize += sizeToCopy;
-                srcPtr += sizeToCopy;
-                if (dctxPtr->tmpInSize < dctxPtr->tmpInTarget) {
-                    nextSrcSizeHint = (dctxPtr->tmpInTarget - dctxPtr->tmpInSize) + BHSize;   /* rest of header + nextBlockHeader */
-                    doAnotherStage = 0;   /* not enough src data, ask for some more */
-                    break;
-                }
-                {   LZ4F_errorCode_t const hSize = LZ4F_decodeHeader(dctxPtr, dctxPtr->header, dctxPtr->tmpInTarget);
-                    if (LZ4F_isError(hSize)) return hSize;
-                }
-                break;
-            }
-
-        case dstage_init:
-            if (dctxPtr->frameInfo.contentChecksumFlag) XXH32_reset(&(dctxPtr->xxh), 0);
-            /* internal buffers allocation */
-            {   size_t const bufferNeeded = dctxPtr->maxBlockSize + ((dctxPtr->frameInfo.blockMode==LZ4F_blockLinked) * 128 KB);
-                if (bufferNeeded > dctxPtr->maxBufferSize) {   /* tmp buffers too small */
-                    dctxPtr->maxBufferSize = 0;   /* ensure allocation will be re-attempted on next entry*/
-                    FREEMEM(dctxPtr->tmpIn);
-                    dctxPtr->tmpIn = (BYTE*)ALLOCATOR(dctxPtr->maxBlockSize);
-                    if (dctxPtr->tmpIn == NULL) return err0r(LZ4F_ERROR_allocation_failed);
-                    FREEMEM(dctxPtr->tmpOutBuffer);
-                    dctxPtr->tmpOutBuffer= (BYTE*)ALLOCATOR(bufferNeeded);
-                    if (dctxPtr->tmpOutBuffer== NULL) return err0r(LZ4F_ERROR_allocation_failed);
-                    dctxPtr->maxBufferSize = bufferNeeded;
-            }   }
-            dctxPtr->tmpInSize = 0;
-            dctxPtr->tmpInTarget = 0;
-            dctxPtr->dict = dctxPtr->tmpOutBuffer;
-            dctxPtr->dictSize = 0;
-            dctxPtr->tmpOut = dctxPtr->tmpOutBuffer;
-            dctxPtr->tmpOutStart = 0;
-            dctxPtr->tmpOutSize = 0;
-
-            dctxPtr->dStage = dstage_getCBlockSize;
-            /* pass-through */
-
-        case dstage_getCBlockSize:
-            if ((size_t)(srcEnd - srcPtr) >= BHSize) {
-                selectedIn = srcPtr;
-                srcPtr += BHSize;
-            } else {
-                /* not enough input to read cBlockSize field */
-                dctxPtr->tmpInSize = 0;
-                dctxPtr->dStage = dstage_storeCBlockSize;
-            }
-
-            if (dctxPtr->dStage == dstage_storeCBlockSize)   /* can be skipped */
-        case dstage_storeCBlockSize:
-            {   size_t sizeToCopy = BHSize - dctxPtr->tmpInSize;
-                if (sizeToCopy > (size_t)(srcEnd - srcPtr)) sizeToCopy = srcEnd - srcPtr;
-                memcpy(dctxPtr->tmpIn + dctxPtr->tmpInSize, srcPtr, sizeToCopy);
-                srcPtr += sizeToCopy;
-                dctxPtr->tmpInSize += sizeToCopy;
-                if (dctxPtr->tmpInSize < BHSize) {   /* not enough input to get full cBlockSize; wait for more */
-                    nextSrcSizeHint = BHSize - dctxPtr->tmpInSize;
-                    doAnotherStage  = 0;
-                    break;
-                }
-                selectedIn = dctxPtr->tmpIn;
-            }
-
-        /* case dstage_decodeCBlockSize: */   /* no more direct access, to prevent scan-build warning */
-            {   size_t const nextCBlockSize = LZ4F_readLE32(selectedIn) & 0x7FFFFFFFU;
-                if (nextCBlockSize==0) {  /* frameEnd signal, no more CBlock */
-                    dctxPtr->dStage = dstage_getSuffix;
-                    break;
-                }
-                if (nextCBlockSize > dctxPtr->maxBlockSize) return err0r(LZ4F_ERROR_GENERIC);   /* invalid cBlockSize */
-                dctxPtr->tmpInTarget = nextCBlockSize;
-                if (LZ4F_readLE32(selectedIn) & LZ4F_BLOCKUNCOMPRESSED_FLAG) {
-                    dctxPtr->dStage = dstage_copyDirect;
-                    break;
-                }
-                dctxPtr->dStage = dstage_getCBlock;
-                if (dstPtr==dstEnd) {
-                    nextSrcSizeHint = nextCBlockSize + BHSize;
-                    doAnotherStage = 0;
-                }
-                break;
-            }
-
-        case dstage_copyDirect:   /* uncompressed block */
-            {   size_t sizeToCopy = dctxPtr->tmpInTarget;
-                if ((size_t)(srcEnd-srcPtr) < sizeToCopy) sizeToCopy = srcEnd - srcPtr;  /* not enough input to read full block */
-                if ((size_t)(dstEnd-dstPtr) < sizeToCopy) sizeToCopy = dstEnd - dstPtr;
-                memcpy(dstPtr, srcPtr, sizeToCopy);
-                if (dctxPtr->frameInfo.contentChecksumFlag) XXH32_update(&(dctxPtr->xxh), srcPtr, sizeToCopy);
-                if (dctxPtr->frameInfo.contentSize) dctxPtr->frameRemainingSize -= sizeToCopy;
-
-                /* dictionary management */
-                if (dctxPtr->frameInfo.blockMode==LZ4F_blockLinked)
-                    LZ4F_updateDict(dctxPtr, dstPtr, sizeToCopy, dstStart, 0);
-
-                srcPtr += sizeToCopy;
-                dstPtr += sizeToCopy;
-                if (sizeToCopy == dctxPtr->tmpInTarget) {  /* all copied */
-                    dctxPtr->dStage = dstage_getCBlockSize;
-                    break;
-                }
-                dctxPtr->tmpInTarget -= sizeToCopy;   /* still need to copy more */
-                nextSrcSizeHint = dctxPtr->tmpInTarget + BHSize;
-                doAnotherStage = 0;
-                break;
-            }
-
-        case dstage_getCBlock:   /* entry from dstage_decodeCBlockSize */
-            if ((size_t)(srcEnd-srcPtr) < dctxPtr->tmpInTarget) {
-                dctxPtr->tmpInSize = 0;
-                dctxPtr->dStage = dstage_storeCBlock;
-                break;
-            }
-            selectedIn = srcPtr;
-            srcPtr += dctxPtr->tmpInTarget;
-            dctxPtr->dStage = dstage_decodeCBlock;
-            break;
-
-        case dstage_storeCBlock:
-            {   size_t sizeToCopy = dctxPtr->tmpInTarget - dctxPtr->tmpInSize;
-                if (sizeToCopy > (size_t)(srcEnd-srcPtr)) sizeToCopy = srcEnd-srcPtr;
-                memcpy(dctxPtr->tmpIn + dctxPtr->tmpInSize, srcPtr, sizeToCopy);
-                dctxPtr->tmpInSize += sizeToCopy;
-                srcPtr += sizeToCopy;
-                if (dctxPtr->tmpInSize < dctxPtr->tmpInTarget) { /* need more input */
-                    nextSrcSizeHint = (dctxPtr->tmpInTarget - dctxPtr->tmpInSize) + BHSize;
-                    doAnotherStage=0;
-                    break;
-                }
-                selectedIn = dctxPtr->tmpIn;
-                dctxPtr->dStage = dstage_decodeCBlock;
-                /* pass-through */
-            }
-
-        case dstage_decodeCBlock:
-            if ((size_t)(dstEnd-dstPtr) < dctxPtr->maxBlockSize)   /* not enough room in dst : decode into tmpOut */
-                dctxPtr->dStage = dstage_decodeCBlock_intoTmp;
-            else
-                dctxPtr->dStage = dstage_decodeCBlock_intoDst;
-            break;
-
-        case dstage_decodeCBlock_intoDst:
-            {   int (*decoder)(const char*, char*, int, int, const char*, int);
-                int decodedSize;
-
-                if (dctxPtr->frameInfo.blockMode == LZ4F_blockLinked)
-                    decoder = LZ4_decompress_safe_usingDict;
-                else
-                    decoder = LZ4F_decompress_safe;
-
-                decodedSize = decoder((const char*)selectedIn, (char*)dstPtr, (int)dctxPtr->tmpInTarget, (int)dctxPtr->maxBlockSize, (const char*)dctxPtr->dict, (int)dctxPtr->dictSize);
-                if (decodedSize < 0) return err0r(LZ4F_ERROR_GENERIC);   /* decompression failed */
-                if (dctxPtr->frameInfo.contentChecksumFlag) XXH32_update(&(dctxPtr->xxh), dstPtr, decodedSize);
-                if (dctxPtr->frameInfo.contentSize) dctxPtr->frameRemainingSize -= decodedSize;
-
-                /* dictionary management */
-                if (dctxPtr->frameInfo.blockMode==LZ4F_blockLinked)
-                    LZ4F_updateDict(dctxPtr, dstPtr, decodedSize, dstStart, 0);
-
-                dstPtr += decodedSize;
-                dctxPtr->dStage = dstage_getCBlockSize;
-                break;
-            }
-
-        case dstage_decodeCBlock_intoTmp:
-            /* not enough room in dst : decode into tmpOut */
-            {   int (*decoder)(const char*, char*, int, int, const char*, int);
-                int decodedSize;
-
-                if (dctxPtr->frameInfo.blockMode == LZ4F_blockLinked)
-                    decoder = LZ4_decompress_safe_usingDict;
-                else
-                    decoder = LZ4F_decompress_safe;
-
-                /* ensure enough place for tmpOut */
-                if (dctxPtr->frameInfo.blockMode == LZ4F_blockLinked) {
-                    if (dctxPtr->dict == dctxPtr->tmpOutBuffer) {
-                        if (dctxPtr->dictSize > 128 KB) {
-                            memcpy(dctxPtr->tmpOutBuffer, dctxPtr->dict + dctxPtr->dictSize - 64 KB, 64 KB);
-                            dctxPtr->dictSize = 64 KB;
-                        }
-                        dctxPtr->tmpOut = dctxPtr->tmpOutBuffer + dctxPtr->dictSize;
-                    } else {  /* dict not within tmp */
-                        size_t reservedDictSpace = dctxPtr->dictSize;
-                        if (reservedDictSpace > 64 KB) reservedDictSpace = 64 KB;
-                        dctxPtr->tmpOut = dctxPtr->tmpOutBuffer + reservedDictSpace;
-                    }
-                }
-
-                /* Decode */
-                decodedSize = decoder((const char*)selectedIn, (char*)dctxPtr->tmpOut, (int)dctxPtr->tmpInTarget, (int)dctxPtr->maxBlockSize, (const char*)dctxPtr->dict, (int)dctxPtr->dictSize);
-                if (decodedSize < 0) return err0r(LZ4F_ERROR_decompressionFailed);   /* decompression failed */
-                if (dctxPtr->frameInfo.contentChecksumFlag) XXH32_update(&(dctxPtr->xxh), dctxPtr->tmpOut, decodedSize);
-                if (dctxPtr->frameInfo.contentSize) dctxPtr->frameRemainingSize -= decodedSize;
-                dctxPtr->tmpOutSize = decodedSize;
-                dctxPtr->tmpOutStart = 0;
-                dctxPtr->dStage = dstage_flushOut;
-                break;
-            }
-
-        case dstage_flushOut:  /* flush decoded data from tmpOut to dstBuffer */
-            {   size_t sizeToCopy = dctxPtr->tmpOutSize - dctxPtr->tmpOutStart;
-                if (sizeToCopy > (size_t)(dstEnd-dstPtr)) sizeToCopy = dstEnd-dstPtr;
-                memcpy(dstPtr, dctxPtr->tmpOut + dctxPtr->tmpOutStart, sizeToCopy);
-
-                /* dictionary management */
-                if (dctxPtr->frameInfo.blockMode==LZ4F_blockLinked)
-                    LZ4F_updateDict(dctxPtr, dstPtr, sizeToCopy, dstStart, 1);
-
-                dctxPtr->tmpOutStart += sizeToCopy;
-                dstPtr += sizeToCopy;
-
-                /* end of flush ? */
-                if (dctxPtr->tmpOutStart == dctxPtr->tmpOutSize) {
-                    dctxPtr->dStage = dstage_getCBlockSize;
-                    break;
-                }
-                nextSrcSizeHint = BHSize;
-                doAnotherStage = 0;   /* still some data to flush */
-                break;
-            }
-
-        case dstage_getSuffix:
-            {   size_t const suffixSize = dctxPtr->frameInfo.contentChecksumFlag * 4;
-                if (dctxPtr->frameRemainingSize) return err0r(LZ4F_ERROR_frameSize_wrong);   /* incorrect frame size decoded */
-                if (suffixSize == 0) {  /* frame completed */
-                    nextSrcSizeHint = 0;
-                    dctxPtr->dStage = dstage_getHeader;
-                    doAnotherStage = 0;
-                    break;
-                }
-                if ((srcEnd - srcPtr) < 4) {  /* not enough size for entire CRC */
-                    dctxPtr->tmpInSize = 0;
-                    dctxPtr->dStage = dstage_storeSuffix;
-                } else {
-                    selectedIn = srcPtr;
-                    srcPtr += 4;
-                }
-            }
-
-            if (dctxPtr->dStage == dstage_storeSuffix)   /* can be skipped */
-        case dstage_storeSuffix:
-            {
-                size_t sizeToCopy = 4 - dctxPtr->tmpInSize;
-                if (sizeToCopy > (size_t)(srcEnd - srcPtr)) sizeToCopy = srcEnd - srcPtr;
-                memcpy(dctxPtr->tmpIn + dctxPtr->tmpInSize, srcPtr, sizeToCopy);
-                srcPtr += sizeToCopy;
-                dctxPtr->tmpInSize += sizeToCopy;
-                if (dctxPtr->tmpInSize < 4) { /* not enough input to read complete suffix */
-                    nextSrcSizeHint = 4 - dctxPtr->tmpInSize;
-                    doAnotherStage=0;
-                    break;
-                }
-                selectedIn = dctxPtr->tmpIn;
-            }
-
-        /* case dstage_checkSuffix: */   /* no direct call, to avoid scan-build warning */
-            {   U32 const readCRC = LZ4F_readLE32(selectedIn);
-                U32 const resultCRC = XXH32_digest(&(dctxPtr->xxh));
-                if (readCRC != resultCRC) return err0r(LZ4F_ERROR_contentChecksum_invalid);
-                nextSrcSizeHint = 0;
-                dctxPtr->dStage = dstage_getHeader;
-                doAnotherStage = 0;
-                break;
-            }
-
-        case dstage_getSFrameSize:
-            if ((srcEnd - srcPtr) >= 4) {
-                selectedIn = srcPtr;
-                srcPtr += 4;
-            } else {
-                /* not enough input to read the sFrameSize field */
-                dctxPtr->tmpInSize = 4;
-                dctxPtr->tmpInTarget = 8;
-                dctxPtr->dStage = dstage_storeSFrameSize;
-            }
-
-            if (dctxPtr->dStage == dstage_storeSFrameSize)
-        case dstage_storeSFrameSize:
-            {
-                size_t sizeToCopy = dctxPtr->tmpInTarget - dctxPtr->tmpInSize;
-                if (sizeToCopy > (size_t)(srcEnd - srcPtr)) sizeToCopy = srcEnd - srcPtr;
-                memcpy(dctxPtr->header + dctxPtr->tmpInSize, srcPtr, sizeToCopy);
-                srcPtr += sizeToCopy;
-                dctxPtr->tmpInSize += sizeToCopy;
-                if (dctxPtr->tmpInSize < dctxPtr->tmpInTarget) { /* not enough input to get full sBlockSize; wait for more */
-                    nextSrcSizeHint = dctxPtr->tmpInTarget - dctxPtr->tmpInSize;
-                    doAnotherStage = 0;
-                    break;
-                }
-                selectedIn = dctxPtr->header + 4;
-            }
-
-        /* case dstage_decodeSFrameSize: */   /* no direct access */
-            {   size_t const SFrameSize = LZ4F_readLE32(selectedIn);
-                dctxPtr->frameInfo.contentSize = SFrameSize;
-                dctxPtr->tmpInTarget = SFrameSize;
-                dctxPtr->dStage = dstage_skipSkippable;
-                break;
-            }
-
-        case dstage_skipSkippable:
-            {   size_t skipSize = dctxPtr->tmpInTarget;
-                if (skipSize > (size_t)(srcEnd-srcPtr)) skipSize = srcEnd-srcPtr;
-                srcPtr += skipSize;
-                dctxPtr->tmpInTarget -= skipSize;
-                doAnotherStage = 0;
-                nextSrcSizeHint = dctxPtr->tmpInTarget;
-                if (nextSrcSizeHint) break;
-                dctxPtr->dStage = dstage_getHeader;
-                break;
-            }
-        }
-    }
-
-    /* preserve dictionary within tmp if necessary */
-    if ( (dctxPtr->frameInfo.blockMode==LZ4F_blockLinked)
-        &&(dctxPtr->dict != dctxPtr->tmpOutBuffer)
-        &&(!decompressOptionsPtr->stableDst)
-        &&((unsigned)(dctxPtr->dStage-1) < (unsigned)(dstage_getSuffix-1))
-        )
-    {
-        if (dctxPtr->dStage == dstage_flushOut) {
-            size_t preserveSize = dctxPtr->tmpOut - dctxPtr->tmpOutBuffer;
-            size_t copySize = 64 KB - dctxPtr->tmpOutSize;
-            const BYTE* oldDictEnd = dctxPtr->dict + dctxPtr->dictSize - dctxPtr->tmpOutStart;
-            if (dctxPtr->tmpOutSize > 64 KB) copySize = 0;
-            if (copySize > preserveSize) copySize = preserveSize;
-
-            memcpy(dctxPtr->tmpOutBuffer + preserveSize - copySize, oldDictEnd - copySize, copySize);
-
-            dctxPtr->dict = dctxPtr->tmpOutBuffer;
-            dctxPtr->dictSize = preserveSize + dctxPtr->tmpOutStart;
-        } else {
-            size_t newDictSize = dctxPtr->dictSize;
-            const BYTE* oldDictEnd = dctxPtr->dict + dctxPtr->dictSize;
-            if ((newDictSize) > 64 KB) newDictSize = 64 KB;
-
-            memcpy(dctxPtr->tmpOutBuffer, oldDictEnd - newDictSize, newDictSize);
-
-            dctxPtr->dict = dctxPtr->tmpOutBuffer;
-            dctxPtr->dictSize = newDictSize;
-            dctxPtr->tmpOut = dctxPtr->tmpOutBuffer + newDictSize;
-        }
-    }
-
-    *srcSizePtr = (srcPtr - srcStart);
-    *dstSizePtr = (dstPtr - dstStart);
-    return nextSrcSizeHint;
-}
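
[Editor's note: for readers tracing the state machine above, here is a minimal, illustrative sketch (not part of this diff) of the streaming decompression loop that the srcSize hint is designed for. The helper name decompress_stream, the FILE-based I/O, and the chunk sizes are assumptions for illustration only.]

    /* Sketch: drive LZ4F_decompress() until the hint reaches 0 (end of frame).
     * Error handling is abbreviated; buffer sizes are arbitrary examples. */
    #include <stdio.h>
    #include "lz4frame.h"

    static int decompress_stream(FILE *fin, FILE *fout) {
        LZ4F_dctx *dctx;
        char in[64 * 1024], out[256 * 1024];

        if (LZ4F_isError(LZ4F_createDecompressionContext(&dctx, LZ4F_VERSION)))
            return -1;

        size_t hint = 1;                                 /* any non-zero value enters the loop */
        while (hint != 0) {
            size_t const inSize = fread(in, 1, sizeof(in), fin);
            if (inSize == 0) break;                      /* EOF before frame end: truncated input */

            size_t consumed = 0;
            while (consumed < inSize) {
                size_t srcSize = inSize - consumed;
                size_t dstSize = sizeof(out);
                hint = LZ4F_decompress(dctx, out, &dstSize, in + consumed, &srcSize, NULL);
                if (LZ4F_isError(hint)) { LZ4F_freeDecompressionContext(dctx); return -1; }
                fwrite(out, 1, dstSize, fout);
                consumed += srcSize;                     /* resume from srcBuffer + *srcSizePtr */
                if (hint == 0) break;                    /* frame fully decoded */
            }
        }

        LZ4F_freeDecompressionContext(dctx);
        return hint == 0 ? 0 : -1;                       /* 0 only when the end mark was reached */
    }

[Reading exactly the hinted number of bytes on the next call lets the decoder take the direct paths above instead of the tmpIn staging copies in dstage_storeHeader / dstage_storeCBlock.]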

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/lz4frame.h
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/lz4frame.h b/thirdparty/librdkafka-0.11.1/src/lz4frame.h
deleted file mode 100644
index 76b4e69..0000000
--- a/thirdparty/librdkafka-0.11.1/src/lz4frame.h
+++ /dev/null
@@ -1,367 +0,0 @@
-/*
-   LZ4 auto-framing library
-   Header File
-   Copyright (C) 2011-2017, Yann Collet.
-   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-   Redistribution and use in source and binary forms, with or without
-   modification, are permitted provided that the following conditions are
-   met:
-
-       * Redistributions of source code must retain the above copyright
-   notice, this list of conditions and the following disclaimer.
-       * Redistributions in binary form must reproduce the above
-   copyright notice, this list of conditions and the following disclaimer
-   in the documentation and/or other materials provided with the
-   distribution.
-
-   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-   You can contact the author at :
-   - LZ4 source repository : https://github.com/lz4/lz4
-   - LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c
-*/
-
-/* LZ4F is a stand-alone API to create LZ4-compressed frames
- * conformant with specification v1.5.1.
- * It also offers streaming capabilities.
- * lz4.h is not required when using lz4frame.h.
- * */
-
-#ifndef LZ4F_H_09782039843
-#define LZ4F_H_09782039843
-
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-/* ---   Dependency   --- */
-#include <stddef.h>   /* size_t */
-
-
-/**
-  Introduction
-
-  lz4frame.h implements LZ4 frame specification (doc/lz4_Frame_format.md).
-  lz4frame.h provides frame compression functions that take care
-  of encoding standard metadata alongside LZ4-compressed blocks.
-*/
-
-/*-***************************************************************
- *  Compiler specifics
- *****************************************************************/
-/*  LZ4_DLL_EXPORT :
- *  Enable exporting of functions when building a Windows DLL
- *  LZ4FLIB_API :
- *  Control library symbols visibility.
- */
-#if defined(LZ4_DLL_EXPORT) && (LZ4_DLL_EXPORT==1)
-#  define LZ4FLIB_API __declspec(dllexport)
-#elif defined(LZ4_DLL_IMPORT) && (LZ4_DLL_IMPORT==1)
-#  define LZ4FLIB_API __declspec(dllimport)
-#elif defined(__GNUC__) && (__GNUC__ >= 4)
-#  define LZ4FLIB_API __attribute__ ((__visibility__ ("default")))
-#else
-#  define LZ4FLIB_API
-#endif
-
-#if defined(_MSC_VER)
-#  define LZ4F_DEPRECATE(x) x   /* __declspec(deprecated) x - only works with C++ */
-#elif defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 6))
-#  define LZ4F_DEPRECATE(x) x __attribute__((deprecated))
-#else
-#  define LZ4F_DEPRECATE(x) x   /* no deprecation warning for this compiler */
-#endif
-
-
-/*-************************************
- *  Error management
- **************************************/
-typedef size_t LZ4F_errorCode_t;
-
-LZ4FLIB_API unsigned    LZ4F_isError(LZ4F_errorCode_t code);   /**< tells if a `LZ4F_errorCode_t` function result is an error code */
-LZ4FLIB_API const char* LZ4F_getErrorName(LZ4F_errorCode_t code);   /**< return error code string; useful for debugging */
-
-
-/*-************************************
- *  Frame compression types
- **************************************/
-/* #define LZ4F_DISABLE_OBSOLETE_ENUMS */  /* uncomment to disable obsolete enums */
-#ifndef LZ4F_DISABLE_OBSOLETE_ENUMS
-#  define LZ4F_OBSOLETE_ENUM(x) , LZ4F_DEPRECATE(x) = LZ4F_##x
-#else
-#  define LZ4F_OBSOLETE_ENUM(x)
-#endif
-
-/* The larger the block size, the (slightly) better the compression ratio,
- * though there are diminishing returns.
- * Larger blocks also increase memory usage on both compression and decompression sides. */
-typedef enum {
-    LZ4F_default=0,
-    LZ4F_max64KB=4,
-    LZ4F_max256KB=5,
-    LZ4F_max1MB=6,
-    LZ4F_max4MB=7
-    LZ4F_OBSOLETE_ENUM(max64KB)
-    LZ4F_OBSOLETE_ENUM(max256KB)
-    LZ4F_OBSOLETE_ENUM(max1MB)
-    LZ4F_OBSOLETE_ENUM(max4MB)
-} LZ4F_blockSizeID_t;
-
-/* Linked blocks sharply reduce inefficiencies when using small blocks;
- * they compress better.
- * However, some LZ4 decoders are only compatible with independent blocks */
-typedef enum {
-    LZ4F_blockLinked=0,
-    LZ4F_blockIndependent
-    LZ4F_OBSOLETE_ENUM(blockLinked)
-    LZ4F_OBSOLETE_ENUM(blockIndependent)
-} LZ4F_blockMode_t;
-
-typedef enum {
-    LZ4F_noContentChecksum=0,
-    LZ4F_contentChecksumEnabled
-    LZ4F_OBSOLETE_ENUM(noContentChecksum)
-    LZ4F_OBSOLETE_ENUM(contentChecksumEnabled)
-} LZ4F_contentChecksum_t;
-
-typedef enum {
-    LZ4F_frame=0,
-    LZ4F_skippableFrame
-    LZ4F_OBSOLETE_ENUM(skippableFrame)
-} LZ4F_frameType_t;
-
-#ifndef LZ4F_DISABLE_OBSOLETE_ENUMS
-typedef LZ4F_blockSizeID_t blockSizeID_t;
-typedef LZ4F_blockMode_t blockMode_t;
-typedef LZ4F_frameType_t frameType_t;
-typedef LZ4F_contentChecksum_t contentChecksum_t;
-#endif
-
-/*! LZ4F_frameInfo_t :
- * makes it possible to supply detailed frame parameters to the stream interface.
- * It's not required to set all fields, as long as the structure was initially memset() to zero.
- * All reserved fields must be set to zero. */
-typedef struct {
-  LZ4F_blockSizeID_t     blockSizeID;           /* max64KB, max256KB, max1MB, max4MB ; 0 == default */
-  LZ4F_blockMode_t       blockMode;             /* blockLinked, blockIndependent ; 0 == default */
-  LZ4F_contentChecksum_t contentChecksumFlag;   /* noContentChecksum, contentChecksumEnabled ; 0 == default  */
-  LZ4F_frameType_t       frameType;             /* LZ4F_frame, skippableFrame ; 0 == default */
-  unsigned long long     contentSize;           /* Size of uncompressed (original) content ; 0 == unknown */
-  unsigned               reserved[2];           /* must be zero for forward compatibility */
-} LZ4F_frameInfo_t;
-
-/*! LZ4F_preferences_t :
- * makes it possible to supply detailed compression parameters to the stream interface.
- * It's not required to set all fields, as long as the structure was initially memset() to zero.
- * All reserved fields must be set to zero. */
-typedef struct {
-  LZ4F_frameInfo_t frameInfo;
-  int      compressionLevel;       /* 0 == default (fast mode); values above LZ4HC_CLEVEL_MAX count as LZ4HC_CLEVEL_MAX; values below 0 count as 0 */
-  unsigned autoFlush;              /* 1 == always flush (reduce usage of tmp buffer) */
-  unsigned reserved[4];            /* must be zero for forward compatibility */
-} LZ4F_preferences_t;
-
-
-/*-*********************************
-*  Simple compression function
-***********************************/
-/*!LZ4F_compressFrameBound() :
- * Returns the maximum possible size of a frame compressed with LZ4F_compressFrame() given srcSize content and preferences.
- * Note : this result is only usable with LZ4F_compressFrame(), not with multi-segments compression.
- */
-LZ4FLIB_API size_t LZ4F_compressFrameBound(size_t srcSize, const LZ4F_preferences_t* preferencesPtr);
-
-/*!LZ4F_compressFrame() :
- * Compress an entire srcBuffer into a valid LZ4 frame, as defined by specification v1.5.1
- * An important rule is that dstBuffer MUST be large enough (dstCapacity) to store the result in worst case situation.
- * This value is supplied by LZ4F_compressFrameBound().
- * If this condition is not respected, LZ4F_compressFrame() will fail (result is an errorCode).
- * The LZ4F_preferences_t structure is optional : you can provide NULL as argument. All preferences will be set to default.
- * @return : number of bytes written into dstBuffer.
- *           or an error code if it fails (can be tested using LZ4F_isError())
- */
-LZ4FLIB_API size_t LZ4F_compressFrame(void* dstBuffer, size_t dstCapacity, const void* srcBuffer, size_t srcSize, const LZ4F_preferences_t* preferencesPtr);
-
-
-
-/*-***********************************
-*  Advanced compression functions
-*************************************/
-typedef struct LZ4F_cctx_s LZ4F_cctx;   /* incomplete type */
-typedef LZ4F_cctx* LZ4F_compressionContext_t;   /* for compatibility with previous API version */
-
-typedef struct {
-  unsigned stableSrc;    /* 1 == src content will remain present on future calls to LZ4F_compress(); skip copying src content within tmp buffer */
-  unsigned reserved[3];
-} LZ4F_compressOptions_t;
-
-/*---   Resource Management   ---*/
-
-#define LZ4F_VERSION 100
-LZ4FLIB_API unsigned LZ4F_getVersion(void);
-/*! LZ4F_createCompressionContext() :
- * The first thing to do is to create a compressionContext object, which will be used in all compression operations.
- * This is achieved using LZ4F_createCompressionContext(), which takes as argument a version.
- * The version provided MUST be LZ4F_VERSION. It is intended to track potential version mismatch, notably when using DLL.
- * The function will provide a pointer to a fully allocated LZ4F_cctx object.
- * If @return != zero, there was an error during context creation.
- * Object can release its memory using LZ4F_freeCompressionContext();
- */
-LZ4FLIB_API LZ4F_errorCode_t LZ4F_createCompressionContext(LZ4F_cctx** cctxPtr, unsigned version);
-LZ4FLIB_API LZ4F_errorCode_t LZ4F_freeCompressionContext(LZ4F_cctx* cctx);
-
-
-/*----    Compression    ----*/
-
-#define LZ4F_HEADER_SIZE_MAX 15
-/*! LZ4F_compressBegin() :
- * will write the frame header into dstBuffer.
- * dstCapacity must be large enough to store the header. Maximum header size is LZ4F_HEADER_SIZE_MAX bytes.
- * `prefsPtr` is optional : you can provide NULL as argument, all preferences will then be set to default.
- * @return : number of bytes written into dstBuffer for the header
- *           or an error code (which can be tested using LZ4F_isError())
- */
-LZ4FLIB_API size_t LZ4F_compressBegin(LZ4F_cctx* cctx, void* dstBuffer, size_t dstCapacity, const LZ4F_preferences_t* prefsPtr);
-
-/*! LZ4F_compressBound() :
- * Provides dstCapacity given a srcSize to guarantee operation success in worst case situations.
- * prefsPtr is optional : you can provide NULL as argument, preferences will be set to cover worst case scenario.
- * Result is always the same for a srcSize and prefsPtr, so it can be trusted to size reusable buffers.
- * When srcSize==0, LZ4F_compressBound() provides an upper bound for LZ4F_flush() and LZ4F_compressEnd() operations.
- */
-LZ4FLIB_API size_t LZ4F_compressBound(size_t srcSize, const LZ4F_preferences_t* prefsPtr);
-
-/*! LZ4F_compressUpdate() :
- * LZ4F_compressUpdate() can be called repetitively to compress as much data as necessary.
- * An important rule is that dstCapacity MUST be large enough to ensure operation success even in worst case situations.
- * This value is provided by LZ4F_compressBound().
- * If this condition is not respected, LZ4F_compress() will fail (result is an errorCode).
- * LZ4F_compressUpdate() doesn't guarantee error recovery. When an error occurs, compression context must be freed or resized.
- * `cOptPtr` is optional : NULL can be provided, in which case all options are set to default.
- * @return : number of bytes written into `dstBuffer` (it can be zero, meaning input data was just buffered).
- *           or an error code if it fails (which can be tested using LZ4F_isError())
- */
-LZ4FLIB_API size_t LZ4F_compressUpdate(LZ4F_cctx* cctx, void* dstBuffer, size_t dstCapacity, const void* srcBuffer, size_t srcSize, const LZ4F_compressOptions_t* cOptPtr);
-
-/*! LZ4F_flush() :
- * When data must be generated and sent immediately, without waiting for a block to be completely filled,
- * it's possible to call LZ4F_flush(). It will immediately compress any data buffered within cctx.
- * `dstCapacity` must be large enough to ensure the operation will be successful.
- * `cOptPtr` is optional : it's possible to provide NULL, all options will be set to default.
- * @return : number of bytes written into dstBuffer (it can be zero, which means there was no data stored within cctx)
- *           or an error code if it fails (which can be tested using LZ4F_isError())
- */
-LZ4FLIB_API size_t LZ4F_flush(LZ4F_cctx* cctx, void* dstBuffer, size_t dstCapacity, const LZ4F_compressOptions_t* cOptPtr);
-
-/*! LZ4F_compressEnd() :
- * To properly finish an LZ4 frame, invoke LZ4F_compressEnd().
- * It will flush whatever data remained within `cctx` (like LZ4F_flush())
- * and properly finalize the frame, with an endMark and a checksum.
- * `cOptPtr` is optional : NULL can be provided, in which case all options will be set to default.
- * @return : number of bytes written into dstBuffer (necessarily >= 4 (endMark), or 8 if optional frame checksum is enabled)
- *           or an error code if it fails (which can be tested using LZ4F_isError())
- * A successful call to LZ4F_compressEnd() makes `cctx` available again for another compression task.
- */
-LZ4FLIB_API size_t LZ4F_compressEnd(LZ4F_cctx* cctx, void* dstBuffer, size_t dstCapacity, const LZ4F_compressOptions_t* cOptPtr);
-
-
-/*-*********************************
-*  Decompression functions
-***********************************/
-typedef struct LZ4F_dctx_s LZ4F_dctx;   /* incomplete type */
-typedef LZ4F_dctx* LZ4F_decompressionContext_t;   /* compatibility with previous API versions */
-
-typedef struct {
-  unsigned stableDst;       /* guarantee that decompressed data will still be there on next function calls (avoid storage into tmp buffers) */
-  unsigned reserved[3];
-} LZ4F_decompressOptions_t;
-
-
-/* Resource management */
-
-/*!LZ4F_createDecompressionContext() :
- * Create an LZ4F_decompressionContext_t object, which will be used to track all decompression operations.
- * The version provided MUST be LZ4F_VERSION. It is intended to track potential breaking differences between different versions.
- * The function will provide a pointer to a fully allocated and initialized LZ4F_decompressionContext_t object.
- * The result is an errorCode, which can be tested using LZ4F_isError().
- * dctx memory can be released using LZ4F_freeDecompressionContext();
- * The result of LZ4F_freeDecompressionContext() is indicative of the current state of decompressionContext when being released.
- * That is, it should be == 0 if decompression has been completed fully and correctly.
- */
-LZ4FLIB_API LZ4F_errorCode_t LZ4F_createDecompressionContext(LZ4F_dctx** dctxPtr, unsigned version);
-LZ4FLIB_API LZ4F_errorCode_t LZ4F_freeDecompressionContext(LZ4F_dctx* dctx);
-
-
-/*-***********************************
-*  Streaming decompression functions
-*************************************/
-
-/*! LZ4F_getFrameInfo() :
- * This function extracts frame parameters (such as max blockSize, frame checksum, etc.).
- * Its usage is optional. Extracted information can be useful for allocation purposes, typically.
- * This function works in 2 situations :
- *   - At the beginning of a new frame, in which case it will decode this information from `srcBuffer`, and start the decoding process.
- *     Input size must be large enough to successfully decode the entire frame header.
- *     Frame header size is variable, but is guaranteed to be <= LZ4F_HEADER_SIZE_MAX bytes.
- *     It's allowed to provide more input data than this minimum.
- *   - After decoding has been started.
- *     In which case, no input is read, frame parameters are extracted from dctx.
- *     If decoding has started but frame parameters have not yet been extracted from the header, LZ4F_getFrameInfo() will fail.
- * The number of bytes consumed from srcBuffer will be updated within *srcSizePtr (necessarily <= original value).
- * Decompression must resume from (srcBuffer + *srcSizePtr).
- * @return : a hint about how many srcSize bytes LZ4F_decompress() expects for next call,
- *           or an error code which can be tested using LZ4F_isError()
- * note 1 : in case of error, dctx is not modified. Decoding operations can resume from where they stopped.
- * note 2 : frame parameters are *copied into* an already allocated LZ4F_frameInfo_t structure.
- */
-LZ4FLIB_API size_t LZ4F_getFrameInfo(LZ4F_dctx* dctx,
-                                     LZ4F_frameInfo_t* frameInfoPtr,
-                                     const void* srcBuffer, size_t* srcSizePtr);
-
-/*! LZ4F_decompress() :
- * Call this function repetitively to regenerate data compressed within `srcBuffer`.
- * The function will attempt to decode up to *srcSizePtr bytes from srcBuffer, into dstBuffer of capacity *dstSizePtr.
- *
- * The number of bytes regenerated into dstBuffer will be provided within *dstSizePtr (necessarily <= original value).
- *
- * The number of bytes read from srcBuffer will be provided within *srcSizePtr (necessarily <= original value).
- * Number of bytes read can be < number of bytes provided, meaning there is some more data to decode.
- * It typically happens when dstBuffer is not large enough to contain all decoded data.
- * Remaining data will have to be presented again in a subsequent invocation.
- *
- * `dstBuffer` content is expected to be flushed between each invocation, as its content will be overwritten.
- * `dstBuffer` can be changed at will between each consecutive function invocation.
- *
- * @return is a hint of how many `srcSize` bytes LZ4F_decompress() expects for next call.
- * Schematically, it's the size of the current (or remaining) compressed block + header of next block.
- * Respecting the hint provides some small speed benefit, because it skips intermediate buffers.
- * This is just a hint though, it's always possible to provide any srcSize.
- * When a frame is fully decoded, @return will be 0 (no more data expected).
- * If decompression failed, @return is an error code, which can be tested using LZ4F_isError().
- *
- * After a frame is fully decoded, dctx can be used again to decompress another frame.
- * After a decompression error, use LZ4F_resetDecompressionContext() before re-using dctx, to return to clean state.
- */
-LZ4FLIB_API size_t LZ4F_decompress(LZ4F_dctx* dctx,
-                                   void* dstBuffer, size_t* dstSizePtr,
-                                   const void* srcBuffer, size_t* srcSizePtr,
-                                   const LZ4F_decompressOptions_t* dOptPtr);
-
-
-
-#if defined (__cplusplus)
-}
-#endif
-
-#endif  /* LZ4F_H_09782039843 */
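
[Editor's note: a minimal sketch (not part of this diff) of the simple one-shot API documented above. The helper name compress_buffer and the malloc-based buffer management are illustrative assumptions.]

    /* Sketch: compress a whole memory buffer into one LZ4 frame, default preferences (NULL).
     * Returns a malloc'd buffer (caller frees) and stores the compressed size in *dstSizeOut. */
    #include <stdlib.h>
    #include "lz4frame.h"

    static void *compress_buffer(const void *src, size_t srcSize, size_t *dstSizeOut) {
        size_t const bound = LZ4F_compressFrameBound(srcSize, NULL);  /* worst-case dstCapacity */
        void *dst = malloc(bound);
        if (dst == NULL) return NULL;

        size_t const written = LZ4F_compressFrame(dst, bound, src, srcSize, NULL);
        if (LZ4F_isError(written)) { free(dst); return NULL; }

        *dstSizeOut = written;   /* number of bytes actually written into dst */
        return dst;
    }

[Per the contract above, dstCapacity must come from LZ4F_compressFrameBound(); with a smaller buffer LZ4F_compressFrame() returns an error code rather than writing past the end.]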

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/lz4frame_static.h
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/lz4frame_static.h b/thirdparty/librdkafka-0.11.1/src/lz4frame_static.h
deleted file mode 100644
index 8ea496d..0000000
--- a/thirdparty/librdkafka-0.11.1/src/lz4frame_static.h
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
-   LZ4 auto-framing library
-   Header File for static linking only
-   Copyright (C) 2011-2016, Yann Collet.
-
-   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-   Redistribution and use in source and binary forms, with or without
-   modification, are permitted provided that the following conditions are
-   met:
-
-       * Redistributions of source code must retain the above copyright
-   notice, this list of conditions and the following disclaimer.
-       * Redistributions in binary form must reproduce the above
-   copyright notice, this list of conditions and the following disclaimer
-   in the documentation and/or other materials provided with the
-   distribution.
-
-   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-   You can contact the author at :
-   - LZ4 source repository : https://github.com/lz4/lz4
-   - LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c
-*/
-
-#ifndef LZ4FRAME_STATIC_H_0398209384
-#define LZ4FRAME_STATIC_H_0398209384
-
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-/* lz4frame_static.h should be used solely in the context of static linking.
- * It contains definitions which are not stable and may change in the future.
- * Never use it in the context of DLL linking.
- */
-
-
-/* ---   Dependency   --- */
-#include "lz4frame.h"
-
-
-/* ---   Experimental functions   --- */
-/* LZ4F_resetDecompressionContext() :
- * LZ4F_decompress() does not guarantee to leave dctx in clean state in case of errors.
- * In order to re-use a dctx after a decompression error,
- * use LZ4F_resetDecompressionContext() first.
- * dctx will be able to start decompression on a new frame */
-LZ4FLIB_API LZ4F_errorCode_t LZ4F_resetDecompressionContext(LZ4F_dctx* dctx);
-
-
-/* ---   Error List   --- */
-#define LZ4F_LIST_ERRORS(ITEM) \
-        ITEM(OK_NoError) \
-        ITEM(ERROR_GENERIC) \
-        ITEM(ERROR_maxBlockSize_invalid) \
-        ITEM(ERROR_blockMode_invalid) \
-        ITEM(ERROR_contentChecksumFlag_invalid) \
-        ITEM(ERROR_compressionLevel_invalid) \
-        ITEM(ERROR_headerVersion_wrong) \
-        ITEM(ERROR_blockChecksum_unsupported) \
-        ITEM(ERROR_reservedFlag_set) \
-        ITEM(ERROR_allocation_failed) \
-        ITEM(ERROR_srcSize_tooLarge) \
-        ITEM(ERROR_dstMaxSize_tooSmall) \
-        ITEM(ERROR_frameHeader_incomplete) \
-        ITEM(ERROR_frameType_unknown) \
-        ITEM(ERROR_frameSize_wrong) \
-        ITEM(ERROR_srcPtr_wrong) \
-        ITEM(ERROR_decompressionFailed) \
-        ITEM(ERROR_headerChecksum_invalid) \
-        ITEM(ERROR_contentChecksum_invalid) \
-        ITEM(ERROR_frameDecoding_alreadyStarted) \
-        ITEM(ERROR_maxCode)
-
-#define LZ4F_GENERATE_ENUM(ENUM) LZ4F_##ENUM,
-
-/* enum list is exposed, to handle specific errors */
-typedef enum { LZ4F_LIST_ERRORS(LZ4F_GENERATE_ENUM) } LZ4F_errorCodes;
-
-LZ4F_errorCodes LZ4F_getErrorCode(size_t functionResult);
-
-
-#if defined (__cplusplus)
-}
-#endif
-
-#endif /* LZ4FRAME_STATIC_H_0398209384 */
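
[Editor's note: a minimal sketch (not part of this diff) of how the static-linking extras above are typically used. The helper name handle_result is an illustrative assumption.]

    /* Sketch: inspect a function result, report the error, and reset the dctx for reuse. */
    #include <stdio.h>
    #include "lz4frame_static.h"

    static void handle_result(LZ4F_dctx *dctx, size_t result) {
        if (!LZ4F_isError(result))
            return;                                      /* success: nothing to do */

        fprintf(stderr, "LZ4F error: %s\n", LZ4F_getErrorName(result));

        /* The exposed enum allows reacting to a specific error. */
        if (LZ4F_getErrorCode(result) == LZ4F_ERROR_contentChecksum_invalid)
            fprintf(stderr, "frame payload failed its content checksum\n");

        /* LZ4F_decompress() does not guarantee a clean dctx state after an error,
         * so reset it before starting a new frame. */
        LZ4F_resetDecompressionContext(dctx);
    }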


[37/51] [partial] nifi-minifi-cpp git commit: MINIFICPP-512 - upgrade to librdkafka 0.11.4

Posted by ph...@apache.org.
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdbuf.c
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdbuf.c b/thirdparty/librdkafka-0.11.1/src/rdbuf.c
deleted file mode 100644
index b44ce59..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdbuf.c
+++ /dev/null
@@ -1,1547 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2017 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-#include "rd.h"
-#include "rdbuf.h"
-#include "rdunittest.h"
-#include "rdlog.h"
-#include "rdcrc32.h"
-#include "crc32c.h"
-
-
-static size_t
-rd_buf_get_writable0 (rd_buf_t *rbuf, rd_segment_t **segp, void **p);
-
-
-/**
- * @brief Destroy the segment and free its payload.
- *
- * @remark Will NOT unlink from buffer.
- */
-static void rd_segment_destroy (rd_segment_t *seg) {
-        /* Free payload */
-        if (seg->seg_free && seg->seg_p)
-                seg->seg_free(seg->seg_p);
-
-        if (seg->seg_flags & RD_SEGMENT_F_FREE)
-                rd_free(seg);
-}
-
-/**
- * @brief Initialize segment with absolute offset, backing memory pointer,
- *        and backing memory size.
- * @remark The segment is NOT linked.
- */
-static void rd_segment_init (rd_segment_t *seg, void *mem, size_t size) {
-        memset(seg, 0, sizeof(*seg));
-        seg->seg_p     = mem;
-        seg->seg_size  = size;
-}
-
-
-/**
- * @brief Append segment to buffer
- *
- * @remark Will set the buffer position to the new \p seg if no existing wpos.
- * @remark Will set the segment seg_absof to the current length of the buffer.
- */
-static rd_segment_t *rd_buf_append_segment (rd_buf_t *rbuf, rd_segment_t *seg) {
-        TAILQ_INSERT_TAIL(&rbuf->rbuf_segments, seg, seg_link);
-        rbuf->rbuf_segment_cnt++;
-        seg->seg_absof      = rbuf->rbuf_len;
-        rbuf->rbuf_len     += seg->seg_of;
-        rbuf->rbuf_size    += seg->seg_size;
-
-        /* Update writable position */
-        if (!rbuf->rbuf_wpos)
-                rbuf->rbuf_wpos = seg;
-        else
-                rd_buf_get_writable0(rbuf, NULL, NULL);
-
-        return seg;
-}
-
-
-
-
-/**
- * @brief Attempt to allocate \p size bytes from the buffers extra buffers.
- * @returns the allocated pointer which MUST NOT be freed, or NULL if
- *          not enough memory.
- * @remark the returned pointer is memory-aligned to be safe.
- */
-static void *extra_alloc (rd_buf_t *rbuf, size_t size) {
-        size_t of = RD_ROUNDUP(rbuf->rbuf_extra_len, 8); /* FIXME: 32-bit */
-        void *p;
-
-        if (of + size > rbuf->rbuf_extra_size)
-                return NULL;
-
-        p = rbuf->rbuf_extra + of; /* Aligned pointer */
-
-        rbuf->rbuf_extra_len = of + size;
-
-        return p;
-}
-
-
-
-/**
- * @brief Get a pre-allocated segment if available, or allocate a new
- *        segment with the extra amount of \p size bytes allocated for payload.
- *
- *        Will not append the segment to the buffer.
- */
-static rd_segment_t *
-rd_buf_alloc_segment0 (rd_buf_t *rbuf, size_t size) {
-        rd_segment_t *seg;
-
-        /* See if there is enough room in the extra buffer for
-         * allocating the segment header and the buffer,
-         * or just the segment header, else fall back to malloc. */
-        if ((seg = extra_alloc(rbuf, sizeof(*seg) + size))) {
-                rd_segment_init(seg, size > 0 ? seg+1 : NULL, size);
-
-        } else if ((seg = extra_alloc(rbuf, sizeof(*seg)))) {
-                rd_segment_init(seg, size > 0 ? rd_malloc(size) : NULL, size);
-                if (size > 0)
-                        seg->seg_free = rd_free;
-
-        } else if ((seg = rd_malloc(sizeof(*seg) + size))) {
-                rd_segment_init(seg, size > 0 ? seg+1 : NULL, size);
-                seg->seg_flags |= RD_SEGMENT_F_FREE;
-
-        } else
-                rd_assert(!*"segment allocation failure");
-
-        return seg;
-}
-
-/**
- * @brief Allocate between \p min_size .. \p max_size of backing memory
- *        and add it as a new segment to the buffer.
- *
- *        The buffer position is updated to point to the new segment.
- *
- *        The segment will be over-allocated if permitted by max_size
- *        (max_size == 0 or max_size > min_size).
- */
-static rd_segment_t *
-rd_buf_alloc_segment (rd_buf_t *rbuf, size_t min_size, size_t max_size) {
-        rd_segment_t *seg;
-
-        /* Over-allocate if allowed. */
-        if (min_size != max_size || max_size == 0)
-                max_size = RD_MAX(sizeof(*seg) * 4,
-                                  RD_MAX(min_size * 2,
-                                         rbuf->rbuf_size / 2));
-
-        seg = rd_buf_alloc_segment0(rbuf, max_size);
-
-        rd_buf_append_segment(rbuf, seg);
-
-        return seg;
-}
-
-
-/**
- * @brief Ensures that \p size bytes will be available
- *        for writing and the position will be updated to point to the
- *        start of this contiguous block.
- */
-void rd_buf_write_ensure_contig (rd_buf_t *rbuf, size_t size) {
-        rd_segment_t *seg = rbuf->rbuf_wpos;
-
-        if (seg) {
-                void *p;
-                size_t remains = rd_segment_write_remains(seg, &p);
-
-                if (remains >= size)
-                        return; /* Existing segment has enough space. */
-
-                /* Future optimization:
-                 * If existing segment has enough remaining space to warrant
-                 * a split, do it, before allocating a new one. */
-        }
-
-        /* Allocate new segment */
-        rbuf->rbuf_wpos = rd_buf_alloc_segment(rbuf, size, size);
-}
-
-/**
- * @brief Ensures that at least \p size bytes will be available for
- *        a future write.
- *
- *        Typically used prior to a call to rd_buf_get_write_iov()
- */
-void rd_buf_write_ensure (rd_buf_t *rbuf, size_t min_size, size_t max_size) {
-        size_t remains;
-        while ((remains = rd_buf_write_remains(rbuf)) < min_size)
-                rd_buf_alloc_segment(rbuf,
-                                     min_size - remains,
-                                     max_size ? max_size - remains : 0);
-}
-
-
-/**
- * @returns the segment at absolute offset \p absof, or NULL if out of range.
- *
- * @remark \p hint is an optional segment where to start looking, such as
- *         the current write or read position.
- */
-rd_segment_t *
-rd_buf_get_segment_at_offset (const rd_buf_t *rbuf, const rd_segment_t *hint,
-                              size_t absof) {
-        const rd_segment_t *seg = hint;
-
-        if (unlikely(absof > rbuf->rbuf_len))
-                return NULL;
-
-        /* Only use current write position if possible and if it helps */
-        if (!seg || absof < seg->seg_absof)
-                seg = TAILQ_FIRST(&rbuf->rbuf_segments);
-
-        do {
-                if (absof >= seg->seg_absof &&
-                    absof < seg->seg_absof + seg->seg_of) {
-                        rd_dassert(seg->seg_absof <= rd_buf_len(rbuf));
-                        return (rd_segment_t *)seg;
-                }
-        } while ((seg = TAILQ_NEXT(seg, seg_link)));
-
-        return NULL;
-}
-
-
-/**
- * @brief Split segment \p seg at absolute offset \p absof, creating
- *        a new segment whose memory points at the memory starting
- *        at \p absof.
- *        \p seg's own memory is shortened to end at \p absof.
- *
- *        The new segment is NOT appended to the buffer.
- *
- * @warning MUST ONLY be used on the LAST segment
- *
- * @warning if a segment is inserted between these two split parts
- *          it is imperative that the later segment's absof is corrected.
- *
- * @remark The seg_free callback is retained on the original \p seg
- *         and is not copied to the new segment, but flags are copied.
- */
-static rd_segment_t *rd_segment_split (rd_buf_t *rbuf, rd_segment_t *seg,
-                                       size_t absof) {
-        rd_segment_t *newseg;
-        size_t relof;
-
-        rd_assert(seg == rbuf->rbuf_wpos);
-        rd_assert(absof >= seg->seg_absof &&
-                  absof <= seg->seg_absof + seg->seg_of);
-
-        relof = absof - seg->seg_absof;
-
-        newseg = rd_buf_alloc_segment0(rbuf, 0);
-
-        /* Add later part of split bytes to new segment */
-        newseg->seg_p      = seg->seg_p+relof;
-        newseg->seg_of     = seg->seg_of-relof;
-        newseg->seg_size   = seg->seg_size-relof;
-        newseg->seg_absof  = SIZE_MAX; /* Invalid */
-        newseg->seg_flags |= seg->seg_flags;
-
-        /* Remove earlier part of split bytes from previous segment */
-        seg->seg_of        = relof;
-        seg->seg_size      = relof;
-
-        /* newseg's length will be added to rbuf_len in append_segment(),
-         * so shave it off here from seg's perspective. */
-        rbuf->rbuf_len   -= newseg->seg_of;
-        rbuf->rbuf_size  -= newseg->seg_size;
-
-        return newseg;
-}
-
-
-
-
-/**
- * @brief Unlink and destroy a segment, updating the \p rbuf
- *        with the decrease in length and capacity.
- */
-static void rd_buf_destroy_segment (rd_buf_t *rbuf, rd_segment_t *seg) {
-        rd_assert(rbuf->rbuf_segment_cnt > 0 &&
-                  rbuf->rbuf_len >= seg->seg_of &&
-                  rbuf->rbuf_size >= seg->seg_size);
-
-        TAILQ_REMOVE(&rbuf->rbuf_segments, seg, seg_link);
-        rbuf->rbuf_segment_cnt--;
-        rbuf->rbuf_len  -= seg->seg_of;
-        rbuf->rbuf_size -= seg->seg_size;
-        rd_dassert(rbuf->rbuf_len <= seg->seg_absof);
-        if (rbuf->rbuf_wpos == seg)
-                rbuf->rbuf_wpos = NULL;
-
-        rd_segment_destroy(seg);
-}
-
-
-/**
- * @brief Free memory associated with the \p rbuf, but not the rbuf itself.
- *        Segments will be destroyed.
- */
-void rd_buf_destroy (rd_buf_t *rbuf) {
-        rd_segment_t *seg, *tmp;
-
-#if ENABLE_DEVEL
-        /* FIXME */
-        if (rbuf->rbuf_len > 0 && 0) {
-                size_t overalloc = rbuf->rbuf_size - rbuf->rbuf_len;
-                float fill_grade = (float)rbuf->rbuf_len /
-                        (float)rbuf->rbuf_size;
-
-                printf("fill grade: %.2f%% (%zu bytes over-allocated)\n",
-                       fill_grade * 100.0f, overalloc);
-        }
-#endif
-
-
-        TAILQ_FOREACH_SAFE(seg, &rbuf->rbuf_segments, seg_link, tmp) {
-                rd_segment_destroy(seg);
-
-        }
-
-        if (rbuf->rbuf_extra)
-                rd_free(rbuf->rbuf_extra);
-}
-
-
-/**
- * @brief Initialize buffer, pre-allocating \p fixed_seg_cnt segments
- *        where the first segment will have a \p buf_size of backing memory.
- *
- *        The caller may rearrange the backing memory as it sees fit.
- */
-void rd_buf_init (rd_buf_t *rbuf, size_t fixed_seg_cnt, size_t buf_size) {
-        size_t totalloc = 0;
-
-        memset(rbuf, 0, sizeof(*rbuf));
-        TAILQ_INIT(&rbuf->rbuf_segments);
-
-        if (!fixed_seg_cnt) {
-                assert(!buf_size);
-                return;
-        }
-
-        /* Pre-allocate memory for a fixed set of segments that are known
-         * beforehand, to minimize the number of extra allocations
-         * needed for well-known layouts (such as headers, etc.) */
-        totalloc += RD_ROUNDUP(sizeof(rd_segment_t), 8) * fixed_seg_cnt;
-
-        /* Pre-allocate extra space for the backing buffer. */
-        totalloc += buf_size;
-
-        rbuf->rbuf_extra_size = totalloc;
-        rbuf->rbuf_extra = rd_malloc(rbuf->rbuf_extra_size);
-}
-
-
-
-
-/**
- * @brief Convenience writer iterator interface.
- *
- *        After writing to \p p the caller must update the written length
- *        by calling rd_buf_write(rbuf, NULL, written_length)
- *
- * @returns the number of contiguous writable bytes in segment
- *          and sets \p *p to point to the start of the memory region.
- */
-static size_t
-rd_buf_get_writable0 (rd_buf_t *rbuf, rd_segment_t **segp, void **p) {
-        rd_segment_t *seg;
-
-        for (seg = rbuf->rbuf_wpos ; seg ; seg = TAILQ_NEXT(seg, seg_link)) {
-                size_t len = rd_segment_write_remains(seg, p);
-
-                /* Even though the write offset hasn't changed we
-                 * avoid future segment scans by adjusting the
-                 * wpos here to the first writable segment. */
-                rbuf->rbuf_wpos = seg;
-                if (segp)
-                        *segp = seg;
-
-                if (unlikely(len == 0))
-                        continue;
-
-                /* Also adjust absof if the segment was allocated
-                 * before the previous segment's memory was exhausted
-                 * and thus now might have a lower absolute offset
-                 * than the previous segment's now higher relative offset. */
-                if (seg->seg_of == 0 && seg->seg_absof < rbuf->rbuf_len)
-                        seg->seg_absof = rbuf->rbuf_len;
-
-                return len;
-        }
-
-        return 0;
-}
-
-size_t rd_buf_get_writable (rd_buf_t *rbuf, void **p) {
-        rd_segment_t *seg;
-        return rd_buf_get_writable0(rbuf, &seg, p);
-}
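For illustration, a minimal sketch of the writer-iterator protocol described above: fill the returned contiguous region, then commit it with a NULL-payload rd_buf_write(). The 128-byte target and the 0xab fill byte are placeholders of the example, and string.h is assumed for memset():

/* Sketch: produce 'want' bytes in place using the writer iterator. */
static void example_write_in_place (rd_buf_t *b) {
        size_t want = 128;

        rd_buf_write_ensure(b, want, 0);

        while (want > 0) {
                void *p;
                size_t writable = rd_buf_get_writable(b, &p);
                size_t n = RD_MIN(want, writable);

                memset(p, 0xab, n);        /* write directly into the segment */
                rd_buf_write(b, NULL, n);  /* commit n bytes (position only)  */
                want -= n;
        }
}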
-
-
-
-
-/**
- * @brief Write \p payload of \p size bytes to current position
- *        in buffer. A new segment will be allocated and appended
- *        if needed.
- *
- * @returns the write position where payload was written (pre-write).
- *          Returning the pre-position allows write_update() to later
- *          update the same location, effectively making write()s
- *          also a place-holder mechanism.
- *
- * @remark If \p payload is NULL only the write position is updated,
- *         in this mode the buffer must already have enough memory
- *         for the NULL write, since any segments newly allocated by
- *         this function would otherwise contain uninitialized memory.
- */
-size_t rd_buf_write (rd_buf_t *rbuf, const void *payload, size_t size) {
-        size_t remains = size;
-        size_t initial_absof;
-        const char *psrc = (const char *)payload;
-
-        initial_absof = rbuf->rbuf_len;
-
-        /* Ensure enough space by pre-allocating segments. */
-        rd_buf_write_ensure(rbuf, size, 0);
-
-        while (remains > 0) {
-                void *p;
-                rd_segment_t *seg;
-                size_t segremains = rd_buf_get_writable0(rbuf, &seg, &p);
-                size_t wlen = RD_MIN(remains, segremains);
-
-                rd_dassert(seg == rbuf->rbuf_wpos);
-                rd_dassert(wlen > 0);
-                rd_dassert(seg->seg_p+seg->seg_of <= (char *)p &&
-                           (char *)p < seg->seg_p+seg->seg_size);
-
-                if (payload) {
-                        memcpy(p, psrc, wlen);
-                        psrc += wlen;
-                }
-
-                seg->seg_of    += wlen;
-                rbuf->rbuf_len += wlen;
-                remains        -= wlen;
-        }
-
-        rd_assert(remains == 0);
-
-        return initial_absof;
-}
-
-
-
-/**
- * @brief Write \p slice to \p rbuf
- *
- * @remark The slice position will be updated.
- *
- * @returns the number of bytes written (always the slice length)
- */
-size_t rd_buf_write_slice (rd_buf_t *rbuf, rd_slice_t *slice) {
-        const void *p;
-        size_t rlen;
-        size_t sum = 0;
-
-        while ((rlen = rd_slice_reader(slice, &p))) {
-                size_t r;
-                r = rd_buf_write(rbuf, p, rlen);
-                rd_dassert(r != 0);
-                sum += r;
-        }
-
-        return sum;
-}
-
-
-
-/**
- * @brief Write \p payload of \p size at absolute offset \p absof
- *        WITHOUT updating the total buffer length.
- *
- *        This is used to update a previously written region, such
- *        as updating the header length.
- *
- * @returns the number of bytes written, which may be less than \p size
- *          if the update spans multiple segments.
- */
-static size_t rd_segment_write_update (rd_segment_t *seg, size_t absof,
-                                       const void *payload, size_t size) {
-        size_t relof;
-        size_t wlen;
-
-        rd_dassert(absof >= seg->seg_absof);
-        relof = absof - seg->seg_absof;
-        rd_assert(relof <= seg->seg_of);
-        wlen = RD_MIN(size, seg->seg_of - relof);
-        rd_dassert(relof + wlen <= seg->seg_of);
-
-        memcpy(seg->seg_p+relof, payload, wlen);
-
-        return wlen;
-}
-
-
-
-/**
- * @brief Write \p payload of \p size at absolute offset \p absof
- *        WITHOUT updating the total buffer length.
- *
- *        This is used to update a previously written region, such
- *        as updating the header length.
- */
-size_t rd_buf_write_update (rd_buf_t *rbuf, size_t absof,
-                            const void *payload, size_t size) {
-        rd_segment_t *seg;
-        const char *psrc = (const char *)payload;
-        size_t of;
-
-        /* Find segment for offset */
-        seg = rd_buf_get_segment_at_offset(rbuf, rbuf->rbuf_wpos, absof);
-        rd_assert(seg && *"invalid absolute offset");
-
-        for (of = 0 ; of < size ; seg = TAILQ_NEXT(seg, seg_link)) {
-                rd_assert(seg->seg_absof <= rd_buf_len(rbuf));
-                size_t wlen = rd_segment_write_update(seg, absof+of,
-                                                      psrc+of, size-of);
-                of += wlen;
-        }
-
-        rd_dassert(of == size);
-
-        return of;
-}
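For illustration, a minimal sketch of the place-holder pattern enabled by rd_buf_write() returning the pre-write position: reserve a length field, write the payload, then patch the field with rd_buf_write_update(). The 4-byte host-order length framing is an assumption of the example, not a format defined by this file:

/* Sketch: length-prefixed framing via a patched placeholder. */
static void example_frame (rd_buf_t *b, const void *payload, size_t size) {
        uint32_t len    = 0;                            /* placeholder value */
        size_t   len_of = rd_buf_write(b, &len, sizeof(len));

        rd_buf_write(b, payload, size);

        len = (uint32_t)size;                  /* host byte order, for brevity */
        rd_buf_write_update(b, len_of, &len, sizeof(len));
}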
-
-
-
-/**
- * @brief Push reference memory segment to current write position.
- */
-void rd_buf_push (rd_buf_t *rbuf, const void *payload, size_t size,
-                  void (*free_cb)(void *)) {
-        rd_segment_t *prevseg, *seg, *tailseg = NULL;
-
-        if ((prevseg = rbuf->rbuf_wpos) &&
-            rd_segment_write_remains(prevseg, NULL) > 0) {
-                /* If the current segment still has room in it split it
-                 * and insert the pushed segment in the middle (below). */
-                tailseg = rd_segment_split(rbuf, prevseg,
-                                           prevseg->seg_absof +
-                                           prevseg->seg_of);
-        }
-
-        seg = rd_buf_alloc_segment0(rbuf, 0);
-        seg->seg_p      = (char *)payload;
-        seg->seg_size   = size;
-        seg->seg_of     = size;
-        seg->seg_free   = free_cb;
-        seg->seg_flags |= RD_SEGMENT_F_RDONLY;
-
-        rd_buf_append_segment(rbuf, seg);
-
-        if (tailseg)
-                rd_buf_append_segment(rbuf, tailseg);
-}
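For illustration, a sketch of pushing caller-owned, read-only memory by reference; it assumes a NULL free_cb means the segment memory is simply not freed, and that the pushed memory outlives the buffer:

/* Sketch: append a static blob without copying it. */
static const char example_hdr[] = "MAGIC";

static void example_push (rd_buf_t *b) {
        rd_buf_push(b, example_hdr, sizeof(example_hdr) - 1,
                    NULL /* caller-owned, never freed */);
}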
-
-
-
-
-
-
-
-/**
- * @brief Do a write-seek, updating the write position to the given
- *        absolute \p absof.
- *
- * @warning Any subsequent segments will be destroyed.
- *
- * @returns -1 if the offset is out of bounds, else 0.
- */
-int rd_buf_write_seek (rd_buf_t *rbuf, size_t absof) {
-        rd_segment_t *seg, *next;
-        size_t relof;
-
-        seg = rd_buf_get_segment_at_offset(rbuf, rbuf->rbuf_wpos, absof);
-        if (unlikely(!seg))
-                return -1;
-
-        relof = absof - seg->seg_absof;
-        if (unlikely(relof > seg->seg_of))
-                return -1;
-
-        /* Destroy subsequent segments in reverse order so that
-         * destroy_segment() length checks are correct.
-         * Will decrement rbuf_len, etc. */
-        for (next = TAILQ_LAST(&rbuf->rbuf_segments, rd_segment_head) ;
-             next != seg ; next = TAILQ_PREV(next, rd_segment_head, seg_link))
-                rd_buf_destroy_segment(rbuf, next);
-
-        /* Update relative write offset */
-        seg->seg_of         = relof;
-        rbuf->rbuf_wpos     = seg;
-        rbuf->rbuf_len      = seg->seg_absof + seg->seg_of;
-
-        rd_assert(rbuf->rbuf_len == absof);
-
-        return 0;
-}
-
-
-/**
- * @brief Set up the iovecs in \p iovs (of size \p iov_max) with the writable
- *        segments from the buffer's current write position.
- *
- * @param iovcntp will be set to the number of populated \p iovs[]
- * @param size_max limits the total number of bytes made available.
- *                 Note: this value may be overshot by the size of one
- *                       segment.
- *
- * @returns the total number of bytes in the represented segments.
- *
- * @remark the write position will NOT be updated.
- */
-size_t rd_buf_get_write_iov (const rd_buf_t *rbuf,
-                             struct iovec *iovs, size_t *iovcntp,
-                             size_t iov_max, size_t size_max) {
-        const rd_segment_t *seg;
-        size_t iovcnt = 0;
-        size_t sum = 0;
-
-        for (seg = rbuf->rbuf_wpos ;
-             seg && iovcnt < iov_max && sum < size_max ;
-             seg = TAILQ_NEXT(seg, seg_link)) {
-                size_t len;
-                void *p;
-
-                len = rd_segment_write_remains(seg, &p);
-                if (unlikely(len == 0))
-                        continue;
-
-                iovs[iovcnt].iov_base  = p;
-                iovs[iovcnt++].iov_len = len;
-
-                sum += len;
-        }
-
-        *iovcntp = iovcnt;
-
-        return sum;
-}
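For illustration, the typical write_ensure() + get_write_iov() pattern: scatter-read from a socket directly into the writable segments, then commit the received bytes with a NULL-payload write. readv(), sys/uio.h and the socket descriptor are assumptions of the sketch, not of this file:

/* Sketch: receive up to 'want' bytes directly into the buffer's segments. */
static ssize_t example_recv (rd_buf_t *b, int sockfd, size_t want) {
        struct iovec iov[8];
        size_t iovcnt;
        ssize_t r;

        rd_buf_write_ensure(b, want, 0);
        rd_buf_get_write_iov(b, iov, &iovcnt, 8, want);

        r = readv(sockfd, iov, (int)iovcnt);
        if (r > 0)
                rd_buf_write(b, NULL, (size_t)r);  /* commit what was received */

        return r;
}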
-
-
-
-
-
-
-
-
-
-
-
-/**
- * @name Slice reader interface
- *
- * @{
- */
-
-/**
- * @brief Initialize a new slice of \p size bytes starting at \p seg with
- *        relative offset \p rof.
- *
- * @returns 0 on success or -1 if there are not at least \p size bytes available
- *          in the buffer.
- */
-int rd_slice_init_seg (rd_slice_t *slice, const rd_buf_t *rbuf,
-                           const rd_segment_t *seg, size_t rof, size_t size) {
-        /* Verify that \p size bytes are indeed available in the buffer. */
-        if (unlikely(rbuf->rbuf_len < (seg->seg_absof + rof + size)))
-                return -1;
-
-        slice->buf    = rbuf;
-        slice->seg    = seg;
-        slice->rof    = rof;
-        slice->start  = seg->seg_absof + rof;
-        slice->end    = slice->start + size;
-
-        rd_assert(seg->seg_absof+rof >= slice->start &&
-                  seg->seg_absof+rof <= slice->end);
-
-        rd_assert(slice->end <= rd_buf_len(rbuf));
-
-        return 0;
-}
-
-/**
- * @brief Initialize new slice of \p size bytes starting at offset \p absof
- *
- * @returns 0 on success or -1 if there are not at least \p size bytes available
- *          in the buffer.
- */
-int rd_slice_init (rd_slice_t *slice, const rd_buf_t *rbuf,
-                   size_t absof, size_t size) {
-        const rd_segment_t *seg = rd_buf_get_segment_at_offset(rbuf, NULL,
-                                                               absof);
-        if (unlikely(!seg))
-                return -1;
-
-        return rd_slice_init_seg(slice, rbuf, seg,
-                                 absof - seg->seg_absof, size);
-}
-
-/**
- * @brief Initialize new slice covering the full buffer \p rbuf
- */
-void rd_slice_init_full (rd_slice_t *slice, const rd_buf_t *rbuf) {
-        int r = rd_slice_init(slice, rbuf, 0, rd_buf_len(rbuf));
-        rd_assert(r == 0);
-}
-
-
-
-/**
- * @sa rd_slice_reader() rd_slice_peeker()
- */
-size_t rd_slice_reader0 (rd_slice_t *slice, const void **p, int update_pos) {
-        size_t rof = slice->rof;
-        size_t rlen;
-        const rd_segment_t *seg;
-
-        /* Find segment with non-zero payload */
-        for (seg = slice->seg ;
-             seg && seg->seg_absof+rof < slice->end && seg->seg_of == rof ;
-              seg = TAILQ_NEXT(seg, seg_link))
-                rof = 0;
-
-        if (unlikely(!seg || seg->seg_absof+rof >= slice->end))
-                return 0;
-
-        rd_assert(seg->seg_absof+rof <= slice->end);
-
-
-        *p   = (const void *)(seg->seg_p + rof);
-        rlen = RD_MIN(seg->seg_of - rof, rd_slice_remains(slice));
-
-        if (update_pos) {
-                if (slice->seg != seg) {
-                        rd_assert(seg->seg_absof + rof >= slice->start &&
-                                  seg->seg_absof + rof+rlen <= slice->end);
-                        slice->seg  = seg;
-                        slice->rof  = rlen;
-                } else {
-                        slice->rof += rlen;
-                }
-        }
-
-        return rlen;
-}
-
-
-/**
- * @brief Convenience reader iterator interface.
- *
- *        Call repeatedly from while loop until it returns 0.
- *
- * @param slice slice to read from, position will be updated.
- * @param p will be set to the start of \p *rlenp contiguous bytes of memory
- * @param rlenp will be set to the number of bytes available in \p p
- *
- * @returns the number of bytes read, or 0 if slice is empty.
- */
-size_t rd_slice_reader (rd_slice_t *slice, const void **p) {
-        return rd_slice_reader0(slice, p, 1/*update_pos*/);
-}
-
-/**
- * @brief Identical to rd_slice_reader() but does NOT update the read position
- */
-size_t rd_slice_peeker (const rd_slice_t *slice, const void **p) {
-        return rd_slice_reader0((rd_slice_t *)slice, p, 0/*dont update_pos*/);
-}
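For illustration, the canonical reader loop over a full-buffer slice; the loop body is where a caller would hash, copy or parse each contiguous region:

/* Sketch: walk all contiguous regions of a buffer through a slice. */
static size_t example_consume (const rd_buf_t *b) {
        rd_slice_t slice;
        const void *p;
        size_t rlen, total = 0;

        rd_slice_init_full(&slice, b);

        while ((rlen = rd_slice_reader(&slice, &p)) > 0)
                total += rlen;                     /* inspect p[0..rlen) here */

        return total;                              /* equals rd_buf_len(b) */
}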
-
-
-
-
-
-/**
- * @brief Read \p size bytes from current read position,
- *        advancing the read offset by the number of bytes copied to \p dst.
- *
- *        If fewer than \p size bytes remain in the buffer
- *        then 0 is returned and no bytes are copied.
- *
- * @returns \p size, or 0 if \p size bytes are not available in buffer.
- *
- * @remark This performs a complete read, no partial reads.
- *
- * @remark If \p dst is NULL only the read position is updated.
- */
-size_t rd_slice_read (rd_slice_t *slice, void *dst, size_t size) {
-        size_t remains = size;
-        char *d = (char *)dst; /* Possibly NULL */
-        size_t rlen;
-        const void *p;
-        size_t orig_end = slice->end;
-
-        if (unlikely(rd_slice_remains(slice) < size))
-                return 0;
-
-        /* Temporarily shrink slice to offset + \p size */
-        slice->end = rd_slice_abs_offset(slice) + size;
-
-        while ((rlen = rd_slice_reader(slice, &p))) {
-                rd_dassert(remains >= rlen);
-                if (dst) {
-                        memcpy(d, p, rlen);
-                        d       += rlen;
-                }
-                remains -= rlen;
-        }
-
-        rd_dassert(remains == 0);
-
-        /* Restore original size */
-        slice->end = orig_end;
-
-        return size;
-}
-
-
-/**
- * @brief Read \p size bytes from absolute slice offset \p offset
- *        and store in \p dst, without updating the slice read position.
- *
- * @returns \p size if the offset and size were within the slice, else 0.
- */
-size_t rd_slice_peek (const rd_slice_t *slice, size_t offset,
-                      void *dst, size_t size) {
-        rd_slice_t sub = *slice;
-
-        if (unlikely(rd_slice_seek(&sub, offset) == -1))
-                return 0;
-
-        return rd_slice_read(&sub, dst, size);
-
-}
-
-
-
-/**
- * @returns a pointer to \p size contiguous bytes at the current read offset.
- *          If \p size contiguous bytes are not available, NULL will
- *          be returned.
- *
- * @remark The read position is updated to point past \p size.
- */
-const void *rd_slice_ensure_contig (rd_slice_t *slice, size_t size) {
-        void *p;
-
-        if (unlikely(rd_slice_remains(slice) < size ||
-                     slice->rof + size > slice->seg->seg_of))
-                return NULL;
-
-        p = slice->seg->seg_p + slice->rof;
-
-        rd_slice_read(slice, NULL, size);
-
-        return p;
-}
-
-
-
-/**
- * @brief Sets the slice's read position. The offset is the slice offset,
- *        not buffer offset.
- *
- * @returns 0 if offset was within range, else -1 in which case the position
- *          is not changed.
- */
-int rd_slice_seek (rd_slice_t *slice, size_t offset) {
-        const rd_segment_t *seg;
-        size_t absof = slice->start + offset;
-
-        if (unlikely(absof >= slice->end))
-                return -1;
-
-        seg = rd_buf_get_segment_at_offset(slice->buf, slice->seg, absof);
-        rd_assert(seg);
-
-        slice->seg = seg;
-        slice->rof = absof - seg->seg_absof;
-        rd_assert(seg->seg_absof + slice->rof >= slice->start &&
-                  seg->seg_absof + slice->rof <= slice->end);
-
-        return 0;
-}
-
-
-/**
- * @brief Narrow the current slice to \p size, saving
- *        the original slice state into \p save_slice.
- *
- *        Use rd_slice_widen() to restore the saved slice
- *        with the read count updated from the narrowed slice.
- *
- *        This is useful for reading a sub-slice of a larger slice
- *        without having to pass the lesser length around.
- *
- * @returns 1 if enough underlying slice buffer memory is available, else 0.
- */
-int rd_slice_narrow (rd_slice_t *slice, rd_slice_t *save_slice, size_t size) {
-        if (unlikely(slice->start + size > slice->end))
-                return 0;
-        *save_slice = *slice;
-        slice->end = slice->start + size;
-        rd_assert(rd_slice_abs_offset(slice) <= slice->end);
-        return 1;
-}
-
-/**
- * @brief Same as rd_slice_narrow() but using a relative size \p relsize
- *        from the current read position.
- */
-int rd_slice_narrow_relative (rd_slice_t *slice, rd_slice_t *save_slice,
-                               size_t relsize) {
-        return rd_slice_narrow(slice, save_slice,
-                               rd_slice_offset(slice) + relsize);
-}
-
-
-/**
- * @brief Restore the original \p save_slice size from a previous call to
- *        rd_slice_narrow(), while keeping the updated read pointer from
- *        \p slice.
- */
-void rd_slice_widen (rd_slice_t *slice, const rd_slice_t *save_slice) {
-        slice->end = save_slice->end;
-}
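For illustration, a sketch of parsing a length-prefixed sub-message with narrow()/widen(); the 4-byte host-order length prefix is an assumption of the example, not of this file:

/* Sketch: bound a parser to one sub-message, then restore the outer slice. */
static int example_submsg (rd_slice_t *slice) {
        rd_slice_t save;
        uint32_t len;

        if (rd_slice_read(slice, &len, sizeof(len)) != sizeof(len))
                return -1;

        if (!rd_slice_narrow_relative(slice, &save, (size_t)len))
                return -1;

        /* ... parse fields from 'slice'; it now ends at the sub-message ... */

        /* Skip anything the parser left unread, then restore the outer end. */
        rd_slice_read(slice, NULL, rd_slice_remains(slice));
        rd_slice_widen(slice, &save);

        return 0;
}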
-
-
-/**
- * @brief Copy the original slice \p orig to \p new_slice and adjust
- *        the new slice length to \p size.
- *
- *        This is a side-effect free form of rd_slice_narrow() which is not to
- *        be used with rd_slice_widen().
- *
- * @returns 1 if enough underlying slice buffer memory is available, else 0.
- */
-int rd_slice_narrow_copy (const rd_slice_t *orig, rd_slice_t *new_slice,
-                          size_t size) {
-        if (unlikely(orig->start + size > orig->end))
-                return 0;
-        *new_slice = *orig;
-        new_slice->end = orig->start + size;
-        rd_assert(rd_slice_abs_offset(new_slice) <= new_slice->end);
-        return 1;
-}
-
-/**
- * @brief Same as rd_slice_narrow_copy() but with a relative size from
- *        the current read position.
- */
-int rd_slice_narrow_copy_relative (const rd_slice_t *orig,
-                                    rd_slice_t *new_slice,
-                                    size_t relsize) {
-        return rd_slice_narrow_copy(orig, new_slice,
-                                    rd_slice_offset(orig) + relsize);
-}
-
-
-
-
-
-/**
- * @brief Set up the iovec \p iovs (of size \p iov_max) with the readable
- *        segments from the slice's current read position.
- *
- * @param iovcntp will be set to the number of populated \p iovs[]
- * @param size_max limits the total number of bytes made available.
- *                 Note: this value may be overshot by the size of one
- *                       segment.
- *
- * @returns the total number of bytes in the represented segments.
- *
- * @remark will NOT update the read position.
- */
-size_t rd_slice_get_iov (const rd_slice_t *slice,
-                         struct iovec *iovs, size_t *iovcntp,
-                         size_t iov_max, size_t size_max) {
-        const void *p;
-        size_t rlen;
-        size_t iovcnt = 0;
-        size_t sum = 0;
-        rd_slice_t copy = *slice; /* Use a copy of the slice so we don't
-                                   * update the position for the caller. */
-
-        while (sum < size_max && iovcnt < iov_max &&
-               (rlen = rd_slice_reader(&copy, &p))) {
-                iovs[iovcnt].iov_base  = (void *)p;
-                iovs[iovcnt++].iov_len = rlen;
-
-                sum += rlen;
-        }
-
-        *iovcntp = iovcnt;
-
-        return sum;
-}
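For illustration, the read-side counterpart: gather-write the readable segments to a socket with POSIX writev() (an assumption of the sketch, as above) and consume only what the socket accepted:

/* Sketch: send the remainder of a slice without copying it. */
static ssize_t example_send (rd_slice_t *slice, int sockfd) {
        struct iovec iov[8];
        size_t iovcnt;
        ssize_t r;

        rd_slice_get_iov(slice, iov, &iovcnt, 8, rd_slice_remains(slice));

        r = writev(sockfd, iov, (int)iovcnt);
        if (r > 0)
                rd_slice_read(slice, NULL, (size_t)r); /* consume what was sent */

        return r;
}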
-
-
-
-
-
-/**
- * @brief CRC32 calculation of slice.
- *
- * @returns the calculated CRC
- *
- * @remark the slice's position is updated.
- */
-uint32_t rd_slice_crc32 (rd_slice_t *slice) {
-        rd_crc32_t crc;
-        const void *p;
-        size_t rlen;
-
-        crc = rd_crc32_init();
-
-        while ((rlen = rd_slice_reader(slice, &p)))
-                crc = rd_crc32_update(crc, p, rlen);
-
-        return (uint32_t)rd_crc32_finalize(crc);
-}
-
-/**
- * @brief Compute CRC-32C of segments starting at buffer position \p absof,
- *        also supporting the case where the position/offset is not at the
- *        start of the first segment.
- *
- * @remark the slice's position is updated.
- */
-uint32_t rd_slice_crc32c (rd_slice_t *slice) {
-        const void *p;
-        size_t rlen;
-        uint32_t crc = 0;
-
-        while ((rlen = rd_slice_reader(slice, &p)))
-                crc = crc32c(crc, (const char *)p, rlen);
-
-        return crc;
-}
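For illustration, a sketch of checksumming an arbitrary region of a buffer without copying it; the offset and length are placeholders chosen by the caller:

/* Sketch: CRC-32C over 'len' bytes starting at absolute buffer offset 'absof'. */
static uint32_t example_crc32c_region (const rd_buf_t *b, size_t absof, size_t len) {
        rd_slice_t slice;

        if (rd_slice_init(&slice, b, absof, len) == -1)
                return 0;                      /* region out of range */

        return rd_slice_crc32c(&slice);        /* advances the slice to its end */
}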
-
-
-
-
-
-/**
- * @name Debugging dumpers
- *
- *
- */
-
-static void rd_segment_dump (const rd_segment_t *seg, const char *ind,
-                             size_t relof, int do_hexdump) {
-        fprintf(stderr,
-                "%s((rd_segment_t *)%p): "
-                "p %p, of %"PRIusz", "
-                "absof %"PRIusz", size %"PRIusz", free %p, flags 0x%x\n",
-                ind, seg, seg->seg_p, seg->seg_of,
-                seg->seg_absof, seg->seg_size, seg->seg_free, seg->seg_flags);
-        rd_assert(relof <= seg->seg_of);
-        if (do_hexdump)
-                rd_hexdump(stderr, "segment",
-                           seg->seg_p+relof, seg->seg_of-relof);
-}
-
-void rd_buf_dump (const rd_buf_t *rbuf, int do_hexdump) {
-        const rd_segment_t *seg;
-
-        fprintf(stderr,
-                "((rd_buf_t *)%p):\n"
-                " len %"PRIusz" size %"PRIusz
-                ", %"PRIusz"/%"PRIusz" extra memory used\n",
-                rbuf, rbuf->rbuf_len, rbuf->rbuf_size,
-                rbuf->rbuf_extra_len, rbuf->rbuf_extra_size);
-
-        if (rbuf->rbuf_wpos) {
-                fprintf(stderr, " wpos:\n");
-                rd_segment_dump(rbuf->rbuf_wpos, "  ", 0, 0);
-        }
-
-        if (rbuf->rbuf_segment_cnt > 0) {
-                size_t segcnt = 0;
-
-                fprintf(stderr, " %"PRIusz" linked segments:\n",
-                        rbuf->rbuf_segment_cnt);
-                TAILQ_FOREACH(seg, &rbuf->rbuf_segments, seg_link) {
-                        rd_segment_dump(seg, "  ", 0, do_hexdump);
-                        rd_assert(++segcnt <= rbuf->rbuf_segment_cnt);
-                }
-        }
-}
-
-void rd_slice_dump (const rd_slice_t *slice, int do_hexdump) {
-        const rd_segment_t *seg;
-        size_t relof;
-
-        fprintf(stderr,
-                "((rd_slice_t *)%p):\n"
-                "  buf %p (len %"PRIusz"), seg %p (absof %"PRIusz"), "
-                "rof %"PRIusz", start %"PRIusz", end %"PRIusz", size %"PRIusz
-                ", offset %"PRIusz"\n",
-                slice, slice->buf, rd_buf_len(slice->buf),
-                slice->seg, slice->seg ? slice->seg->seg_absof : 0,
-                slice->rof, slice->start, slice->end,
-                rd_slice_size(slice), rd_slice_offset(slice));
-        relof = slice->rof;
-
-        for (seg = slice->seg ; seg ; seg = TAILQ_NEXT(seg, seg_link)) {
-                rd_segment_dump(seg, "  ", relof, do_hexdump);
-                relof = 0;
-        }
-}
-
-
-/**
- * @name Unit-tests
- *
- *
- *
- */
-
-
-/**
- * @brief Basic write+read test
- */
-static int do_unittest_write_read (void) {
-        rd_buf_t b;
-        char ones[1024];
-        char twos[1024];
-        char threes[1024];
-        char fiftyfives[100]; /* 0x55 indicates "untouched" memory */
-        char buf[1024*3];
-        rd_slice_t slice;
-        size_t r, pos;
-
-        memset(ones, 0x1, sizeof(ones));
-        memset(twos, 0x2, sizeof(twos));
-        memset(threes, 0x3, sizeof(threes));
-        memset(fiftyfives, 0x55, sizeof(fiftyfives));
-        memset(buf, 0x55, sizeof(buf));
-
-        rd_buf_init(&b, 2, 1000);
-
-        /*
-         * Verify write
-         */
-        r = rd_buf_write(&b, ones, 200);
-        RD_UT_ASSERT(r == 0, "write() returned position %"PRIusz, r);
-        pos = rd_buf_write_pos(&b);
-        RD_UT_ASSERT(pos == 200, "pos() returned position %"PRIusz, pos);
-
-        r = rd_buf_write(&b, twos, 800);
-        RD_UT_ASSERT(r == 200, "write() returned position %"PRIusz, r);
-        pos = rd_buf_write_pos(&b);
-        RD_UT_ASSERT(pos == 200+800, "pos() returned position %"PRIusz, pos);
-
-        /* Buffer grows here */
-        r = rd_buf_write(&b, threes, 1);
-        RD_UT_ASSERT(r == 200+800,
-                     "write() returned position %"PRIusz, r);
-        pos = rd_buf_write_pos(&b);
-        RD_UT_ASSERT(pos == 200+800+1, "pos() returned position %"PRIusz, pos);
-
-        /*
-         * Verify read
-         */
-        /* Get full slice. */
-        rd_slice_init_full(&slice, &b);
-
-        r = rd_slice_read(&slice, buf, 200+800+2);
-        RD_UT_ASSERT(r == 0,
-                     "read() > remaining should have failed, gave %"PRIusz, r);
-        r = rd_slice_read(&slice, buf, 200+800+1);
-        RD_UT_ASSERT(r == 200+800+1,
-                     "read() returned %"PRIusz" (%"PRIusz" remains)",
-                     r, rd_slice_remains(&slice));
-
-        RD_UT_ASSERT(!memcmp(buf, ones, 200), "verify ones");
-        RD_UT_ASSERT(!memcmp(buf+200, twos, 800), "verify twos");
-        RD_UT_ASSERT(!memcmp(buf+200+800, threes, 1), "verify threes");
-        RD_UT_ASSERT(!memcmp(buf+200+800+1, fiftyfives, 100), "verify 55s");
-
-        rd_buf_destroy(&b);
-
-        RD_UT_PASS();
-}
-
-
-/**
- * @brief Helper read verifier, not a unit-test itself.
- */
-#define do_unittest_read_verify(b,absof,len,verify) do {                \
-                int __fail = do_unittest_read_verify0(b,absof,len,verify); \
-                RD_UT_ASSERT(!__fail,                                   \
-                             "read_verify(absof=%"PRIusz",len=%"PRIusz") " \
-                             "failed", (size_t)absof, (size_t)len);     \
-        } while (0)
-
-static int
-do_unittest_read_verify0 (const rd_buf_t *b, size_t absof, size_t len,
-                          const char *verify) {
-        rd_slice_t slice, sub;
-        char buf[1024];
-        size_t half;
-        size_t r;
-        int i;
-
-        rd_assert(sizeof(buf) >= len);
-
-        /* Get reader slice */
-        i = rd_slice_init(&slice, b, absof, len);
-        RD_UT_ASSERT(i == 0, "slice_init() failed: %d", i);
-
-        r = rd_slice_read(&slice, buf, len);
-        RD_UT_ASSERT(r == len,
-                     "read() returned %"PRIusz" expected %"PRIusz
-                     " (%"PRIusz" remains)",
-                     r, len, rd_slice_remains(&slice));
-
-        RD_UT_ASSERT(!memcmp(buf, verify, len), "verify");
-
-        r = rd_slice_offset(&slice);
-        RD_UT_ASSERT(r == len, "offset() returned %"PRIusz", not %"PRIusz,
-                     r, len);
-
-        half = len / 2;
-        i = rd_slice_seek(&slice, half);
-        RD_UT_ASSERT(i == 0, "seek(%"PRIusz") returned %d", half, i);
-        r = rd_slice_offset(&slice);
-        RD_UT_ASSERT(r == half, "offset() returned %"PRIusz", not %"PRIusz,
-                     r, half);
-
-        /* Get a sub-slice covering the later half. */
-        sub = rd_slice_pos(&slice);
-        r = rd_slice_offset(&sub);
-        RD_UT_ASSERT(r == 0, "sub: offset() returned %"PRIusz", not %"PRIusz,
-                     r, (size_t)0);
-        r = rd_slice_size(&sub);
-        RD_UT_ASSERT(r == half, "sub: size() returned %"PRIusz", not %"PRIusz,
-                     r, half);
-        r = rd_slice_remains(&sub);
-        RD_UT_ASSERT(r == half,
-                     "sub: remains() returned %"PRIusz", not %"PRIusz,
-                     r, half);
-
-        /* Read half */
-        r = rd_slice_read(&sub, buf, half);
-        RD_UT_ASSERT(r == half,
-                     "sub read() returned %"PRIusz" expected %"PRIusz
-                     " (%"PRIusz" remains)",
-                     r, half, rd_slice_remains(&sub));
-
-        RD_UT_ASSERT(!memcmp(buf, verify, len), "verify");
-
-        r = rd_slice_offset(&sub);
-        RD_UT_ASSERT(r == rd_slice_size(&sub),
-                     "sub offset() returned %"PRIusz", not %"PRIusz,
-                     r, rd_slice_size(&sub));
-        r = rd_slice_remains(&sub);
-        RD_UT_ASSERT(r == 0,
-                     "sub: remains() returned %"PRIusz", not %"PRIusz,
-                     r, (size_t)0);
-
-        return 0;
-}
-
-
-/**
- * @brief write_seek() and split() test
- */
-static int do_unittest_write_split_seek (void) {
-        rd_buf_t b;
-        char ones[1024];
-        char twos[1024];
-        char threes[1024];
-        char fiftyfives[100]; /* 0x55 indicates "untouched" memory */
-        char buf[1024*3];
-        size_t r, pos;
-        rd_segment_t *seg, *newseg;
-
-        memset(ones, 0x1, sizeof(ones));
-        memset(twos, 0x2, sizeof(twos));
-        memset(threes, 0x3, sizeof(threes));
-        memset(fiftyfives, 0x55, sizeof(fiftyfives));
-        memset(buf, 0x55, sizeof(buf));
-
-        rd_buf_init(&b, 0, 0);
-
-        /*
-         * Verify write
-         */
-        r = rd_buf_write(&b, ones, 400);
-        RD_UT_ASSERT(r == 0, "write() returned position %"PRIusz, r);
-        pos = rd_buf_write_pos(&b);
-        RD_UT_ASSERT(pos == 400, "pos() returned position %"PRIusz, pos);
-
-        do_unittest_read_verify(&b, 0, 400, ones);
-
-        /*
-         * Seek and re-write
-         */
-        r = rd_buf_write_seek(&b, 200);
-        RD_UT_ASSERT(r == 0, "seek() failed");
-        pos = rd_buf_write_pos(&b);
-        RD_UT_ASSERT(pos == 200, "pos() returned position %"PRIusz, pos);
-
-        r = rd_buf_write(&b, twos, 100);
-        RD_UT_ASSERT(r == 200, "write() returned position %"PRIusz, r);
-        pos = rd_buf_write_pos(&b);
-        RD_UT_ASSERT(pos == 200+100, "pos() returned position %"PRIusz, pos);
-
-        do_unittest_read_verify(&b, 0, 200, ones);
-        do_unittest_read_verify(&b, 200, 100, twos);
-
-        /* Make sure read() did not modify the write position. */
-        pos = rd_buf_write_pos(&b);
-        RD_UT_ASSERT(pos == 200+100, "pos() returned position %"PRIusz, pos);
-
-        /* Split buffer, write position is now at split where writes
-         * are not allowed (mid buffer). */
-        seg = rd_buf_get_segment_at_offset(&b, NULL, 50);
-        RD_UT_ASSERT(seg->seg_of != 0, "assumed mid-segment");
-        newseg = rd_segment_split(&b, seg, 50);
-        rd_buf_append_segment(&b, newseg);
-        seg = rd_buf_get_segment_at_offset(&b, NULL, 50);
-        RD_UT_ASSERT(seg != NULL, "seg");
-        RD_UT_ASSERT(seg == newseg, "newseg %p, seg %p", newseg, seg);
-        RD_UT_ASSERT(seg->seg_of > 0,
-                     "assumed beginning of segment, got %"PRIusz, seg->seg_of);
-
-        pos = rd_buf_write_pos(&b);
-        RD_UT_ASSERT(pos == 200+100, "pos() returned position %"PRIusz, pos);
-
-        /* Re-verify that nothing changed */
-        do_unittest_read_verify(&b, 0, 200, ones);
-        do_unittest_read_verify(&b, 200, 100, twos);
-
-        /* Do a write seek at buffer boundary, subsequent buffers should
-         * be destroyed. */
-        r = rd_buf_write_seek(&b, 50);
-        RD_UT_ASSERT(r == 0, "seek() failed");
-        do_unittest_read_verify(&b, 0, 50, ones);
-
-        rd_buf_destroy(&b);
-
-        RD_UT_PASS();
-}
-
-/**
- * @brief Unittest to verify payload is correctly written and read.
- *        Each written u32 word is the running CRC of the word count.
- */
-static int do_unittest_write_read_payload_correctness (void) {
-        uint32_t crc;
-        uint32_t write_crc, read_crc;
-        const int seed = 12345;
-        rd_buf_t b;
-        const size_t max_cnt = 20000;
-        rd_slice_t slice;
-        size_t r;
-        size_t i;
-        int pass;
-
-        crc = rd_crc32_init();
-        crc = rd_crc32_update(crc, (void *)&seed, sizeof(seed));
-
-        rd_buf_init(&b, 0, 0);
-        for (i = 0 ; i < max_cnt ; i++) {
-                crc = rd_crc32_update(crc, (void *)&i, sizeof(i));
-                rd_buf_write(&b, &crc, sizeof(crc));
-        }
-
-        write_crc = rd_crc32_finalize(crc);
-
-        r = rd_buf_len(&b);
-        RD_UT_ASSERT(r == max_cnt * sizeof(crc),
-                     "expected length %"PRIusz", not %"PRIusz,
-                     r, max_cnt * sizeof(crc));
-
-        /*
-         * Now verify the contents with a reader.
-         */
-        rd_slice_init_full(&slice, &b);
-
-        r = rd_slice_remains(&slice);
-        RD_UT_ASSERT(r == rd_buf_len(&b),
-                     "slice remains %"PRIusz", should be %"PRIusz,
-                     r, rd_buf_len(&b));
-
-        for (pass = 0 ; pass < 2 ; pass++) {
-                /* Two passes:
-                 *  - pass 1: using peek()
-                 *  - pass 2: using read()
-                 */
-                const char *pass_str = pass == 0 ? "peek":"read";
-
-                crc = rd_crc32_init();
-                crc = rd_crc32_update(crc, (void *)&seed, sizeof(seed));
-
-                for (i = 0 ; i < max_cnt ; i++) {
-                        uint32_t buf_crc;
-
-                        crc = rd_crc32_update(crc, (void *)&i, sizeof(i));
-
-                        if (pass == 0)
-                                r = rd_slice_peek(&slice, i * sizeof(buf_crc),
-                                                  &buf_crc, sizeof(buf_crc));
-                        else
-                                r = rd_slice_read(&slice, &buf_crc,
-                                                  sizeof(buf_crc));
-                        RD_UT_ASSERT(r == sizeof(buf_crc),
-                                     "%s() at #%"PRIusz" failed: "
-                                     "r is %"PRIusz" not %"PRIusz,
-                                     pass_str, i, r, sizeof(buf_crc));
-                        RD_UT_ASSERT(buf_crc == crc,
-                                     "%s: invalid crc at #%"PRIusz
-                                     ": expected %"PRIu32", read %"PRIu32,
-                                     pass_str, i, crc, buf_crc);
-                }
-
-                read_crc = rd_crc32_finalize(crc);
-
-                RD_UT_ASSERT(read_crc == write_crc,
-                             "%s: finalized read crc %"PRIu32
-                             " != write crc %"PRIu32,
-                             pass_str, read_crc, write_crc);
-
-        }
-
-        r = rd_slice_remains(&slice);
-        RD_UT_ASSERT(r == 0,
-                     "slice remains %"PRIusz", should be %"PRIusz,
-                     r, (size_t)0);
-
-        rd_buf_destroy(&b);
-
-        RD_UT_PASS();
-}
-
-#define do_unittest_iov_verify(...) do {                                \
-                int __fail = do_unittest_iov_verify0(__VA_ARGS__);      \
-                RD_UT_ASSERT(!__fail, "iov_verify() failed");           \
-        } while (0)
-static int do_unittest_iov_verify0 (rd_buf_t *b,
-                                    size_t exp_iovcnt, size_t exp_totsize) {
-        #define MY_IOV_MAX 16
-        struct iovec iov[MY_IOV_MAX];
-        size_t iovcnt;
-        size_t i;
-        size_t totsize, sum;
-
-        rd_assert(exp_iovcnt <= MY_IOV_MAX);
-
-        totsize = rd_buf_get_write_iov(b, iov, &iovcnt, MY_IOV_MAX, exp_totsize);
-        RD_UT_ASSERT(totsize >= exp_totsize,
-                     "iov total size %"PRIusz" expected >= %"PRIusz,
-                     totsize, exp_totsize);
-        RD_UT_ASSERT(iovcnt >= exp_iovcnt && iovcnt <= MY_IOV_MAX,
-                     "iovcnt %"PRIusz
-                     ", expected %"PRIusz" < x <= MY_IOV_MAX",
-                     iovcnt, exp_iovcnt);
-
-        sum = 0;
-        for (i = 0 ; i < iovcnt ; i++) {
-                RD_UT_ASSERT(iov[i].iov_base,
-                             "iov #%"PRIusz" iov_base not set", i);
-                RD_UT_ASSERT(iov[i].iov_len,
-                             "iov #%"PRIusz" iov_len %"PRIusz" out of range",
-                             i, iov[i].iov_len);
-                sum += iov[i].iov_len;
-                RD_UT_ASSERT(sum <= totsize, "sum %"PRIusz" > totsize %"PRIusz,
-                             sum, totsize);
-        }
-
-        RD_UT_ASSERT(sum == totsize,
-                     "sum %"PRIusz" != totsize %"PRIusz,
-                     sum, totsize);
-
-        return 0;
-}
-
-
-/**
- * @brief Verify that buffer to iovec conversion works.
- */
-static int do_unittest_write_iov (void) {
-        rd_buf_t b;
-
-        rd_buf_init(&b, 0, 0);
-        rd_buf_write_ensure(&b, 100, 100);
-
-        do_unittest_iov_verify(&b, 1, 100);
-
-        /* Add a secondary buffer */
-        rd_buf_write_ensure(&b, 30000, 0);
-
-        do_unittest_iov_verify(&b, 2, 100+30000);
-
-
-        rd_buf_destroy(&b);
-
-        RD_UT_PASS();
-}
-
-
-int unittest_rdbuf (void) {
-        int fails = 0;
-
-        fails += do_unittest_write_read();
-        fails += do_unittest_write_split_seek();
-        fails += do_unittest_write_read_payload_correctness();
-        fails += do_unittest_write_iov();
-
-        return fails;
-}

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdbuf.h
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdbuf.h b/thirdparty/librdkafka-0.11.1/src/rdbuf.h
deleted file mode 100644
index aa6b4f1..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdbuf.h
+++ /dev/null
@@ -1,325 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2017 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RDBUF_H
-#define _RDBUF_H
-
-#ifndef _MSC_VER
-/* for struct iovec */
-#include <sys/socket.h>
-#include <sys/types.h>
-#endif
-
-#include "rdsysqueue.h"
-
-
-/**
- * @name Generic byte buffers
- *
- * @{
- *
- * A buffer is a list of segments, each segment having a memory pointer,
- * write offset, and capacity.
- *
- * The main buffer and segment structure is tailored for append-writing
- * or append-pushing foreign memory.
- *
- * Updates of previously written memory regions are possible through the
- * use of write_update() that takes an absolute offset.
- *
- * The write position is part of the buffer and segment structures, while
- * read is a separate object (rd_slice_t) that does not affect the buffer.
- */
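For illustration, a minimal end-to-end sketch using only the API declared below (error handling omitted):

/* Sketch: write a few bytes, read them back through a slice, clean up. */
static void example_roundtrip (void) {
        rd_buf_t b;
        rd_slice_t slice;
        char out[5];

        rd_buf_init(&b, 0, 0);
        rd_buf_write(&b, "hello", 5);

        rd_slice_init_full(&slice, &b);
        rd_slice_read(&slice, out, sizeof(out));

        rd_buf_destroy(&b);
}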
-
-
-/**
- * @brief Buffer segment
- */
-typedef struct rd_segment_s {
-        TAILQ_ENTRY(rd_segment_s)  seg_link; /**< rbuf_segments Link */
-        char  *seg_p;                /**< Backing-store memory */
-        size_t seg_of;               /**< Current relative write-position
-                                      *   (length of payload in this segment) */
-        size_t seg_size;             /**< Allocated size of seg_p */
-        size_t seg_absof;            /**< Absolute offset of this segment's
-                                      *   beginning in the grand rd_buf_t */
-        void (*seg_free) (void *p);  /**< Optional free function for seg_p */
-        int    seg_flags;            /**< Segment flags */
-#define RD_SEGMENT_F_RDONLY   0x1    /**< Read-only segment */
-#define RD_SEGMENT_F_FREE     0x2    /**< Free segment on destroy,
-                                      *   e.g., not a fixed segment. */
-} rd_segment_t;
-
-
-
-
-TAILQ_HEAD(rd_segment_head,rd_segment_s);
-
-/**
- * @brief Buffer, containing a list of segments.
- */
-typedef struct rd_buf_s {
-        struct rd_segment_head rbuf_segments; /**< TAILQ list of segments */
-        size_t            rbuf_segment_cnt;   /**< Number of segments */
-
-        rd_segment_t     *rbuf_wpos;          /**< Current write position seg */
-        size_t            rbuf_len;           /**< Current (written) length */
-        size_t            rbuf_size;          /**< Total allocated size of
-                                               *   all segments. */
-
-        char             *rbuf_extra;         /* Extra memory allocated for
-                                               * use by segment structs,
-                                               * buffer memory, etc. */
-        size_t            rbuf_extra_len;     /* Current extra memory used */
-        size_t            rbuf_extra_size;    /* Total size of extra memory */
-} rd_buf_t;
-
-
-
-/**
- * @brief A read-only slice of a buffer.
- */
-typedef struct rd_slice_s {
-        const rd_buf_t     *buf;    /**< Pointer to buffer */
-        const rd_segment_t *seg;    /**< Current read position segment.
-                                     *   Will point to NULL when end of
-                                     *   slice is reached. */
-        size_t              rof;    /**< Relative read offset in segment */
-        size_t              start;  /**< Slice start offset in buffer */
-        size_t              end;    /**< Slice end offset in buffer, exclusive
-                                     *   (last byte + 1) */
-} rd_slice_t;
-
-
-
-/**
- * @returns the current write position (absolute offset)
- */
-static RD_INLINE RD_UNUSED size_t rd_buf_write_pos (const rd_buf_t *rbuf) {
-        const rd_segment_t *seg = rbuf->rbuf_wpos;
-
-        if (unlikely(!seg)) {
-#if ENABLE_DEVEL
-                rd_assert(rbuf->rbuf_len == 0);
-#endif
-                return 0;
-        }
-#if ENABLE_DEVEL
-        rd_assert(seg->seg_absof + seg->seg_of == rbuf->rbuf_len);
-#endif
-        return seg->seg_absof + seg->seg_of;
-}
-
-
-/**
- * @returns the number of bytes available for writing (before growing).
- */
-static RD_INLINE RD_UNUSED size_t rd_buf_write_remains (const rd_buf_t *rbuf) {
-        return rbuf->rbuf_size - rbuf->rbuf_len;
-}
-
-
-
-
-/**
- * @returns the number of bytes remaining to write to the given segment,
- *          and sets the \p *p pointer (unless NULL) to the start of
- *          the contiguous memory.
- */
-static RD_INLINE RD_UNUSED size_t
-rd_segment_write_remains (const rd_segment_t *seg, void **p) {
-        if (unlikely((seg->seg_flags & RD_SEGMENT_F_RDONLY)))
-                return 0;
-        if (p)
-                *p = (void *)(seg->seg_p + seg->seg_of);
-        return seg->seg_size - seg->seg_of;
-}
-
-
-
-/**
- * @returns the last segment for the buffer.
- */
-static RD_INLINE RD_UNUSED rd_segment_t *rd_buf_last (const rd_buf_t *rbuf) {
-        return TAILQ_LAST(&rbuf->rbuf_segments, rd_segment_head);
-}
-
-
-/**
- * @returns the total written buffer length
- */
-static RD_INLINE RD_UNUSED size_t rd_buf_len (const rd_buf_t *rbuf) {
-        return rbuf->rbuf_len;
-}
-
-
-int rd_buf_write_seek (rd_buf_t *rbuf, size_t absof);
-
-
-size_t rd_buf_write (rd_buf_t *rbuf, const void *payload, size_t size);
-size_t rd_buf_write_slice (rd_buf_t *rbuf, rd_slice_t *slice);
-size_t rd_buf_write_update (rd_buf_t *rbuf, size_t absof,
-                            const void *payload, size_t size);
-void rd_buf_push (rd_buf_t *rbuf, const void *payload, size_t size,
-                  void (*free_cb)(void *));
-
-
-size_t rd_buf_get_writable (rd_buf_t *rbuf, void **p);
-
-void rd_buf_write_ensure_contig (rd_buf_t *rbuf, size_t size);
-
-void rd_buf_write_ensure (rd_buf_t *rbuf, size_t min_size, size_t max_size);
-
-size_t rd_buf_get_write_iov (const rd_buf_t *rbuf,
-                             struct iovec *iovs, size_t *iovcntp,
-                             size_t iov_max, size_t size_max);
-
-void rd_buf_init (rd_buf_t *rbuf, size_t fixed_seg_cnt, size_t buf_size);
-
-void rd_buf_destroy (rd_buf_t *rbuf);
-
-void rd_buf_dump (const rd_buf_t *rbuf, int do_hexdump);
-
-int unittest_rdbuf (void);
-
-
-/**@}*/
-
-
-
-
-/**
- * @name Buffer read operates on slices of an rd_buf_t and does not
- *       modify the underlying buffer itself.
- *
- * @warning A slice will not be valid/safe after the buffer or
- *          segments have been modified by a buf write operation
- *          (write, update, write_seek, etc).
- * @{
- */
-
-
-/**
- * @returns the remaining length in the slice
- */
-#define rd_slice_remains(slice) ((slice)->end - rd_slice_abs_offset(slice))
-
-/**
- * @returns the total size of the slice, regardless of current position.
- */
-#define rd_slice_size(slice) ((slice)->end - (slice)->start)
-
-/**
- * @returns the read position in the slice as a new slice.
- */
-static RD_INLINE RD_UNUSED rd_slice_t rd_slice_pos (const rd_slice_t *slice) {
-        rd_slice_t newslice = *slice;
-
-        if (!slice->seg)
-                return newslice;
-
-        newslice.start = slice->seg->seg_absof + slice->rof;
-
-        return newslice;
-}
-
-/**
- * @returns the read position as an absolute buffer byte offset.
- * @remark this is the buffer offset, not the slice's local offset.
- */
-static RD_INLINE RD_UNUSED size_t
-rd_slice_abs_offset (const rd_slice_t *slice) {
-        if (unlikely(!slice->seg)) /* reader has reached the end */
-                return slice->end;
-
-        return slice->seg->seg_absof + slice->rof;
-}
-
-/**
- * @returns the read position as a byte offset.
- * @remark this is the slice-local offset, not the backing buffer's offset.
- */
-static RD_INLINE RD_UNUSED size_t rd_slice_offset (const rd_slice_t *slice) {
-        if (unlikely(!slice->seg)) /* reader has reached the end */
-                return rd_slice_size(slice);
-
-        return (slice->seg->seg_absof + slice->rof) - slice->start;
-}
-
-
-
-
-int rd_slice_init_seg (rd_slice_t *slice, const rd_buf_t *rbuf,
-                       const rd_segment_t *seg, size_t rof, size_t size);
-int rd_slice_init (rd_slice_t *slice, const rd_buf_t *rbuf,
-                   size_t absof, size_t size);
-void rd_slice_init_full (rd_slice_t *slice, const rd_buf_t *rbuf);
-
-size_t rd_slice_reader (rd_slice_t *slice, const void **p);
-size_t rd_slice_peeker (const rd_slice_t *slice, const void **p);
-
-size_t rd_slice_read (rd_slice_t *slice, void *dst, size_t size);
-size_t rd_slice_peek (const rd_slice_t *slice, size_t offset,
-                      void *dst, size_t size);
-
-size_t rd_slice_read_varint (rd_slice_t *slice, size_t *nump);
-
-const void *rd_slice_ensure_contig (rd_slice_t *slice, size_t size);
-
-int rd_slice_seek (rd_slice_t *slice, size_t offset);
-
-size_t rd_slice_get_iov (const rd_slice_t *slice,
-                         struct iovec *iovs, size_t *iovcntp,
-                         size_t iov_max, size_t size_max);
-
-
-uint32_t rd_slice_crc32 (rd_slice_t *slice);
-uint32_t rd_slice_crc32c (rd_slice_t *slice);
-
-
-int rd_slice_narrow (rd_slice_t *slice, rd_slice_t *save_slice, size_t size)
-        RD_WARN_UNUSED_RESULT;
-int rd_slice_narrow_relative (rd_slice_t *slice, rd_slice_t *save_slice,
-                               size_t relsize)
-        RD_WARN_UNUSED_RESULT;
-void rd_slice_widen (rd_slice_t *slice, const rd_slice_t *save_slice);
-int rd_slice_narrow_copy (const rd_slice_t *orig, rd_slice_t *new_slice,
-                           size_t size)
-        RD_WARN_UNUSED_RESULT;
-int rd_slice_narrow_copy_relative (const rd_slice_t *orig,
-                                    rd_slice_t *new_slice,
-                                    size_t relsize)
-        RD_WARN_UNUSED_RESULT;
-
-void rd_slice_dump (const rd_slice_t *slice, int do_hexdump);
-
-
-/**@}*/
-
-
-
-#endif /* _RDBUF_H */

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdcrc32.c
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdcrc32.c b/thirdparty/librdkafka-0.11.1/src/rdcrc32.c
deleted file mode 100644
index ca71427..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdcrc32.c
+++ /dev/null
@@ -1,113 +0,0 @@
-/**
- * \file rdcrc32.c
- * Functions and types for CRC checks.
- *
- * Generated on Tue May  8 17:37:04 2012,
- * by pycrc v0.7.10, http://www.tty1.net/pycrc/
- * using the configuration:
- *    Width        = 32
- *    Poly         = 0x04c11db7
- *    XorIn        = 0xffffffff
- *    ReflectIn    = True
- *    XorOut       = 0xffffffff
- *    ReflectOut   = True
- *    Algorithm    = table-driven
- *****************************************************************************/
-#include "rdcrc32.h"     /* include the header file generated with pycrc */
-#include <stdlib.h>
-#include <stdint.h>
-
-/**
- * Static table used for the table_driven implementation.
- *****************************************************************************/
-const rd_crc32_t crc_table[256] = {
-    0x00000000, 0x77073096, 0xee0e612c, 0x990951ba,
-    0x076dc419, 0x706af48f, 0xe963a535, 0x9e6495a3,
-    0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988,
-    0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91,
-    0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de,
-    0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7,
-    0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec,
-    0x14015c4f, 0x63066cd9, 0xfa0f3d63, 0x8d080df5,
-    0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172,
-    0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b,
-    0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940,
-    0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59,
-    0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116,
-    0x21b4f4b5, 0x56b3c423, 0xcfba9599, 0xb8bda50f,
-    0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924,
-    0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d,
-    0x76dc4190, 0x01db7106, 0x98d220bc, 0xefd5102a,
-    0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433,
-    0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818,
-    0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01,
-    0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e,
-    0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457,
-    0x65b0d9c6, 0x12b7e950, 0x8bbeb8ea, 0xfcb9887c,
-    0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65,
-    0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2,
-    0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb,
-    0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0,
-    0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9,
-    0x5005713c, 0x270241aa, 0xbe0b1010, 0xc90c2086,
-    0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
-    0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4,
-    0x59b33d17, 0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad,
-    0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a,
-    0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683,
-    0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8,
-    0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1,
-    0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe,
-    0xf762575d, 0x806567cb, 0x196c3671, 0x6e6b06e7,
-    0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc,
-    0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5,
-    0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252,
-    0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b,
-    0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60,
-    0xdf60efc3, 0xa867df55, 0x316e8eef, 0x4669be79,
-    0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236,
-    0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f,
-    0xc5ba3bbe, 0xb2bd0b28, 0x2bb45a92, 0x5cb36a04,
-    0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d,
-    0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a,
-    0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713,
-    0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38,
-    0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21,
-    0x86d3d2d4, 0xf1d4e242, 0x68ddb3f8, 0x1fda836e,
-    0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777,
-    0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c,
-    0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45,
-    0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2,
-    0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db,
-    0xaed16a4a, 0xd9d65adc, 0x40df0b66, 0x37d83bf0,
-    0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
-    0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6,
-    0xbad03605, 0xcdd70693, 0x54de5729, 0x23d967bf,
-    0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94,
-    0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d
-};
-
-/**
- * Reflect all bits of a \a data word of \a data_len bytes.
- *
- * \param data         The data word to be reflected.
- * \param data_len     The width of \a data expressed in number of bits.
- * \return             The reflected data.
- *****************************************************************************/
-rd_crc32_t rd_crc32_reflect(rd_crc32_t data, size_t data_len)
-{
-    unsigned int i;
-    rd_crc32_t ret;
-
-    ret = data & 0x01;
-    for (i = 1; i < data_len; i++) {
-        data >>= 1;
-        ret = (ret << 1) | (data & 0x01);
-    }
-    return ret;
-}
-
-
-
-
-
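
For intuition, a tiny hypothetical check of the bit-reflection helper above (not part of this commit; assumes it is compiled inside the librdkafka source tree): reflecting a value over 8 bits simply reverses the order of its low 8 bits.

#include <assert.h>
#include "rdcrc32.h"

int main (void) {
        /* 0x03 = 00000011 reflected over 8 bits is 11000000 = 0xC0. */
        assert(rd_crc32_reflect(0x03, 8) == 0xC0);
        /* Reflection is its own inverse. */
        assert(rd_crc32_reflect(0xC0, 8) == 0x03);
        return 0;
}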

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdcrc32.h
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdcrc32.h b/thirdparty/librdkafka-0.11.1/src/rdcrc32.h
deleted file mode 100644
index 1024194..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdcrc32.h
+++ /dev/null
@@ -1,146 +0,0 @@
-/**
- * \file rdcrc32.h
- * Functions and types for CRC checks.
- *
- * Generated on Tue May  8 17:36:59 2012,
- * by pycrc v0.7.10, http://www.tty1.net/pycrc/
- *
- * NOTE: Contains librd modifications:
- *       - rd_crc32() helper.
- *       - __RDCRC32___H__ define (was missing the '32' part).
- *
- * using the configuration:
- *    Width        = 32
- *    Poly         = 0x04c11db7
- *    XorIn        = 0xffffffff
- *    ReflectIn    = True
- *    XorOut       = 0xffffffff
- *    ReflectOut   = True
- *    Algorithm    = table-driven
- *****************************************************************************/
-#ifndef __RDCRC32___H__
-#define __RDCRC32___H__
-
-#include "rd.h"
-
-#include <stdlib.h>
-#include <stdint.h>
-
-#if WITH_ZLIB
-#include <zlib.h>
-#endif
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-
-/**
- * The definition of the used algorithm.
- *****************************************************************************/
-#define CRC_ALGO_TABLE_DRIVEN 1
-
-
-/**
- * The type of the CRC values.
- *
- * This type must be big enough to contain at least 32 bits.
- *****************************************************************************/
-typedef uint32_t rd_crc32_t;
-
-#if !WITH_ZLIB
-extern 	const rd_crc32_t crc_table[256];
-#endif
-
-
-/**
- * Reflect all bits of a \a data word of \a data_len bytes.
- *
- * \param data         The data word to be reflected.
- * \param data_len     The width of \a data expressed in number of bits.
- * \return             The reflected data.
- *****************************************************************************/
-rd_crc32_t rd_crc32_reflect(rd_crc32_t data, size_t data_len);
-
-
-/**
- * Calculate the initial crc value.
- *
- * \return     The initial crc value.
- *****************************************************************************/
-static RD_INLINE rd_crc32_t rd_crc32_init(void)
-{
-#if WITH_ZLIB
-        return crc32(0, NULL, 0);
-#else
-    return 0xffffffff;
-#endif
-}
-
-
-/**
- * Update the crc value with new data.
- *
- * \param crc      The current crc value.
- * \param data     Pointer to a buffer of \a data_len bytes.
- * \param data_len Number of bytes in the \a data buffer.
- * \return         The updated crc value.
- *****************************************************************************/
-static RD_INLINE RD_UNUSED
-rd_crc32_t rd_crc32_update(rd_crc32_t crc, const unsigned char *data, size_t data_len)
-{
-#if WITH_ZLIB
-        rd_assert(data_len <= UINT_MAX);
-        return crc32(crc, data, (uInt) data_len);
-#else
-    unsigned int tbl_idx;
-
-    while (data_len--) {
-        tbl_idx = (crc ^ *data) & 0xff;
-        crc = (crc_table[tbl_idx] ^ (crc >> 8)) & 0xffffffff;
-
-        data++;
-    }
-    return crc & 0xffffffff;
-#endif
-}
-
-
-/**
- * Calculate the final crc value.
- *
- * \param crc  The current crc value.
- * \return     The final crc value.
- *****************************************************************************/
-static RD_INLINE rd_crc32_t rd_crc32_finalize(rd_crc32_t crc)
-{
-#if WITH_ZLIB
-        return crc;
-#else
-    return crc ^ 0xffffffff;
-#endif
-}
-
-
-/**
- * Wrapper for performing CRC32 on the provided buffer.
- */
-static RD_INLINE rd_crc32_t rd_crc32 (const char *data, size_t data_len) {
-	return rd_crc32_finalize(rd_crc32_update(rd_crc32_init(),
-						 (const unsigned char *)data,
-						 data_len));
-}
-
-#ifdef __cplusplus
-}           /* closing brace for extern "C" */
-#endif
-
-#endif      /* __RDCRC32___H__ */
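
A minimal usage sketch of the CRC helpers declared above (hypothetical, not part of this commit; assumes the table-driven non-zlib path and compilation inside the librdkafka tree). It shows that the one-shot rd_crc32() wrapper and the incremental init/update/finalize sequence agree, which is what allows checksumming data that arrives in chunks.

#include <stdio.h>
#include <string.h>
#include "rdcrc32.h"

int main (void) {
        const char *msg = "kafka";

        /* One-shot: rd_crc32() chains init -> update -> finalize. */
        rd_crc32_t one_shot = rd_crc32(msg, strlen(msg));

        /* Incremental over two chunks yields the same value. */
        rd_crc32_t crc = rd_crc32_init();
        crc = rd_crc32_update(crc, (const unsigned char *)msg, 2);
        crc = rd_crc32_update(crc, (const unsigned char *)msg + 2,
                              strlen(msg) - 2);
        crc = rd_crc32_finalize(crc);

        printf("one-shot 0x%08x, chunked 0x%08x\n",
               (unsigned)one_shot, (unsigned)crc);
        return one_shot == crc ? 0 : 1;
}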

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rddl.c
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rddl.c b/thirdparty/librdkafka-0.11.1/src/rddl.c
deleted file mode 100644
index 400441c..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rddl.c
+++ /dev/null
@@ -1,179 +0,0 @@
-/*
- * librdkafka - The Apache Kafka C/C++ library
- *
- * Copyright (c) 2017 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "rd.h"
-#include "rddl.h"
-
-#if WITH_LIBDL
-#include <dlfcn.h>
-
-#elif defined( _MSC_VER)
-
-#else
-#error "Dynamic library loading not supported on this platform"
-#endif
-
-
-
-/**
- * @brief Latest thread-local dl error, normalized to suit our logging.
- * @returns a newly allocated string that must be freed
- */
-static char *rd_dl_error (void) {
-#if WITH_LIBDL
-        char *errstr;
-        char *s;
-        errstr = dlerror();
-        if (!errstr)
-                return rd_strdup("No error returned from dlerror()");
-
-        errstr = rd_strdup(errstr);
-        /* Change newlines to separators. */
-        while ((s = strchr(errstr, '\n')))
-                *s = '.';
-
-        return errstr;
-
-#elif defined(_MSC_VER)
-        char buf[1024];
-        rd_strerror_w32(GetLastError(), buf, sizeof(buf));
-        return rd_strdup(buf);
-#endif
-}
-
-/**
- * @brief Attempt to load library \p path.
- * @returns the library handle (platform dependent, thus opaque) on success,
- *          else NULL.
- */
-static rd_dl_hnd_t *
-rd_dl_open0 (const char *path, char *errstr, size_t errstr_size) {
-        void *handle;
-        const char *loadfunc;
-#if WITH_LIBDL
-        loadfunc = "dlopen()";
-        handle = dlopen(path, RTLD_NOW | RTLD_LOCAL);
-#elif defined(_MSC_VER)
-        loadfunc = "LoadLibrary()";
-        handle = (void *)LoadLibraryA(path);
-#endif
-        if (!handle) {
-                char *dlerrstr = rd_dl_error();
-                rd_snprintf(errstr, errstr_size, "%s failed: %s",
-                            loadfunc, dlerrstr);
-                rd_free(dlerrstr);
-        }
-        return (rd_dl_hnd_t *)handle;
-}
-
-
-/**
- * @brief Attempt to load library \p path, possibly with a filename extension
- *        which will be automatically resolved depending on platform.
- * @returns the library handle (platform dependent, thus opaque) on success,
- *          else NULL.
- */
-rd_dl_hnd_t *rd_dl_open (const char *path, char *errstr, size_t errstr_size) {
-        rd_dl_hnd_t *handle;
-        char *extpath;
-        size_t pathlen;
-        const char *td, *fname;
-        const char *solib_ext = SOLIB_EXT;
-
-        /* Try original path first. */
-        handle = rd_dl_open0(path, errstr, errstr_size);
-        if (handle)
-                return handle;
-
-        /* Original path not found, see if we can append the solib_ext
-         * filename extension. */
-
-        /* Get filename and filename extension.
-         * We can't rely on basename(3) since it is not portable */
-        fname = strrchr(path, '/');
-#ifdef _MSC_VER
-        td = strrchr(path, '\\');
-        if (td > fname)
-                fname = td;
-#endif
-        if (!fname)
-                fname = path;
-
-        td = strrchr(fname, '.');
-
-        /* If there is a filename extension ('.' within the last characters)
-         * then bail out, we will not append an extension in this case. */
-        if (td && td >= fname + strlen(fname) - strlen(SOLIB_EXT))
-                return NULL;
-
-        /* Append platform-specific library extension. */
-        pathlen = strlen(path);
-        extpath = rd_alloca(pathlen + strlen(solib_ext) + 1);
-        memcpy(extpath, path, pathlen);
-        memcpy(extpath+pathlen, solib_ext, strlen(solib_ext) + 1);
-
-        /* Try again with extension */
-        return rd_dl_open0(extpath, errstr, errstr_size);
-}
-
-
-/**
- * @brief Close handle previously returned by rd_dl_open()
- * @remark errors are ignored (what can we do anyway?)
- */
-void rd_dl_close (rd_dl_hnd_t *handle) {
-#if WITH_LIBDL
-        dlclose((void *)handle);
-#elif defined(_MSC_VER)
-        FreeLibrary((HMODULE)handle);
-#endif
-}
-
-/**
- * @brief look up address of \p symbol in library handle \p handle
- * @returns the function pointer on success or NULL on error.
- */
-void *
-rd_dl_sym (rd_dl_hnd_t *handle, const char *symbol,
-           char *errstr, size_t errstr_size) {
-        void *func;
-#if WITH_LIBDL
-        func = dlsym((void *)handle, symbol);
-#elif defined(_MSC_VER)
-        func = GetProcAddress((HMODULE)handle, symbol);
-#endif
-        if (!func) {
-                char *dlerrstr = rd_dl_error();
-                rd_snprintf(errstr, errstr_size,
-                            "Failed to load symbol \"%s\": %s",
-                            symbol, dlerrstr);
-                rd_free(dlerrstr);
-        }
-        return func;
-}
-
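
A hypothetical caller of the loader above (the library path and the "plugin_init" symbol are illustrative only, not from this commit): open a shared library, resolve one entry point, invoke it, then release the handle.

#include <stdio.h>
#include "rddl.h"

typedef int (plugin_init_f) (void);

static int load_and_init (const char *path) {
        char errstr[256];
        rd_dl_hnd_t *hnd;
        plugin_init_f *init;
        int r;

        /* rd_dl_open() retries with the platform extension (SOLIB_EXT)
         * appended if the plain path cannot be opened. */
        if (!(hnd = rd_dl_open(path, errstr, sizeof(errstr)))) {
                fprintf(stderr, "open failed: %s\n", errstr);
                return -1;
        }

        if (!(init = (plugin_init_f *)rd_dl_sym(hnd, "plugin_init",
                                                errstr, sizeof(errstr)))) {
                fprintf(stderr, "lookup failed: %s\n", errstr);
                rd_dl_close(hnd);
                return -1;
        }

        r = init();
        rd_dl_close(hnd);
        return r;
}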

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rddl.h
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rddl.h b/thirdparty/librdkafka-0.11.1/src/rddl.h
deleted file mode 100644
index 6a49d2e..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rddl.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * librdkafka - The Apache Kafka C/C++ library
- *
- * Copyright (c) 2017 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RDDL_H
-#define _RDDL_H
-
-#include <sys/types.h>
-
-typedef void rd_dl_hnd_t;
-
-rd_dl_hnd_t *rd_dl_open (const char *path, char *errstr, size_t errstr_size);
-void rd_dl_close (rd_dl_hnd_t *handle);
-void *rd_dl_sym (rd_dl_hnd_t *handle, const char *symbol,
-                 char *errstr, size_t errstr_size);
-
-#endif /* _RDDL_H */

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdendian.h
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdendian.h b/thirdparty/librdkafka-0.11.1/src/rdendian.h
deleted file mode 100644
index 0d2c510..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdendian.h
+++ /dev/null
@@ -1,151 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2015 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-#pragma once
-
-/**
- * Provides portable endian-swapping macros/functions.
- *
- *   be64toh()
- *   htobe64()
- *   be32toh()
- *   htobe32()
- *   be16toh()
- *   htobe16()
- */
-
-#ifdef __FreeBSD__
-  #include <sys/endian.h>
-#elif defined __GLIBC__
-  #include <endian.h>
- #ifndef be64toh
-   /* Support older glibc (<2.9) which lack be64toh */
-  #include <byteswap.h>
-  #if __BYTE_ORDER == __BIG_ENDIAN
-   #define be16toh(x) (x)
-   #define be32toh(x) (x)
-   #define be64toh(x) (x)
-  #else
-   #define be16toh(x) __bswap_16 (x)
-   #define be32toh(x) __bswap_32 (x)
-   #define be64toh(x) __bswap_64 (x)
-  #endif
- #endif
-
-#elif defined __CYGWIN__
- #include <endian.h>
-#elif defined __BSD__
-  #include <sys/endian.h>
-#elif defined sun
-  #include <sys/byteorder.h>
-  #include <sys/isa_defs.h>
-#define __LITTLE_ENDIAN 1234
-#define __BIG_ENDIAN 4321
-#ifdef _BIG_ENDIAN
-#define __BYTE_ORDER __BIG_ENDIAN
-#define be64toh(x) (x)
-#define be32toh(x) (x)
-#define be16toh(x) (x)
-#define le16toh(x) ((uint16_t)BSWAP_16(x))
-#define le32toh(x) BSWAP_32(x)
-#define le64toh(x) BSWAP_64(x)
-#define htole16(x) ((uint16_t)BSWAP_16(x))
-#define htole32(x) BSWAP_32(x)
-#define htole64(x) BSWAP_64(x)
-# else
-#define __BYTE_ORDER __LITTLE_ENDIAN
-#define be64toh(x) BSWAP_64(x)
-#define be32toh(x) ntohl(x)
-#define be16toh(x) ntohs(x)
-#define le16toh(x) (x)
-#define le32toh(x) (x)
-#define le64toh(x) (x)
-#define htole16(x) (x)
-#define htole32(x) (x)
-#define htole64(x) (x)
-#endif /* sun */
-
-#elif defined __APPLE__
-  #include <sys/_endian.h>
-  #include <libkern/OSByteOrder.h>
-  #define __bswap_64(x)      OSSwapInt64(x)
-  #define __bswap_32(x)      OSSwapInt32(x)
-  #define __bswap_16(x)      OSSwapInt16(x)
-
-#if __DARWIN_BYTE_ORDER == __DARWIN_BIG_ENDIAN
-#define be64toh(x) (x)
-#define be32toh(x) (x)
-#define be16toh(x) (x)
-#else
-#define be64toh(x) OSSwapInt64(x)
-#define be32toh(x) OSSwapInt32(x)
-#define be16toh(x) OSSwapInt16(x)
-#endif
-
-#elif defined(_MSC_VER)
-#include <intrin.h>
-
-#define be64toh(x) _byteswap_uint64(x)
-#define be32toh(x) _byteswap_ulong(x)
-#define be16toh(x) _byteswap_ushort(x)
-
-#elif defined _AIX      /* AIX is always big endian */
-#define be64toh(x) (x)
-#define be32toh(x) (x)
-#define be16toh(x) (x)
-
-#else
- #include <endian.h>
-#endif
-
-
-
-/*
- * On Solaris, be64toh is a function, not a macro, so there's no need to error
- * if it's not defined.
- */
-#if !defined(__sun) && !defined(be64toh)
-#error Missing definition for be64toh
-#endif
-
-#ifndef be32toh
-#define be32toh(x) ntohl(x)
-#endif
-
-#ifndef be16toh
-#define be16toh(x) ntohs(x)
-#endif
-
-#ifndef htobe64
-#define htobe64(x) be64toh(x)
-#endif
-#ifndef htobe32
-#define htobe32(x) be32toh(x)
-#endif
-#ifndef htobe16
-#define htobe16(x) be16toh(x)
-#endif
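
For context, a small hypothetical sketch using the macros above (not part of this commit): the Kafka wire protocol is big-endian, so a 32-bit field read off the socket is converted to host byte order (and back) like this, with memcpy() avoiding unaligned access.

#include <stdint.h>
#include <string.h>
#include "rdendian.h"

static int32_t read_be32 (const void *wire) {
        uint32_t be;
        memcpy(&be, wire, sizeof(be));          /* source may be unaligned */
        return (int32_t)be32toh(be);
}

static void write_be32 (void *wire, int32_t v) {
        uint32_t be = htobe32((uint32_t)v);
        memcpy(wire, &be, sizeof(be));
}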

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdgz.c
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdgz.c b/thirdparty/librdkafka-0.11.1/src/rdgz.c
deleted file mode 100644
index 3a3f6d2..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdgz.c
+++ /dev/null
@@ -1,124 +0,0 @@
-/*
- * librd - Rapid Development C library
- *
- * Copyright (c) 2012, Magnus Edenhill
- * All rights reserved.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met: 
- * 
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer. 
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution. 
- * 
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "rd.h"
-#include "rdgz.h"
-
-#include <zlib.h>
-
-
-#define RD_GZ_CHUNK  262144
-
-void *rd_gz_decompress (const void *compressed, int compressed_len,
-			uint64_t *decompressed_lenp) {
-	int pass = 1;
-	char *decompressed = NULL;
-
-	/* First pass (1): calculate decompressed size.
-	 *                 (pass-1 is skipped if *decompressed_lenp is
-	 *                  non-zero).
-	 * Second pass (2): perform actual decompression.
-	 */
-
-	if (*decompressed_lenp != 0LLU)
-		pass++;
-
-	for (; pass <= 2 ; pass++) {
-		z_stream strm = RD_ZERO_INIT;
-		gz_header hdr;
-		char buf[512];
-		char *p;
-		int len;
-		int r;
-		
-		if ((r = inflateInit2(&strm, 15+32)) != Z_OK)
-			goto fail;
-
-		strm.next_in = (void *)compressed;
-		strm.avail_in = compressed_len;
-
-		if ((r = inflateGetHeader(&strm, &hdr)) != Z_OK) {
-			inflateEnd(&strm);
-			goto fail;
-		}
-
-		if (pass == 1) {
-			/* Use dummy output buffer */
-			p = buf;
-			len = sizeof(buf);
-		} else {
-			/* Use real output buffer */
-			p = decompressed;
-			len = (int)*decompressed_lenp;
-		}
-
-		do {
-			strm.next_out = (unsigned char *)p;
-			strm.avail_out = len;
-
-			r = inflate(&strm, Z_NO_FLUSH);
-			switch  (r) {
-			case Z_STREAM_ERROR:
-			case Z_NEED_DICT:
-			case Z_DATA_ERROR:
-			case Z_MEM_ERROR:
-				inflateEnd(&strm);
-				goto fail;
-			}
-
-			if (pass == 2) {
-				/* Advance output pointer (in pass 2). */
-				p += len - strm.avail_out;
-				len -= len - strm.avail_out;
-			}
-
-		} while (strm.avail_out == 0 && r != Z_STREAM_END);
-
-
-		if (pass == 1) {
-			*decompressed_lenp = strm.total_out;
-			if (!(decompressed = malloc((size_t)(*decompressed_lenp)+1))) {
-				inflateEnd(&strm);
-				return NULL;
-			}
-			/* For convenience of the caller we nul-terminate
-			 * the buffer. If it happens to be a string there
-			 * is no need for extra copies. */
-			decompressed[*decompressed_lenp] = '\0';
-		}
-
-		inflateEnd(&strm);
-	}
-
-	return decompressed;
-
-fail:
-	if (decompressed)
-		free(decompressed);
-	return NULL;
-}
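
A hypothetical caller of rd_gz_decompress() above (not part of this commit), showing the sizing contract: passing *decompressed_lenp == 0 triggers the first (sizing) pass, the function allocates and NUL-terminates the output buffer, and the caller frees it.

#include <stdlib.h>
#include <stdint.h>
#include "rdgz.h"

static char *gunzip_to_string (const void *buf, int len) {
        uint64_t out_len = 0;   /* 0 => run pass 1 to compute the size */
        char *out = rd_gz_decompress(buf, len, &out_len);

        if (!out)
                return NULL;    /* corrupt input or allocation failure */

        /* out[out_len] == '\0', so the buffer is usable as a string. */
        return out;             /* caller releases it with free() */
}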


http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdkafka_queue.c
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdkafka_queue.c b/thirdparty/librdkafka-0.11.1/src/rdkafka_queue.c
deleted file mode 100644
index fab2899..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdkafka_queue.c
+++ /dev/null
@@ -1,860 +0,0 @@
-/*
- * librdkafka - The Apache Kafka C/C++ library
- *
- * Copyright (c) 2016 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "rdkafka_int.h"
-#include "rdkafka_offset.h"
-#include "rdkafka_topic.h"
-#include "rdkafka_interceptor.h"
-
-int RD_TLS rd_kafka_yield_thread = 0;
-
-void rd_kafka_yield (rd_kafka_t *rk) {
-        rd_kafka_yield_thread = 1;
-}
-
-
-/**
- * Destroy a queue. refcnt must be at zero.
- */
-void rd_kafka_q_destroy_final (rd_kafka_q_t *rkq) {
-
-        mtx_lock(&rkq->rkq_lock);
-	if (unlikely(rkq->rkq_qio != NULL)) {
-		rd_free(rkq->rkq_qio);
-		rkq->rkq_qio = NULL;
-	}
-        rd_kafka_q_fwd_set0(rkq, NULL, 0/*no-lock*/, 0 /*no-fwd-app*/);
-        rd_kafka_q_disable0(rkq, 0/*no-lock*/);
-        rd_kafka_q_purge0(rkq, 0/*no-lock*/);
-	assert(!rkq->rkq_fwdq);
-        mtx_unlock(&rkq->rkq_lock);
-	mtx_destroy(&rkq->rkq_lock);
-	cnd_destroy(&rkq->rkq_cond);
-
-        if (rkq->rkq_flags & RD_KAFKA_Q_F_ALLOCATED)
-                rd_free(rkq);
-}
-
-
-
-/**
- * Initialize a queue.
- */
-void rd_kafka_q_init (rd_kafka_q_t *rkq, rd_kafka_t *rk) {
-        rd_kafka_q_reset(rkq);
-	rkq->rkq_fwdq   = NULL;
-        rkq->rkq_refcnt = 1;
-        rkq->rkq_flags  = RD_KAFKA_Q_F_READY;
-        rkq->rkq_rk     = rk;
-	rkq->rkq_qio    = NULL;
-        rkq->rkq_serve  = NULL;
-        rkq->rkq_opaque = NULL;
-	mtx_init(&rkq->rkq_lock, mtx_plain);
-	cnd_init(&rkq->rkq_cond);
-}
-
-
-/**
- * Allocate a new queue and initialize it.
- */
-rd_kafka_q_t *rd_kafka_q_new0 (rd_kafka_t *rk, const char *func, int line) {
-        rd_kafka_q_t *rkq = rd_malloc(sizeof(*rkq));
-        rd_kafka_q_init(rkq, rk);
-        rkq->rkq_flags |= RD_KAFKA_Q_F_ALLOCATED;
-#if ENABLE_DEVEL
-	rd_snprintf(rkq->rkq_name, sizeof(rkq->rkq_name), "%s:%d", func, line);
-#else
-	rkq->rkq_name = func;
-#endif
-        return rkq;
-}
-
-/**
- * Set/clear forward queue.
- * Queue forwarding enables message routing inside rdkafka.
- * Typical use is to re-route all fetched messages for all partitions
- * to one single queue.
- *
- * All access to rkq_fwdq are protected by rkq_lock.
- */
-void rd_kafka_q_fwd_set0 (rd_kafka_q_t *srcq, rd_kafka_q_t *destq,
-                          int do_lock, int fwd_app) {
-
-        if (do_lock)
-                mtx_lock(&srcq->rkq_lock);
-        if (fwd_app)
-                srcq->rkq_flags |= RD_KAFKA_Q_F_FWD_APP;
-	if (srcq->rkq_fwdq) {
-		rd_kafka_q_destroy(srcq->rkq_fwdq);
-		srcq->rkq_fwdq = NULL;
-	}
-	if (destq) {
-		rd_kafka_q_keep(destq);
-
-		/* If rkq has ops in queue, append them to fwdq's queue.
-		 * This is an irreversible operation. */
-                if (srcq->rkq_qlen > 0) {
-			rd_dassert(destq->rkq_flags & RD_KAFKA_Q_F_READY);
-			rd_kafka_q_concat(destq, srcq);
-		}
-
-		srcq->rkq_fwdq = destq;
-	}
-        if (do_lock)
-                mtx_unlock(&srcq->rkq_lock);
-}
-
-/**
- * Purge all entries from a queue.
- */
-int rd_kafka_q_purge0 (rd_kafka_q_t *rkq, int do_lock) {
-	rd_kafka_op_t *rko, *next;
-	TAILQ_HEAD(, rd_kafka_op_s) tmpq = TAILQ_HEAD_INITIALIZER(tmpq);
-        rd_kafka_q_t *fwdq;
-        int cnt = 0;
-
-        if (do_lock)
-                mtx_lock(&rkq->rkq_lock);
-
-        if ((fwdq = rd_kafka_q_fwd_get(rkq, 0))) {
-                if (do_lock)
-                        mtx_unlock(&rkq->rkq_lock);
-                cnt = rd_kafka_q_purge(fwdq);
-                rd_kafka_q_destroy(fwdq);
-                return cnt;
-        }
-
-	/* Move ops queue to tmpq to avoid lock-order issue
-	 * by locks taken from rd_kafka_op_destroy(). */
-	TAILQ_MOVE(&tmpq, &rkq->rkq_q, rko_link);
-
-	/* Zero out queue */
-        rd_kafka_q_reset(rkq);
-
-        if (do_lock)
-                mtx_unlock(&rkq->rkq_lock);
-
-	/* Destroy the ops */
-	next = TAILQ_FIRST(&tmpq);
-	while ((rko = next)) {
-		next = TAILQ_NEXT(next, rko_link);
-		rd_kafka_op_destroy(rko);
-                cnt++;
-	}
-
-        return cnt;
-}
-
-
-/**
- * Purge all entries from a queue with a rktp version smaller than `version`
- * This shaves off the head of the queue, up until the first rko with
- * a non-matching rktp or version.
- */
-void rd_kafka_q_purge_toppar_version (rd_kafka_q_t *rkq,
-                                      rd_kafka_toppar_t *rktp, int version) {
-	rd_kafka_op_t *rko, *next;
-	TAILQ_HEAD(, rd_kafka_op_s) tmpq = TAILQ_HEAD_INITIALIZER(tmpq);
-        int32_t cnt = 0;
-        int64_t size = 0;
-        rd_kafka_q_t *fwdq;
-
-	mtx_lock(&rkq->rkq_lock);
-
-        if ((fwdq = rd_kafka_q_fwd_get(rkq, 0))) {
-                mtx_unlock(&rkq->rkq_lock);
-                rd_kafka_q_purge_toppar_version(fwdq, rktp, version);
-                rd_kafka_q_destroy(fwdq);
-                return;
-        }
-
-        /* Move ops to temporary queue and then destroy them from there
-         * without locks to avoid lock-ordering problems in op_destroy() */
-        while ((rko = TAILQ_FIRST(&rkq->rkq_q)) && rko->rko_rktp &&
-               rd_kafka_toppar_s2i(rko->rko_rktp) == rktp &&
-               rko->rko_version < version) {
-                TAILQ_REMOVE(&rkq->rkq_q, rko, rko_link);
-                TAILQ_INSERT_TAIL(&tmpq, rko, rko_link);
-                cnt++;
-                size += rko->rko_len;
-        }
-
-
-        rkq->rkq_qlen -= cnt;
-        rkq->rkq_qsize -= size;
-	mtx_unlock(&rkq->rkq_lock);
-
-	next = TAILQ_FIRST(&tmpq);
-	while ((rko = next)) {
-		next = TAILQ_NEXT(next, rko_link);
-		rd_kafka_op_destroy(rko);
-	}
-}
-
-
-/**
- * Move 'cnt' entries from 'srcq' to 'dstq'.
- * If 'cnt' == -1 all entries will be moved.
- * Returns the number of entries moved.
- */
-int rd_kafka_q_move_cnt (rd_kafka_q_t *dstq, rd_kafka_q_t *srcq,
-			    int cnt, int do_locks) {
-	rd_kafka_op_t *rko;
-        int mcnt = 0;
-
-        if (do_locks) {
-		mtx_lock(&srcq->rkq_lock);
-		mtx_lock(&dstq->rkq_lock);
-	}
-
-	if (!dstq->rkq_fwdq && !srcq->rkq_fwdq) {
-		if (cnt > 0 && dstq->rkq_qlen == 0)
-			rd_kafka_q_io_event(dstq);
-
-		/* Optimization, if 'cnt' is equal/larger than all
-		 * items of 'srcq' we can move the entire queue. */
-		if (cnt == -1 ||
-                    cnt >= (int)srcq->rkq_qlen) {
-                        mcnt = srcq->rkq_qlen;
-                        rd_kafka_q_concat0(dstq, srcq, 0/*no-lock*/);
-		} else {
-			while (mcnt < cnt &&
-			       (rko = TAILQ_FIRST(&srcq->rkq_q))) {
-				TAILQ_REMOVE(&srcq->rkq_q, rko, rko_link);
-                                if (likely(!rko->rko_prio))
-                                        TAILQ_INSERT_TAIL(&dstq->rkq_q, rko,
-                                                          rko_link);
-                                else
-                                        TAILQ_INSERT_SORTED(
-                                                &dstq->rkq_q, rko,
-                                                rd_kafka_op_t *, rko_link,
-                                                rd_kafka_op_cmp_prio);
-
-                                srcq->rkq_qlen--;
-                                dstq->rkq_qlen++;
-                                srcq->rkq_qsize -= rko->rko_len;
-                                dstq->rkq_qsize += rko->rko_len;
-				mcnt++;
-			}
-		}
-	} else
-		mcnt = rd_kafka_q_move_cnt(dstq->rkq_fwdq ? dstq->rkq_fwdq:dstq,
-					   srcq->rkq_fwdq ? srcq->rkq_fwdq:srcq,
-					   cnt, do_locks);
-
-	if (do_locks) {
-		mtx_unlock(&dstq->rkq_lock);
-		mtx_unlock(&srcq->rkq_lock);
-	}
-
-	return mcnt;
-}
-
-
-/**
- * Filters out outdated ops.
- */
-static RD_INLINE rd_kafka_op_t *rd_kafka_op_filter (rd_kafka_q_t *rkq,
-						    rd_kafka_op_t *rko,
-						    int version) {
-        if (unlikely(!rko))
-                return NULL;
-
-        if (unlikely(rd_kafka_op_version_outdated(rko, version))) {
-		rd_kafka_q_deq0(rkq, rko);
-                rd_kafka_op_destroy(rko);
-                return NULL;
-        }
-
-        return rko;
-}
-
-
-
-/**
- * Pop an op from a queue.
- *
- * Locality: any thread.
- */
-
-
-/**
- * Serve q like rd_kafka_q_serve() until an op is found that can be returned
- * as an event to the application.
- *
- * @returns the first event:able op, or NULL on timeout.
- *
- * Locality: any thread
- */
-rd_kafka_op_t *rd_kafka_q_pop_serve (rd_kafka_q_t *rkq, int timeout_ms,
-                                     int32_t version,
-                                     rd_kafka_q_cb_type_t cb_type,
-                                     rd_kafka_q_serve_cb_t *callback,
-                                     void *opaque) {
-	rd_kafka_op_t *rko;
-        rd_kafka_q_t *fwdq;
-
-        rd_dassert(cb_type);
-
-	if (timeout_ms == RD_POLL_INFINITE)
-		timeout_ms = INT_MAX;
-
-	mtx_lock(&rkq->rkq_lock);
-
-        rd_kafka_yield_thread = 0;
-        if (!(fwdq = rd_kafka_q_fwd_get(rkq, 0))) {
-                do {
-                        rd_kafka_op_res_t res;
-                        rd_ts_t pre;
-
-                        /* Filter out outdated ops */
-                retry:
-                        while ((rko = TAILQ_FIRST(&rkq->rkq_q)) &&
-                               !(rko = rd_kafka_op_filter(rkq, rko, version)))
-                                ;
-
-                        if (rko) {
-                                /* Proper versioned op */
-                                rd_kafka_q_deq0(rkq, rko);
-
-                                /* Ops with callbacks are considered handled
-                                 * and we move on to the next op, if any.
-                                 * Ops w/o callbacks are returned immediately */
-                                res = rd_kafka_op_handle(rkq->rkq_rk, rkq, rko,
-                                                         cb_type, opaque,
-                                                         callback);
-                                if (res == RD_KAFKA_OP_RES_HANDLED)
-                                        goto retry; /* Next op */
-                                else if (unlikely(res ==
-                                                  RD_KAFKA_OP_RES_YIELD)) {
-                                        /* Callback yielded, unroll */
-                                        mtx_unlock(&rkq->rkq_lock);
-                                        return NULL;
-                                } else
-                                        break; /* Proper op, handle below. */
-                        }
-
-                        /* No op, wait for one */
-                        pre = rd_clock();
-			if (cnd_timedwait_ms(&rkq->rkq_cond,
-					     &rkq->rkq_lock,
-					     timeout_ms) ==
-			    thrd_timedout) {
-				mtx_unlock(&rkq->rkq_lock);
-				return NULL;
-			}
-			/* Remove spent time */
-			timeout_ms -= (int) (rd_clock()-pre) / 1000;
-			if (timeout_ms < 0)
-				timeout_ms = RD_POLL_NOWAIT;
-
-		} while (timeout_ms != RD_POLL_NOWAIT);
-
-                mtx_unlock(&rkq->rkq_lock);
-
-        } else {
-                /* Since the q_pop may block we need to release the parent
-                 * queue's lock. */
-                mtx_unlock(&rkq->rkq_lock);
-		rko = rd_kafka_q_pop_serve(fwdq, timeout_ms, version,
-					   cb_type, callback, opaque);
-                rd_kafka_q_destroy(fwdq);
-        }
-
-
-	return rko;
-}
-
-rd_kafka_op_t *rd_kafka_q_pop (rd_kafka_q_t *rkq, int timeout_ms,
-                               int32_t version) {
-	return rd_kafka_q_pop_serve(rkq, timeout_ms, version,
-                                    RD_KAFKA_Q_CB_RETURN,
-                                    NULL, NULL);
-}
-
-
-/**
- * Pop all available ops from a queue and call the provided 
- * callback for each op.
- * `max_cnt` limits the number of ops served, 0 = no limit.
- *
- * Returns the number of ops served.
- *
- * Locality: any thread.
- */
-int rd_kafka_q_serve (rd_kafka_q_t *rkq, int timeout_ms,
-                      int max_cnt, rd_kafka_q_cb_type_t cb_type,
-                      rd_kafka_q_serve_cb_t *callback, void *opaque) {
-        rd_kafka_t *rk = rkq->rkq_rk;
-	rd_kafka_op_t *rko;
-	rd_kafka_q_t localq;
-        rd_kafka_q_t *fwdq;
-        int cnt = 0;
-
-        rd_dassert(cb_type);
-
-	mtx_lock(&rkq->rkq_lock);
-
-        rd_dassert(TAILQ_EMPTY(&rkq->rkq_q) || rkq->rkq_qlen > 0);
-        if ((fwdq = rd_kafka_q_fwd_get(rkq, 0))) {
-                int ret;
-                /* Since the q_pop may block we need to release the parent
-                 * queue's lock. */
-                mtx_unlock(&rkq->rkq_lock);
-		ret = rd_kafka_q_serve(fwdq, timeout_ms, max_cnt,
-                                       cb_type, callback, opaque);
-                rd_kafka_q_destroy(fwdq);
-		return ret;
-	}
-
-	if (timeout_ms == RD_POLL_INFINITE)
-		timeout_ms = INT_MAX;
-
-	/* Wait for op */
-	while (!(rko = TAILQ_FIRST(&rkq->rkq_q)) && timeout_ms != 0) {
-		if (cnd_timedwait_ms(&rkq->rkq_cond,
-				     &rkq->rkq_lock,
-				     timeout_ms) != thrd_success)
-			break;
-
-		timeout_ms = 0;
-	}
-
-	if (!rko) {
-		mtx_unlock(&rkq->rkq_lock);
-		return 0;
-	}
-
-	/* Move the first `max_cnt` ops. */
-	rd_kafka_q_init(&localq, rkq->rkq_rk);
-	rd_kafka_q_move_cnt(&localq, rkq, max_cnt == 0 ? -1/*all*/ : max_cnt,
-			    0/*no-locks*/);
-
-        mtx_unlock(&rkq->rkq_lock);
-
-        rd_kafka_yield_thread = 0;
-
-	/* Call callback for each op */
-        while ((rko = TAILQ_FIRST(&localq.rkq_q))) {
-                rd_kafka_op_res_t res;
-
-                rd_kafka_q_deq0(&localq, rko);
-                res = rd_kafka_op_handle(rk, &localq, rko, cb_type,
-                                         opaque, callback);
-                /* op must have been handled */
-                rd_kafka_assert(NULL, res != RD_KAFKA_OP_RES_PASS);
-                cnt++;
-
-                if (unlikely(res == RD_KAFKA_OP_RES_YIELD ||
-                             rd_kafka_yield_thread)) {
-                        /* Callback called rd_kafka_yield(), we must
-                         * stop our callback dispatching and put the
-                         * ops in localq back on the original queue head. */
-                        if (!TAILQ_EMPTY(&localq.rkq_q))
-                                rd_kafka_q_prepend(rkq, &localq);
-                        break;
-                }
-	}
-
-	rd_kafka_q_destroy(&localq);
-
-	return cnt;
-}
-
-
-
-
-
-/**
- * Populate 'rkmessages' array with messages from 'rkq'.
- * If 'auto_commit' is set, each message's offset will be committed
- * to the offset store for that toppar.
- *
- * Returns the number of messages added.
- */
-
-int rd_kafka_q_serve_rkmessages (rd_kafka_q_t *rkq, int timeout_ms,
-                                 rd_kafka_message_t **rkmessages,
-                                 size_t rkmessages_size) {
-	unsigned int cnt = 0;
-        TAILQ_HEAD(, rd_kafka_op_s) tmpq = TAILQ_HEAD_INITIALIZER(tmpq);
-        rd_kafka_op_t *rko, *next;
-        rd_kafka_t *rk = rkq->rkq_rk;
-        rd_kafka_q_t *fwdq;
-
-	mtx_lock(&rkq->rkq_lock);
-        if ((fwdq = rd_kafka_q_fwd_get(rkq, 0))) {
-                /* Since the q_pop may block we need to release the parent
-                 * queue's lock. */
-                mtx_unlock(&rkq->rkq_lock);
-		cnt = rd_kafka_q_serve_rkmessages(fwdq, timeout_ms,
-						  rkmessages, rkmessages_size);
-                rd_kafka_q_destroy(fwdq);
-		return cnt;
-	}
-        mtx_unlock(&rkq->rkq_lock);
-
-        rd_kafka_yield_thread = 0;
-	while (cnt < rkmessages_size) {
-                rd_kafka_op_res_t res;
-
-                mtx_lock(&rkq->rkq_lock);
-
-		while (!(rko = TAILQ_FIRST(&rkq->rkq_q))) {
-			if (cnd_timedwait_ms(&rkq->rkq_cond, &rkq->rkq_lock,
-                                             timeout_ms) == thrd_timedout)
-				break;
-		}
-
-		if (!rko) {
-                        mtx_unlock(&rkq->rkq_lock);
-			break; /* Timed out */
-                }
-
-		rd_kafka_q_deq0(rkq, rko);
-
-                mtx_unlock(&rkq->rkq_lock);
-
-		if (rd_kafka_op_version_outdated(rko, 0)) {
-                        /* Outdated op, put on discard queue */
-                        TAILQ_INSERT_TAIL(&tmpq, rko, rko_link);
-                        continue;
-                }
-
-                /* Serve non-FETCH callbacks */
-                res = rd_kafka_poll_cb(rk, rkq, rko,
-                                       RD_KAFKA_Q_CB_RETURN, NULL);
-                if (res == RD_KAFKA_OP_RES_HANDLED) {
-                        /* Callback served, rko is destroyed. */
-                        continue;
-                } else if (unlikely(res == RD_KAFKA_OP_RES_YIELD ||
-                                    rd_kafka_yield_thread)) {
-                        /* Yield. */
-                        break;
-                }
-                rd_dassert(res == RD_KAFKA_OP_RES_PASS);
-
-		/* Auto-commit offset, if enabled. */
-		if (!rko->rko_err && rko->rko_type == RD_KAFKA_OP_FETCH) {
-                        rd_kafka_toppar_t *rktp;
-                        rktp = rd_kafka_toppar_s2i(rko->rko_rktp);
-			rd_kafka_toppar_lock(rktp);
-			rktp->rktp_app_offset = rko->rko_u.fetch.rkm.rkm_offset+1;
-                        if (rktp->rktp_cgrp &&
-			    rk->rk_conf.enable_auto_offset_store)
-                                rd_kafka_offset_store0(rktp,
-						       rktp->rktp_app_offset,
-                                                       0/* no lock */);
-			rd_kafka_toppar_unlock(rktp);
-                }
-
-		/* Get rkmessage from rko and append to array. */
-		rkmessages[cnt++] = rd_kafka_message_get(rko);
-	}
-
-        /* Discard non-desired and already handled ops */
-        next = TAILQ_FIRST(&tmpq);
-        while (next) {
-                rko = next;
-                next = TAILQ_NEXT(next, rko_link);
-                rd_kafka_op_destroy(rko);
-        }
-
-
-	return cnt;
-}
-
-
-
-void rd_kafka_queue_destroy (rd_kafka_queue_t *rkqu) {
-	rd_kafka_q_disable(rkqu->rkqu_q);
-	rd_kafka_q_destroy(rkqu->rkqu_q);
-	rd_free(rkqu);
-}
-
-rd_kafka_queue_t *rd_kafka_queue_new0 (rd_kafka_t *rk, rd_kafka_q_t *rkq) {
-	rd_kafka_queue_t *rkqu;
-
-	rkqu = rd_calloc(1, sizeof(*rkqu));
-
-	rkqu->rkqu_q = rkq;
-	rd_kafka_q_keep(rkq);
-
-        rkqu->rkqu_rk = rk;
-
-	return rkqu;
-}
-
-
-rd_kafka_queue_t *rd_kafka_queue_new (rd_kafka_t *rk) {
-	rd_kafka_q_t *rkq;
-	rd_kafka_queue_t *rkqu;
-
-	rkq = rd_kafka_q_new(rk);
-	rkqu = rd_kafka_queue_new0(rk, rkq);
-	rd_kafka_q_destroy(rkq); /* Lose refcount from q_new, one is held
-				  * by queue_new0 */
-	return rkqu;
-}
-
-
-rd_kafka_queue_t *rd_kafka_queue_get_main (rd_kafka_t *rk) {
-	return rd_kafka_queue_new0(rk, rk->rk_rep);
-}
-
-
-rd_kafka_queue_t *rd_kafka_queue_get_consumer (rd_kafka_t *rk) {
-	if (!rk->rk_cgrp)
-		return NULL;
-	return rd_kafka_queue_new0(rk, rk->rk_cgrp->rkcg_q);
-}
-
-rd_kafka_queue_t *rd_kafka_queue_get_partition (rd_kafka_t *rk,
-                                                const char *topic,
-                                                int32_t partition) {
-        shptr_rd_kafka_toppar_t *s_rktp;
-        rd_kafka_toppar_t *rktp;
-        rd_kafka_queue_t *result;
-
-        if (rk->rk_type == RD_KAFKA_PRODUCER)
-                return NULL;
-
-        s_rktp = rd_kafka_toppar_get2(rk, topic,
-                                      partition,
-                                      0, /* no ua_on_miss */
-                                      1 /* create_on_miss */);
-
-        if (!s_rktp)
-                return NULL;
-
-        rktp = rd_kafka_toppar_s2i(s_rktp);
-        result = rd_kafka_queue_new0(rk, rktp->rktp_fetchq);
-        rd_kafka_toppar_destroy(s_rktp);
-
-        return result;
-}
-
-rd_kafka_resp_err_t rd_kafka_set_log_queue (rd_kafka_t *rk,
-                                            rd_kafka_queue_t *rkqu) {
-        rd_kafka_q_t *rkq;
-        if (!rkqu)
-                rkq = rk->rk_rep;
-        else
-                rkq = rkqu->rkqu_q;
-        rd_kafka_q_fwd_set(rk->rk_logq, rkq);
-        return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-void rd_kafka_queue_forward (rd_kafka_queue_t *src, rd_kafka_queue_t *dst) {
-        rd_kafka_q_fwd_set0(src->rkqu_q, dst ? dst->rkqu_q : NULL,
-                            1, /* do_lock */
-                            1 /* fwd_app */);
-}
-
-
-size_t rd_kafka_queue_length (rd_kafka_queue_t *rkqu) {
-	return (size_t)rd_kafka_q_len(rkqu->rkqu_q);
-}
-
-/**
- * @brief Enable or disable (fd==-1) fd-based wake-ups for queue
- */
-void rd_kafka_q_io_event_enable (rd_kafka_q_t *rkq, int fd,
-                                 const void *payload, size_t size) {
-        struct rd_kafka_q_io *qio = NULL;
-
-        if (fd != -1) {
-                qio = rd_malloc(sizeof(*qio) + size);
-                qio->fd = fd;
-                qio->size = size;
-                qio->payload = (void *)(qio+1);
-                memcpy(qio->payload, payload, size);
-        }
-
-        mtx_lock(&rkq->rkq_lock);
-        if (rkq->rkq_qio) {
-                rd_free(rkq->rkq_qio);
-                rkq->rkq_qio = NULL;
-        }
-
-        if (fd != -1) {
-                rkq->rkq_qio = qio;
-        }
-
-        mtx_unlock(&rkq->rkq_lock);
-
-}
-
-void rd_kafka_queue_io_event_enable (rd_kafka_queue_t *rkqu, int fd,
-                                     const void *payload, size_t size) {
-        rd_kafka_q_io_event_enable(rkqu->rkqu_q, fd, payload, size);
-}
-
-
-/**
- * Helper: wait for single op on 'rkq', and return its error,
- * or .._TIMED_OUT on timeout.
- */
-rd_kafka_resp_err_t rd_kafka_q_wait_result (rd_kafka_q_t *rkq, int timeout_ms) {
-        rd_kafka_op_t *rko;
-        rd_kafka_resp_err_t err;
-
-        rko = rd_kafka_q_pop(rkq, timeout_ms, 0);
-        if (!rko)
-                err = RD_KAFKA_RESP_ERR__TIMED_OUT;
-        else {
-                err = rko->rko_err;
-                rd_kafka_op_destroy(rko);
-        }
-
-        return err;
-}
-
-
-/**
- * Apply \p callback on each op in queue.
- * If the callback wishes to remove the rko it must do so using
- * rd_kafka_op_deq0().
- *
- * @returns the sum of \p callback() return values.
- * @remark rkq will be locked, callers should take care not to
- *         interact with \p rkq through other means from the callback to avoid
- *         deadlocks.
- */
-int rd_kafka_q_apply (rd_kafka_q_t *rkq,
-                      int (*callback) (rd_kafka_q_t *rkq, rd_kafka_op_t *rko,
-                                       void *opaque),
-                      void *opaque) {
-	rd_kafka_op_t *rko, *next;
-        rd_kafka_q_t *fwdq;
-        int cnt = 0;
-
-        mtx_lock(&rkq->rkq_lock);
-        if ((fwdq = rd_kafka_q_fwd_get(rkq, 0))) {
-                mtx_unlock(&rkq->rkq_lock);
-		cnt = rd_kafka_q_apply(fwdq, callback, opaque);
-                rd_kafka_q_destroy(fwdq);
-		return cnt;
-	}
-
-	next = TAILQ_FIRST(&rkq->rkq_q);
-	while ((rko = next)) {
-		next = TAILQ_NEXT(next, rko_link);
-                cnt += callback(rkq, rko, opaque);
-	}
-        mtx_unlock(&rkq->rkq_lock);
-
-        return cnt;
-}
-
-/**
- * @brief Convert relative to absolute offsets and also purge any messages
- *        that are older than \p min_offset.
- * @remark Error ops with ERR__NOT_IMPLEMENTED will not be purged since
- *         they are used to indicate unknown compression codecs and compressed
- *         messagesets may have a starting offset lower than what we requested.
- * @remark \p rkq locking is not performed (caller's responsibility)
- * @remark Must NOT be used on fwdq.
- */
-void rd_kafka_q_fix_offsets (rd_kafka_q_t *rkq, int64_t min_offset,
-			     int64_t base_offset) {
-	rd_kafka_op_t *rko, *next;
-	int     adj_len  = 0;
-	int64_t adj_size = 0;
-
-	rd_kafka_assert(NULL, !rkq->rkq_fwdq);
-
-	next = TAILQ_FIRST(&rkq->rkq_q);
-	while ((rko = next)) {
-		next = TAILQ_NEXT(next, rko_link);
-
-		if (unlikely(rko->rko_type != RD_KAFKA_OP_FETCH))
-			continue;
-
-		rko->rko_u.fetch.rkm.rkm_offset += base_offset;
-
-		if (rko->rko_u.fetch.rkm.rkm_offset < min_offset &&
-		    rko->rko_err != RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED) {
-			adj_len++;
-			adj_size += rko->rko_len;
-			TAILQ_REMOVE(&rkq->rkq_q, rko, rko_link);
-			rd_kafka_op_destroy(rko);
-			continue;
-		}
-	}
-
-
-	rkq->rkq_qlen  -= adj_len;
-	rkq->rkq_qsize -= adj_size;
-}
-
-
-/**
- * @brief Print information and contents of queue
- */
-void rd_kafka_q_dump (FILE *fp, rd_kafka_q_t *rkq) {
-        mtx_lock(&rkq->rkq_lock);
-        fprintf(fp, "Queue %p \"%s\" (refcnt %d, flags 0x%x, %d ops, "
-                "%"PRId64" bytes)\n",
-                rkq, rkq->rkq_name, rkq->rkq_refcnt, rkq->rkq_flags,
-                rkq->rkq_qlen, rkq->rkq_qsize);
-
-        if (rkq->rkq_qio)
-                fprintf(fp, " QIO fd %d\n", rkq->rkq_qio->fd);
-        if (rkq->rkq_serve)
-                fprintf(fp, " Serve callback %p, opaque %p\n",
-                        rkq->rkq_serve, rkq->rkq_opaque);
-
-        if (rkq->rkq_fwdq) {
-                fprintf(fp, " Forwarded ->\n");
-                rd_kafka_q_dump(fp, rkq->rkq_fwdq);
-        } else {
-                rd_kafka_op_t *rko;
-
-                if (!TAILQ_EMPTY(&rkq->rkq_q))
-                        fprintf(fp, " Queued ops:\n");
-                TAILQ_FOREACH(rko, &rkq->rkq_q, rko_link) {
-                        fprintf(fp, "  %p %s (v%"PRId32", flags 0x%x, "
-                                "prio %d, len %"PRId32", source %s, "
-                                "replyq %p)\n",
-                                rko, rd_kafka_op2str(rko->rko_type),
-                                rko->rko_version, rko->rko_flags,
-                                rko->rko_prio, rko->rko_len,
-                                #if ENABLE_DEVEL
-                                rko->rko_source
-                                #else
-                                "-"
-                                #endif
-                                ,
-                                rko->rko_replyq.q
-                                );
-                }
-        }
-
-        mtx_unlock(&rkq->rkq_lock);
-}
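
A short hypothetical sketch built on two of the public entry points implemented above (the helper name is illustrative, not from this commit): create an application-owned queue and point librdkafka's log queue at it with rd_kafka_set_log_queue(); passing NULL instead restores forwarding to the main queue. Whatever configuration is needed to emit log events is assumed and not shown.

#include <librdkafka/rdkafka.h>

/* rk is an already-created rd_kafka_t handle. */
static rd_kafka_queue_t *redirect_logs (rd_kafka_t *rk) {
        rd_kafka_queue_t *logq = rd_kafka_queue_new(rk);

        /* Forwards the internal log queue to logq (see
         * rd_kafka_set_log_queue() above); the application can then
         * serve log events from logq on a thread of its choosing. */
        rd_kafka_set_log_queue(rk, logq);

        return logq;    /* destroy with rd_kafka_queue_destroy() when done */
}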

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdkafka_queue.h
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdkafka_queue.h b/thirdparty/librdkafka-0.11.1/src/rdkafka_queue.h
deleted file mode 100644
index 7595b17..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdkafka_queue.h
+++ /dev/null
@@ -1,731 +0,0 @@
-/*
- * librdkafka - The Apache Kafka C/C++ library
- *
- * Copyright (c) 2016 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#pragma once
-
-#include "rdkafka_op.h"
-#include "rdkafka_int.h"
-
-#ifdef _MSC_VER
-#include <io.h> /* for _write() */
-#endif
-
-
-TAILQ_HEAD(rd_kafka_op_tailq, rd_kafka_op_s);
-
-struct rd_kafka_q_s {
-	mtx_t  rkq_lock;
-	cnd_t  rkq_cond;
-	struct rd_kafka_q_s *rkq_fwdq; /* Forwarded/Routed queue.
-					* Used in place of this queue
-					* for all operations. */
-
-	struct rd_kafka_op_tailq rkq_q;  /* TAILQ_HEAD(, rd_kafka_op_s) */
-	int           rkq_qlen;      /* Number of entries in queue */
-        int64_t       rkq_qsize;     /* Size of all entries in queue */
-        int           rkq_refcnt;
-        int           rkq_flags;
-#define RD_KAFKA_Q_F_ALLOCATED  0x1  /* Allocated: rd_free on destroy */
-#define RD_KAFKA_Q_F_READY      0x2  /* Queue is ready to be used.
-                                      * Flag is cleared on destroy */
-#define RD_KAFKA_Q_F_FWD_APP    0x4  /* Queue is being forwarded by a call
-                                      * to rd_kafka_queue_forward. */
-
-        rd_kafka_t   *rkq_rk;
-	struct rd_kafka_q_io *rkq_qio;   /* FD-based application signalling */
-
-        /* Op serve callback (optional).
-         * Mainly used for forwarded queues to use the original queue's
-         * serve function from the forwarded position.
-         * Shall return 1 if op was handled, else 0. */
-        rd_kafka_q_serve_cb_t *rkq_serve;
-        void *rkq_opaque;
-
-#if ENABLE_DEVEL
-	char rkq_name[64];       /* Debugging: queue name (FUNC:LINE) */
-#else
-	const char *rkq_name;    /* Debugging: queue name (FUNC) */
-#endif
-};
-
-
-/* FD-based application signalling state holder. */
-struct rd_kafka_q_io {
-	int    fd;
-	void  *payload;
-	size_t size;
-};
-
-
-
-/**
- * @return true if queue is ready/enabled, else false.
- * @remark queue lock must be held by caller (if applicable)
- */
-static RD_INLINE RD_UNUSED
-int rd_kafka_q_ready (rd_kafka_q_t *rkq) {
-	return rkq->rkq_flags & RD_KAFKA_Q_F_READY;
-}
-
-
-
-
-void rd_kafka_q_init (rd_kafka_q_t *rkq, rd_kafka_t *rk);
-rd_kafka_q_t *rd_kafka_q_new0 (rd_kafka_t *rk, const char *func, int line);
-#define rd_kafka_q_new(rk) rd_kafka_q_new0(rk,__FUNCTION__,__LINE__)
-void rd_kafka_q_destroy_final (rd_kafka_q_t *rkq);
-
-#define rd_kafka_q_lock(rkqu) mtx_lock(&(rkqu)->rkq_lock)
-#define rd_kafka_q_unlock(rkqu) mtx_unlock(&(rkqu)->rkq_lock)
-
-static RD_INLINE RD_UNUSED
-rd_kafka_q_t *rd_kafka_q_keep (rd_kafka_q_t *rkq) {
-        mtx_lock(&rkq->rkq_lock);
-        rkq->rkq_refcnt++;
-        mtx_unlock(&rkq->rkq_lock);
-	return rkq;
-}
-
-static RD_INLINE RD_UNUSED
-rd_kafka_q_t *rd_kafka_q_keep_nolock (rd_kafka_q_t *rkq) {
-        rkq->rkq_refcnt++;
-	return rkq;
-}
-
-
-/**
- * @returns the queue's name (used for debugging)
- */
-static RD_INLINE RD_UNUSED
-const char *rd_kafka_q_name (rd_kafka_q_t *rkq) {
-	return rkq->rkq_name;
-}
-
-/**
- * @returns the final destination queue name (after forwarding)
- * @remark rkq MUST NOT be locked
- */
-static RD_INLINE RD_UNUSED
-const char *rd_kafka_q_dest_name (rd_kafka_q_t *rkq) {
-	const char *ret;
-	mtx_lock(&rkq->rkq_lock);
-	if (rkq->rkq_fwdq)
-		ret = rd_kafka_q_dest_name(rkq->rkq_fwdq);
-	else
-		ret = rd_kafka_q_name(rkq);
-	mtx_unlock(&rkq->rkq_lock);
-	return ret;
-}
-
-
-static RD_INLINE RD_UNUSED
-void rd_kafka_q_destroy (rd_kafka_q_t *rkq) {
-        int do_delete = 0;
-
-        mtx_lock(&rkq->rkq_lock);
-        rd_kafka_assert(NULL, rkq->rkq_refcnt > 0);
-        do_delete = !--rkq->rkq_refcnt;
-        mtx_unlock(&rkq->rkq_lock);
-
-        if (unlikely(do_delete))
-                rd_kafka_q_destroy_final(rkq);
-}
-
-
-/**
- * Reset a queue.
- * WARNING: All messages will be lost and leaked.
- * NOTE: No locking is performed.
- */
-static RD_INLINE RD_UNUSED
-void rd_kafka_q_reset (rd_kafka_q_t *rkq) {
-	TAILQ_INIT(&rkq->rkq_q);
-        rd_dassert(TAILQ_EMPTY(&rkq->rkq_q));
-        rkq->rkq_qlen = 0;
-        rkq->rkq_qsize = 0;
-}
-
-
-/**
- * Disable a queue.
- * Attempting to enqueue messages to the queue will destroy them.
- */
-static RD_INLINE RD_UNUSED
-void rd_kafka_q_disable0 (rd_kafka_q_t *rkq, int do_lock) {
-        if (do_lock)
-                mtx_lock(&rkq->rkq_lock);
-        rkq->rkq_flags &= ~RD_KAFKA_Q_F_READY;
-        if (do_lock)
-                mtx_unlock(&rkq->rkq_lock);
-}
-#define rd_kafka_q_disable(rkq) rd_kafka_q_disable0(rkq, 1/*lock*/)
-
-/**
- * Forward 'srcq' to 'destq'
- */
-void rd_kafka_q_fwd_set0 (rd_kafka_q_t *srcq, rd_kafka_q_t *destq,
-                          int do_lock, int fwd_app);
-#define rd_kafka_q_fwd_set(S,D) rd_kafka_q_fwd_set0(S,D,1/*lock*/,\
-                                                    0/*no fwd_app*/)
-
-/**
- * @returns the forward queue (if any) with its refcount increased.
- * @locks rd_kafka_q_lock(rkq) == !do_lock
- */
-static RD_INLINE RD_UNUSED
-rd_kafka_q_t *rd_kafka_q_fwd_get (rd_kafka_q_t *rkq, int do_lock) {
-        rd_kafka_q_t *fwdq;
-        if (do_lock)
-                mtx_lock(&rkq->rkq_lock);
-
-        if ((fwdq = rkq->rkq_fwdq))
-                rd_kafka_q_keep(fwdq);
-
-        if (do_lock)
-                mtx_unlock(&rkq->rkq_lock);
-
-        return fwdq;
-}
-
-
-/**
- * @returns true if queue is forwarded, else false.
- *
- * @remark Thread-safe.
- */
-static RD_INLINE RD_UNUSED int rd_kafka_q_is_fwded (rd_kafka_q_t *rkq) {
-	int r;
-	mtx_lock(&rkq->rkq_lock);
-	r = rkq->rkq_fwdq ? 1 : 0;
-	mtx_unlock(&rkq->rkq_lock);
-	return r;
-}
-
-
-
-/**
- * @brief Trigger an IO event for this queue.
- *
- * @remark Queue MUST be locked
- */
-static RD_INLINE RD_UNUSED
-void rd_kafka_q_io_event (rd_kafka_q_t *rkq) {
-	ssize_t r;
-
-	if (likely(!rkq->rkq_qio))
-		return;
-
-#ifdef _MSC_VER
-	r = _write(rkq->rkq_qio->fd, rkq->rkq_qio->payload, (int)rkq->rkq_qio->size);
-#else
-        r = write(rkq->rkq_qio->fd, rkq->rkq_qio->payload, rkq->rkq_qio->size);
-#endif
-	if (r == -1) {
-		fprintf(stderr,
-			"[ERROR:librdkafka:rd_kafka_q_io_event: "
-			"write(%d,..,%d) failed on queue %p \"%s\": %s: "
-			"disabling further IO events]\n",
-			rkq->rkq_qio->fd, (int)rkq->rkq_qio->size,
-			rkq, rd_kafka_q_name(rkq), rd_strerror(errno));
-		/* FIXME: Log this, somehow */
-		rd_free(rkq->rkq_qio);
-		rkq->rkq_qio = NULL;
-	}
-}
-
-
-/**
- * @brief rko->rko_prio comparator
- * @remark: descending order: higher priority takes precedence.
- */
-static RD_INLINE RD_UNUSED
-int rd_kafka_op_cmp_prio (const void *_a, const void *_b) {
-        const rd_kafka_op_t *a = _a, *b = _b;
-
-        return b->rko_prio - a->rko_prio;
-}
-
-
-/**
- * @brief Low-level unprotected enqueue that only performs
- *        the actual queue enqueue and counter updates.
- * @remark Will not perform locking, signaling, fwdq, READY checking, etc.
- */
-static RD_INLINE RD_UNUSED void
-rd_kafka_q_enq0 (rd_kafka_q_t *rkq, rd_kafka_op_t *rko, int at_head) {
-    if (likely(!rko->rko_prio))
-        TAILQ_INSERT_TAIL(&rkq->rkq_q, rko, rko_link);
-    else if (at_head)
-        TAILQ_INSERT_HEAD(&rkq->rkq_q, rko, rko_link);
-    else
-        TAILQ_INSERT_SORTED(&rkq->rkq_q, rko, rd_kafka_op_t *,
-                            rko_link, rd_kafka_op_cmp_prio);
-    rkq->rkq_qlen++;
-    rkq->rkq_qsize += rko->rko_len;
-}
-
-
-/**
- * @brief Enqueue the 'rko' op at the tail of the queue 'rkq'.
- *
- * The provided 'rko' is either enqueued or destroyed.
- *
- * @returns 1 if op was enqueued or 0 if queue is disabled and
- * there was no replyq to enqueue on, in which case the rko is destroyed.
- *
- * Locality: any thread.
- */
-static RD_INLINE RD_UNUSED
-int rd_kafka_q_enq (rd_kafka_q_t *rkq, rd_kafka_op_t *rko) {
-	rd_kafka_q_t *fwdq;
-
-	mtx_lock(&rkq->rkq_lock);
-
-        rd_dassert(rkq->rkq_refcnt > 0);
-
-        if (unlikely(!(rkq->rkq_flags & RD_KAFKA_Q_F_READY))) {
-
-                /* Queue has been disabled, reply to and fail the rko. */
-                mtx_unlock(&rkq->rkq_lock);
-
-                return rd_kafka_op_reply(rko, RD_KAFKA_RESP_ERR__DESTROY);
-        }
-
-        if (!rko->rko_serve && rkq->rkq_serve) {
-                /* Store original queue's serve callback and opaque
-                 * prior to forwarding. */
-                rko->rko_serve = rkq->rkq_serve;
-                rko->rko_serve_opaque = rkq->rkq_opaque;
-        }
-
-	if (!(fwdq = rd_kafka_q_fwd_get(rkq, 0))) {
-		rd_kafka_q_enq0(rkq, rko, 0);
-		cnd_signal(&rkq->rkq_cond);
-		if (rkq->rkq_qlen == 1)
-			rd_kafka_q_io_event(rkq);
-		mtx_unlock(&rkq->rkq_lock);
-	} else {
-		mtx_unlock(&rkq->rkq_lock);
-		rd_kafka_q_enq(fwdq, rko);
-		rd_kafka_q_destroy(fwdq);
-	}
-
-        return 1;
-}
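
A compact usage sketch built from the declarations in this header
(rd_kafka_q_new(), rd_kafka_q_enq(), rd_kafka_q_pop(), rd_kafka_q_destroy()):
rd_kafka_op_new(), rd_kafka_op_destroy() and RD_KAFKA_OP_FETCH are assumed to
come from rdkafka_op.h and are only illustrative here, not part of the file.

    /* Illustrative sketch, not part of this header. */
    static void example_queue_roundtrip (rd_kafka_t *rk) {
            rd_kafka_q_t *q = rd_kafka_q_new(rk);
            /* Assumed helper from rdkafka_op.h: allocate an op. */
            rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_FETCH);

            rd_kafka_q_enq(q, rko);                  /* enqueued or destroyed */

            rko = rd_kafka_q_pop(q, 1000/*ms*/, 0);  /* NULL on timeout */
            if (rko)
                    rd_kafka_op_destroy(rko);        /* assumed from rdkafka_op.h */

            rd_kafka_q_destroy(q);                   /* drop our reference */
    }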
-
-
-/**
- * @brief Re-enqueue rko at head of rkq.
- *
- * The provided 'rko' is either enqueued or destroyed.
- *
- * @returns 1 if op was enqueued or 0 if queue is disabled and
- * there was no replyq to enqueue on, in which case the rko is destroyed.
- *
- * @locks rkq MUST BE LOCKED
- *
- * Locality: any thread.
- */
-static RD_INLINE RD_UNUSED
-int rd_kafka_q_reenq (rd_kafka_q_t *rkq, rd_kafka_op_t *rko) {
-        rd_kafka_q_t *fwdq;
-
-        rd_dassert(rkq->rkq_refcnt > 0);
-
-        if (unlikely(!(rkq->rkq_flags & RD_KAFKA_Q_F_READY)))
-                return rd_kafka_op_reply(rko, RD_KAFKA_RESP_ERR__DESTROY);
-
-        if (!rko->rko_serve && rkq->rkq_serve) {
-                /* Store original queue's serve callback and opaque
-                 * prior to forwarding. */
-                rko->rko_serve = rkq->rkq_serve;
-                rko->rko_serve_opaque = rkq->rkq_opaque;
-        }
-
-        if (!(fwdq = rd_kafka_q_fwd_get(rkq, 0))) {
-                rd_kafka_q_enq0(rkq, rko, 1/*at_head*/);
-                cnd_signal(&rkq->rkq_cond);
-                if (rkq->rkq_qlen == 1)
-                        rd_kafka_q_io_event(rkq);
-        } else {
-                rd_kafka_q_enq(fwdq, rko);
-                rd_kafka_q_destroy(fwdq);
-        }
-
-        return 1;
-}
-
-
-/**
- * Dequeue 'rko' from queue 'rkq'.
- *
- * NOTE: rkq_lock MUST be held
- * Locality: any thread
- */
-static RD_INLINE RD_UNUSED
-void rd_kafka_q_deq0 (rd_kafka_q_t *rkq, rd_kafka_op_t *rko) {
-        rd_dassert(rkq->rkq_flags & RD_KAFKA_Q_F_READY);
-	rd_dassert(rkq->rkq_qlen > 0 &&
-                   rkq->rkq_qsize >= (int64_t)rko->rko_len);
-
-        TAILQ_REMOVE(&rkq->rkq_q, rko, rko_link);
-        rkq->rkq_qlen--;
-        rkq->rkq_qsize -= rko->rko_len;
-}
-
-/**
- * Concat all elements of 'srcq' onto tail of 'rkq'.
- * 'rkq' will be locked (if 'do_lock'==1), but 'srcq' will not.
- * NOTE: 'srcq' will be reset.
- *
- * Locality: any thread.
- *
- * @returns 0 if operation was performed or -1 if rkq is disabled.
- */
-static RD_INLINE RD_UNUSED
-int rd_kafka_q_concat0 (rd_kafka_q_t *rkq, rd_kafka_q_t *srcq, int do_lock) {
-	int r = 0;
-
-	while (srcq->rkq_fwdq) /* Resolve source queue */
-		srcq = srcq->rkq_fwdq;
-	if (unlikely(srcq->rkq_qlen == 0))
-		return 0; /* Don't do anything if source queue is empty */
-
-	if (do_lock)
-		mtx_lock(&rkq->rkq_lock);
-	if (!rkq->rkq_fwdq) {
-                rd_kafka_op_t *rko;
-
-                rd_dassert(TAILQ_EMPTY(&srcq->rkq_q) ||
-                           srcq->rkq_qlen > 0);
-		if (unlikely(!(rkq->rkq_flags & RD_KAFKA_Q_F_READY))) {
-                        if (do_lock)
-                                mtx_unlock(&rkq->rkq_lock);
-			return -1;
-		}
-                /* First insert any prioritized ops from srcq
-                 * in the right position in rkq. */
-                while ((rko = TAILQ_FIRST(&srcq->rkq_q)) && rko->rko_prio > 0) {
-                        TAILQ_REMOVE(&srcq->rkq_q, rko, rko_link);
-                        TAILQ_INSERT_SORTED(&rkq->rkq_q, rko,
-                                            rd_kafka_op_t *, rko_link,
-                                            rd_kafka_op_cmp_prio);
-                }
-
-		TAILQ_CONCAT(&rkq->rkq_q, &srcq->rkq_q, rko_link);
-		if (rkq->rkq_qlen == 0)
-			rd_kafka_q_io_event(rkq);
-                rkq->rkq_qlen += srcq->rkq_qlen;
-                rkq->rkq_qsize += srcq->rkq_qsize;
-		cnd_signal(&rkq->rkq_cond);
-
-                rd_kafka_q_reset(srcq);
-	} else
-		r = rd_kafka_q_concat0(rkq->rkq_fwdq ? rkq->rkq_fwdq : rkq,
-				       srcq,
-				       rkq->rkq_fwdq ? do_lock : 0);
-	if (do_lock)
-		mtx_unlock(&rkq->rkq_lock);
-
-	return r;
-}
-
-#define rd_kafka_q_concat(dstq,srcq) rd_kafka_q_concat0(dstq,srcq,1/*lock*/)
-
-
-/**
- * @brief Prepend all elements of 'srcq' onto head of 'rkq'.
- * 'rkq' will be locked (if 'do_lock'==1), but 'srcq' will not.
- * 'srcq' will be reset.
- *
- * @remark Will not respect priority of ops, srcq will be prepended in its
- *         original form to rkq.
- *
- * @locality any thread.
- */
-static RD_INLINE RD_UNUSED
-void rd_kafka_q_prepend0 (rd_kafka_q_t *rkq, rd_kafka_q_t *srcq,
-                          int do_lock) {
-	if (do_lock)
-		mtx_lock(&rkq->rkq_lock);
-	if (!rkq->rkq_fwdq && !srcq->rkq_fwdq) {
-                /* FIXME: prio-aware */
-                /* Concat rkq on srcq */
-                TAILQ_CONCAT(&srcq->rkq_q, &rkq->rkq_q, rko_link);
-                /* Move srcq to rkq */
-                TAILQ_MOVE(&rkq->rkq_q, &srcq->rkq_q, rko_link);
-		if (rkq->rkq_qlen == 0 && srcq->rkq_qlen > 0)
-			rd_kafka_q_io_event(rkq);
-                rkq->rkq_qlen += srcq->rkq_qlen;
-                rkq->rkq_qsize += srcq->rkq_qsize;
-
-                rd_kafka_q_reset(srcq);
-	} else
-		rd_kafka_q_prepend0(rkq->rkq_fwdq ? rkq->rkq_fwdq : rkq,
-                                    srcq->rkq_fwdq ? srcq->rkq_fwdq : srcq,
-                                    rkq->rkq_fwdq ? do_lock : 0);
-	if (do_lock)
-		mtx_unlock(&rkq->rkq_lock);
-}
-
-#define rd_kafka_q_prepend(dstq,srcq) rd_kafka_q_prepend0(dstq,srcq,1/*lock*/)
-
-
-/* Returns the number of elements in the queue */
-static RD_INLINE RD_UNUSED
-int rd_kafka_q_len (rd_kafka_q_t *rkq) {
-        int qlen;
-        rd_kafka_q_t *fwdq;
-        mtx_lock(&rkq->rkq_lock);
-        if (!(fwdq = rd_kafka_q_fwd_get(rkq, 0))) {
-                qlen = rkq->rkq_qlen;
-                mtx_unlock(&rkq->rkq_lock);
-        } else {
-                mtx_unlock(&rkq->rkq_lock);
-                qlen = rd_kafka_q_len(fwdq);
-                rd_kafka_q_destroy(fwdq);
-        }
-        return qlen;
-}
-
-/* Returns the total size of elements in the queue */
-static RD_INLINE RD_UNUSED
-uint64_t rd_kafka_q_size (rd_kafka_q_t *rkq) {
-        uint64_t sz;
-        rd_kafka_q_t *fwdq;
-        mtx_lock(&rkq->rkq_lock);
-        if (!(fwdq = rd_kafka_q_fwd_get(rkq, 0))) {
-                sz = rkq->rkq_qsize;
-                mtx_unlock(&rkq->rkq_lock);
-        } else {
-                mtx_unlock(&rkq->rkq_lock);
-                sz = rd_kafka_q_size(fwdq);
-                rd_kafka_q_destroy(fwdq);
-        }
-        return sz;
-}
-
-
-/* Construct temporary on-stack replyq with increased Q refcount and
- * optional VERSION. */
-#if ENABLE_DEVEL
-#define RD_KAFKA_REPLYQ(Q,VERSION) \
-	(rd_kafka_replyq_t){rd_kafka_q_keep(Q), VERSION, \
-			rd_strdup(__FUNCTION__) }
-#else
-#define RD_KAFKA_REPLYQ(Q,VERSION) \
-	(rd_kafka_replyq_t){rd_kafka_q_keep(Q), VERSION}
-#endif
-
-/* Construct temporary on-stack replyq for indicating no replyq. */
-#if ENABLE_DEVEL
-#define RD_KAFKA_NO_REPLYQ (rd_kafka_replyq_t){NULL, 0, NULL}
-#else
-#define RD_KAFKA_NO_REPLYQ (rd_kafka_replyq_t){NULL, 0}
-#endif
-
-/**
- * Set up replyq.
- * Q refcnt is increased.
- */
-static RD_INLINE RD_UNUSED void
-rd_kafka_set_replyq (rd_kafka_replyq_t *replyq,
-		     rd_kafka_q_t *rkq, int32_t version) {
-	replyq->q = rkq ? rd_kafka_q_keep(rkq) : NULL;
-	replyq->version = version;
-#if ENABLE_DEVEL
-	replyq->_id = strdup(__FUNCTION__);
-#endif
-}
-
-/**
- * Set rko's replyq with an optional version (versionptr != NULL).
- * Q refcnt is increased.
- */
-static RD_INLINE RD_UNUSED void
-rd_kafka_op_set_replyq (rd_kafka_op_t *rko, rd_kafka_q_t *rkq,
-			rd_atomic32_t *versionptr) {
-	rd_kafka_set_replyq(&rko->rko_replyq, rkq,
-			    versionptr ? rd_atomic32_get(versionptr) : 0);
-}
-
-/* Set reply rko's version from replyq's version */
-#define rd_kafka_op_get_reply_version(REPLY_RKO, ORIG_RKO) do {		\
-		(REPLY_RKO)->rko_version = (ORIG_RKO)->rko_replyq.version; \
-	} while (0)
-
-
-/* Clear replyq holder without decreasing any .q references. */
-static RD_INLINE RD_UNUSED void
-rd_kafka_replyq_clear (rd_kafka_replyq_t *replyq) {
-	memset(replyq, 0, sizeof(*replyq));
-}
-
-/**
- * @brief Make a copy of \p src in \p dst, with its own queue reference
- */
-static RD_INLINE RD_UNUSED void
-rd_kafka_replyq_copy (rd_kafka_replyq_t *dst, rd_kafka_replyq_t *src) {
-        dst->version = src->version;
-        dst->q = src->q;
-        if (dst->q)
-                rd_kafka_q_keep(dst->q);
-#if ENABLE_DEVEL
-        if (src->_id)
-                dst->_id = rd_strdup(src->_id);
-        else
-                dst->_id = NULL;
-#endif
-}
-
-
-/**
- * Clear replyq holder and destroy any .q references.
- */
-static RD_INLINE RD_UNUSED void
-rd_kafka_replyq_destroy (rd_kafka_replyq_t *replyq) {
-	if (replyq->q)
-		rd_kafka_q_destroy(replyq->q);
-#if ENABLE_DEVEL
-	if (replyq->_id) {
-		rd_free(replyq->_id);
-		replyq->_id = NULL;
-	}
-#endif
-	rd_kafka_replyq_clear(replyq);
-}
-
-
-/**
- * @brief Wrapper for rd_kafka_q_enq() that takes a replyq,
- *        steals its queue reference, enqueues the op with the replyq version,
- *        and then destroys the queue reference.
- *
- *        The rko's version is set to \p version if non-zero, else to replyq->version.
- *
- * @returns Same as rd_kafka_q_enq()
- */
-static RD_INLINE RD_UNUSED int
-rd_kafka_replyq_enq (rd_kafka_replyq_t *replyq, rd_kafka_op_t *rko,
-		     int version) {
-	rd_kafka_q_t *rkq = replyq->q;
-	int r;
-
-	if (version)
-		rko->rko_version = version;
-	else
-		rko->rko_version = replyq->version;
-
-	/* The replyq queue reference is done after we've enqueued the rko
-	 * so clear it here. */
-	replyq->q = NULL;
-
-#if ENABLE_DEVEL
-	if (replyq->_id) {
-		rd_free(replyq->_id);
-		replyq->_id = NULL;
-	}
-#endif
-
-	/* Retain replyq->version since it is used by buf_callback
-	 * when dispatching the callback. */
-
-	r = rd_kafka_q_enq(rkq, rko);
-
-	rd_kafka_q_destroy(rkq);
-
-	return r;
-}
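
For orientation, a minimal sketch of how the replyq helpers above are typically
combined: an op is given a reply queue before being handed to a worker queue,
and the reply is later sent back through the stored replyq. rd_kafka_op_new()
and the RD_KAFKA_OP_* types are assumed from rdkafka_op.h and are illustrative
only.

    /* Illustrative sketch, not part of this header. */
    static void example_request_with_reply (rd_kafka_q_t *workq,
                                            rd_kafka_q_t *myq) {
            rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_FETCH); /* assumed */

            /* Attach our reply queue; myq's refcount is increased. */
            rd_kafka_op_set_replyq(rko, myq, NULL);

            /* Hand the op to the worker; rko is enqueued or destroyed. */
            rd_kafka_q_enq(workq, rko);

            /* The worker side would later reply with something like:
             *   rd_kafka_replyq_enq(&request_rko->rko_replyq, reply_rko, 0);
             * which steals the queue reference taken above. */
    }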
-
-
-
-rd_kafka_op_t *rd_kafka_q_pop_serve (rd_kafka_q_t *rkq, int timeout_ms,
-				     int32_t version,
-                                     rd_kafka_q_cb_type_t cb_type,
-                                     rd_kafka_q_serve_cb_t *callback,
-				     void *opaque);
-rd_kafka_op_t *rd_kafka_q_pop (rd_kafka_q_t *rkq, int timeout_ms,
-                               int32_t version);
-int rd_kafka_q_serve (rd_kafka_q_t *rkq, int timeout_ms, int max_cnt,
-                      rd_kafka_q_cb_type_t cb_type,
-                      rd_kafka_q_serve_cb_t *callback,
-                      void *opaque);
-
-int  rd_kafka_q_purge0 (rd_kafka_q_t *rkq, int do_lock);
-#define rd_kafka_q_purge(rkq) rd_kafka_q_purge0(rkq, 1/*lock*/)
-void rd_kafka_q_purge_toppar_version (rd_kafka_q_t *rkq,
-                                      rd_kafka_toppar_t *rktp, int version);
-
-int rd_kafka_q_move_cnt (rd_kafka_q_t *dstq, rd_kafka_q_t *srcq,
-			 int cnt, int do_locks);
-
-int rd_kafka_q_serve_rkmessages (rd_kafka_q_t *rkq, int timeout_ms,
-                                 rd_kafka_message_t **rkmessages,
-                                 size_t rkmessages_size);
-rd_kafka_resp_err_t rd_kafka_q_wait_result (rd_kafka_q_t *rkq, int timeout_ms);
-
-int rd_kafka_q_apply (rd_kafka_q_t *rkq,
-                      int (*callback) (rd_kafka_q_t *rkq, rd_kafka_op_t *rko,
-                                       void *opaque),
-                      void *opaque);
-
-void rd_kafka_q_fix_offsets (rd_kafka_q_t *rkq, int64_t min_offset,
-			     int64_t base_offset);
-
-/**
- * @returns the last op in the queue matching \p op_type and \p allow_err (bool)
- * @remark The \p rkq must be properly locked before this call; the returned rko
- *         is not removed from the queue and thus must not be held for longer
- *         than the lock is held.
- */
-static RD_INLINE RD_UNUSED
-rd_kafka_op_t *rd_kafka_q_last (rd_kafka_q_t *rkq, rd_kafka_op_type_t op_type,
-				int allow_err) {
-	rd_kafka_op_t *rko;
-	TAILQ_FOREACH_REVERSE(rko, &rkq->rkq_q, rd_kafka_op_tailq, rko_link) {
-		if (rko->rko_type == op_type &&
-		    (allow_err || !rko->rko_err))
-			return rko;
-	}
-
-	return NULL;
-}
-
-void rd_kafka_q_io_event_enable (rd_kafka_q_t *rkq, int fd,
-                                 const void *payload, size_t size);
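
The prototype above is the internal side of the FD-based signalling; the public
counterpart is rd_kafka_queue_io_event_enable() in rdkafka.h. A minimal sketch
of the application-side wiring, assuming a consumer instance and a plain pipe
(the event-loop integration itself is application specific):

    /* Illustrative sketch of public API usage, not part of this header. */
    #include <unistd.h>
    #include <librdkafka/rdkafka.h>

    static rd_kafka_queue_t *enable_queue_wakeups (rd_kafka_t *rk,
                                                   int pipefds[2]) {
            rd_kafka_queue_t *rkqu = rd_kafka_queue_get_consumer(rk);

            if (!rkqu)
                    return NULL;
            if (pipe(pipefds) == -1) {
                    rd_kafka_queue_destroy(rkqu);
                    return NULL;
            }

            /* One payload byte is written to pipefds[1] whenever the
             * underlying queue goes from empty to non-empty
             * (see rd_kafka_q_io_event() above). */
            rd_kafka_queue_io_event_enable(rkqu, pipefds[1], "1", 1);

            return rkqu; /* keep the handle for as long as events are wanted */
    }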
-
-/* Public interface */
-struct rd_kafka_queue_s {
-	rd_kafka_q_t *rkqu_q;
-        rd_kafka_t   *rkqu_rk;
-};
-
-
-void rd_kafka_q_dump (FILE *fp, rd_kafka_q_t *rkq);
-
-extern int RD_TLS rd_kafka_yield_thread;

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdkafka_range_assignor.c
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdkafka_range_assignor.c b/thirdparty/librdkafka-0.11.1/src/rdkafka_range_assignor.c
deleted file mode 100644
index dfa9893..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdkafka_range_assignor.c
+++ /dev/null
@@ -1,125 +0,0 @@
-/*
- * librdkafka - The Apache Kafka C/C++ library
- *
- * Copyright (c) 2015 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-#include "rdkafka_int.h"
-#include "rdkafka_assignor.h"
-
-
-
-
-
-
-/**
- * Source: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/clients/consumer/RangeAssignor.java
- *
- * The range assignor works on a per-topic basis. For each topic, we lay out the available partitions in numeric order
- * and the consumers in lexicographic order. We then divide the number of partitions by the total number of
- * consumers to determine the number of partitions to assign to each consumer. If it does not evenly
- * divide, then the first few consumers will have one extra partition.
- *
- * For example, suppose there are two consumers C0 and C1, two topics t0 and t1, and each topic has 3 partitions,
- * resulting in partitions t0p0, t0p1, t0p2, t1p0, t1p1, and t1p2.
- *
- * The assignment will be:
- * C0: [t0p0, t0p1, t1p0, t1p1]
- * C1: [t0p2, t1p2]
- */
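
The partition arithmetic described above can be checked with a few lines of
standalone C; this is only a sketch of the start/length computation mirrored by
the assignor below, shown for a single topic with 3 partitions and 2 consumers
to match the C0/C1 example:

    /* Illustrative sketch, not part of this file. */
    #include <stdio.h>

    int main (void) {
            int partition_cnt = 3, member_cnt = 2;
            int per_consumer = partition_cnt / member_cnt;   /* 1 */
            int with_extra   = partition_cnt % member_cnt;   /* 1 */
            int i;

            for (i = 0 ; i < member_cnt ; i++) {
                    int start  = per_consumer * i +
                            (i < with_extra ? i : with_extra);
                    int length = per_consumer +
                            (i + 1 > with_extra ? 0 : 1);
                    if (length > 0)
                            printf("C%d: partitions %d..%d\n",
                                   i, start, start + length - 1);
            }
            return 0;   /* prints C0: 0..1 and C1: 2..2 */
    }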
-
-rd_kafka_resp_err_t
-rd_kafka_range_assignor_assign_cb (rd_kafka_t *rk,
-                                   const char *member_id,
-                                   const char *protocol_name,
-                                   const rd_kafka_metadata_t *metadata,
-                                   rd_kafka_group_member_t *members,
-                                   size_t member_cnt,
-                                   rd_kafka_assignor_topic_t **eligible_topics,
-                                   size_t eligible_topic_cnt,
-                                   char *errstr, size_t errstr_size,
-                                   void *opaque) {
-        unsigned int ti;
-        int i;
-
-        /* The range assignor works on a per-topic basis. */
-        for (ti = 0 ; ti < eligible_topic_cnt ; ti++) {
-                rd_kafka_assignor_topic_t *eligible_topic = eligible_topics[ti];
-                int numPartitionsPerConsumer;
-                int consumersWithExtraPartition;
-
-                /* For each topic, we lay out the available partitions in
-                 * numeric order and the consumers in lexicographic order. */
-                rd_list_sort(&eligible_topic->members,
-			     rd_kafka_group_member_cmp);
-
-                /* We then divide the number of partitions by the total number of
-                 * consumers to determine the number of partitions to assign to
-                 * each consumer. */
-                numPartitionsPerConsumer =
-                        eligible_topic->metadata->partition_cnt /
-                        rd_list_cnt(&eligible_topic->members);
-
-                /* If it does not evenly divide, then the first few consumers
-                 * will have one extra partition. */
-                 consumersWithExtraPartition =
-                         eligible_topic->metadata->partition_cnt %
-                         rd_list_cnt(&eligible_topic->members);
-
-                 rd_kafka_dbg(rk, CGRP, "ASSIGN",
-                              "range: Topic %s with %d partition(s) and "
-                              "%d subscribing member(s)",
-                              eligible_topic->metadata->topic,
-                              eligible_topic->metadata->partition_cnt,
-                              rd_list_cnt(&eligible_topic->members));
-
-                 for (i = 0 ; i < rd_list_cnt(&eligible_topic->members) ; i++) {
-                         rd_kafka_group_member_t *rkgm =
-                                 rd_list_elem(&eligible_topic->members, i);
-                         int start = numPartitionsPerConsumer * i +
-                                 RD_MIN(i, consumersWithExtraPartition);
-                         int length = numPartitionsPerConsumer +
-                                 (i + 1 > consumersWithExtraPartition ? 0 : 1);
-
-			 if (length == 0)
-				 continue;
-
-                         rd_kafka_dbg(rk, CGRP, "ASSIGN",
-                                      "range: Member \"%s\": "
-                                      "assigned topic %s partitions %d..%d",
-                                      rkgm->rkgm_member_id->str,
-                                      eligible_topic->metadata->topic,
-                                      start, start+length-1);
-                         rd_kafka_topic_partition_list_add_range(
-                                 rkgm->rkgm_assignment,
-                                 eligible_topic->metadata->topic,
-                                 start, start+length-1);
-                 }
-        }
-
-        return 0;
-}
-
-
-


http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/examples/rdkafka_zookeeper_example.c
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/examples/rdkafka_zookeeper_example.c b/thirdparty/librdkafka-0.11.1/examples/rdkafka_zookeeper_example.c
deleted file mode 100644
index 2f9a61e..0000000
--- a/thirdparty/librdkafka-0.11.1/examples/rdkafka_zookeeper_example.c
+++ /dev/null
@@ -1,728 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012, Magnus Edenhill
- * All rights reserved.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met: 
- * 
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer. 
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution. 
- * 
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/**
- * Apache Kafka consumer & producer example programs
- * using the Kafka driver from librdkafka
- * (https://github.com/edenhill/librdkafka)
- */
-
-#include <ctype.h>
-#include <signal.h>
-#include <string.h>
-#include <unistd.h>
-#include <stdlib.h>
-#include <syslog.h>
-#include <sys/time.h>
-#include <errno.h>
-
-/* Typical include path would be <librdkafka/rdkafka.h>, but this program
- * is built from within the librdkafka source tree and thus differs. */
-#include "rdkafka.h"  /* for Kafka driver */
-
-#include <zookeeper.h>
-#include <zookeeper.jute.h>
-#include <jansson.h>
-
-#define BROKER_PATH "/brokers/ids"
-
-static int run = 1;
-static rd_kafka_t *rk;
-static int exit_eof = 0;
-static int quiet = 0;
-static 	enum {
-	OUTPUT_HEXDUMP,
-	OUTPUT_RAW,
-} output = OUTPUT_HEXDUMP;
-
-static void stop (int sig) {
-	run = 0;
-	fclose(stdin); /* abort fgets() */
-}
-
-
-static void hexdump (FILE *fp, const char *name, const void *ptr, size_t len) {
-	const char *p = (const char *)ptr;
-	int of = 0;
-
-
-	if (name)
-		fprintf(fp, "%s hexdump (%zd bytes):\n", name, len);
-
-	for (of = 0 ; of < len ; of += 16) {
-		char hexen[16*3+1];
-		char charen[16+1];
-		int hof = 0;
-
-		int cof = 0;
-		int i;
-
-		for (i = of ; i < of + 16 && i < len ; i++) {
-			hof += sprintf(hexen+hof, "%02x ", p[i] & 0xff);
-			cof += sprintf(charen+cof, "%c",
-				       isprint((int)p[i]) ? p[i] : '.');
-		}
-		fprintf(fp, "%08x: %-48s %-16s\n",
-			of, hexen, charen);
-	}
-}
-
-/**
- * Kafka logger callback (optional)
- */
-static void logger (const rd_kafka_t *rk, int level,
-		    const char *fac, const char *buf) {
-	struct timeval tv;
-	gettimeofday(&tv, NULL);
-	fprintf(stderr, "%u.%03u RDKAFKA-%i-%s: %s: %s\n",
-		(int)tv.tv_sec, (int)(tv.tv_usec / 1000),
-		level, fac, rd_kafka_name(rk), buf);
-}
-
-/**
- * Message delivery report callback.
- * Called once for each message.
- * See rdkafka.h for more information.
- */
-static void msg_delivered (rd_kafka_t *rk,
-			   void *payload, size_t len,
-			   int error_code,
-			   void *opaque, void *msg_opaque) {
-
-	if (error_code)
-		fprintf(stderr, "%% Message delivery failed: %s\n",
-			rd_kafka_err2str(error_code));
-	else if (!quiet)
-		fprintf(stderr, "%% Message delivered (%zd bytes)\n", len);
-}
-
-
-static void msg_consume (rd_kafka_message_t *rkmessage,
-			 void *opaque) {
-	if (rkmessage->err) {
-		if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) {
-			fprintf(stderr,
-				"%% Consumer reached end of %s [%"PRId32"] "
-			       "message queue at offset %"PRId64"\n",
-			       rd_kafka_topic_name(rkmessage->rkt),
-			       rkmessage->partition, rkmessage->offset);
-
-			if (exit_eof)
-				run = 0;
-
-			return;
-		}
-
-		fprintf(stderr, "%% Consume error for topic \"%s\" [%"PRId32"] "
-		       "offset %"PRId64": %s\n",
-		       rd_kafka_topic_name(rkmessage->rkt),
-		       rkmessage->partition,
-		       rkmessage->offset,
-		       rd_kafka_message_errstr(rkmessage));
-		return;
-	}
-
-	if (!quiet)
-		fprintf(stdout, "%% Message (offset %"PRId64", %zd bytes):\n",
-			rkmessage->offset, rkmessage->len);
-
-	if (rkmessage->key_len) {
-		if (output == OUTPUT_HEXDUMP)
-			hexdump(stdout, "Message Key",
-				rkmessage->key, rkmessage->key_len);
-		else
-			printf("Key: %.*s\n",
-			       (int)rkmessage->key_len, (char *)rkmessage->key);
-	}
-
-	if (output == OUTPUT_HEXDUMP)
-		hexdump(stdout, "Message Payload",
-			rkmessage->payload, rkmessage->len);
-	else
-		printf("%.*s\n",
-		       (int)rkmessage->len, (char *)rkmessage->payload);
-}
-
-
-static void metadata_print (const char *topic,
-                            const struct rd_kafka_metadata *metadata) {
-        int i, j, k;
-
-        printf("Metadata for %s (from broker %"PRId32": %s):\n",
-               topic ? : "all topics",
-               metadata->orig_broker_id,
-               metadata->orig_broker_name);
-
-
-        /* Iterate brokers */
-        printf(" %i brokers:\n", metadata->broker_cnt);
-        for (i = 0 ; i < metadata->broker_cnt ; i++)
-                printf("  broker %"PRId32" at %s:%i\n",
-                       metadata->brokers[i].id,
-                       metadata->brokers[i].host,
-                       metadata->brokers[i].port);
-
-        /* Iterate topics */
-        printf(" %i topics:\n", metadata->topic_cnt);
-        for (i = 0 ; i < metadata->topic_cnt ; i++) {
-                const struct rd_kafka_metadata_topic *t = &metadata->topics[i];
-                printf("  topic \"%s\" with %i partitions:",
-                       t->topic,
-                       t->partition_cnt);
-                if (t->err) {
-                        printf(" %s", rd_kafka_err2str(t->err));
-                        if (t->err == RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE)
-                                printf(" (try again)");
-                }
-                printf("\n");
-
-                /* Iterate topic's partitions */
-                for (j = 0 ; j < t->partition_cnt ; j++) {
-                        const struct rd_kafka_metadata_partition *p;
-                        p = &t->partitions[j];
-                        printf("    partition %"PRId32", "
-                               "leader %"PRId32", replicas: ",
-                               p->id, p->leader);
-
-                        /* Iterate partition's replicas */
-                        for (k = 0 ; k < p->replica_cnt ; k++)
-                                printf("%s%"PRId32,
-                                       k > 0 ? ",":"", p->replicas[k]);
-
-                        /* Iterate partition's ISRs */
-                        printf(", isrs: ");
-                        for (k = 0 ; k < p->isr_cnt ; k++)
-                                printf("%s%"PRId32,
-                                       k > 0 ? ",":"", p->isrs[k]);
-                        if (p->err)
-                                printf(", %s\n", rd_kafka_err2str(p->err));
-                        else
-                                printf("\n");
-                }
-        }
-}
-
-
-static void set_brokerlist_from_zookeeper(zhandle_t *zzh, char *brokers)
-{
-	if (zzh)
-	{
-		struct String_vector brokerlist;
-		if (zoo_get_children(zzh, BROKER_PATH, 1, &brokerlist) != ZOK)
-		{
-			fprintf(stderr, "No brokers found on path %s\n", BROKER_PATH);
-			return;
-		}
-
-		int i;
-		char *brokerptr = brokers;
-		for (i = 0; i < brokerlist.count; i++)
-		{
-			char path[255], cfg[1024];
-			sprintf(path, "/brokers/ids/%s", brokerlist.data[i]);
-			int len = sizeof(cfg);
-			zoo_get(zzh, path, 0, cfg, &len, NULL);
-
-			if (len > 0)
-			{
-				cfg[len] = '\0';
-				json_error_t jerror;
-				json_t *jobj = json_loads(cfg, 0, &jerror);
-				if (jobj)
-				{
-					json_t *jhost = json_object_get(jobj, "host");
-					json_t *jport = json_object_get(jobj, "port");
-
-					if (jhost && jport)
-					{
-						const char *host = json_string_value(jhost);
-						const int   port = json_integer_value(jport);
-						sprintf(brokerptr, "%s:%d", host, port);
-
-						brokerptr += strlen(brokerptr);
-						if (i < brokerlist.count - 1)
-						{
-							*brokerptr++ = ',';
-						}
-					}
-					json_decref(jobj);
-				}
-			}
-		}
-		deallocate_String_vector(&brokerlist);
-		printf("Found brokers %s\n", brokers);
-	}
-}
-
-
-static void watcher(zhandle_t *zh, int type, int state, const char *path, void *watcherCtx)
-{
-	char brokers[1024];
-	if (type == ZOO_CHILD_EVENT && strncmp(path, BROKER_PATH, sizeof(BROKER_PATH) - 1) == 0)
-	{
-		brokers[0] = '\0';
-		set_brokerlist_from_zookeeper(zh, brokers);
-		if (brokers[0] != '\0' && rk != NULL)
-		{
-			rd_kafka_brokers_add(rk, brokers);
-			rd_kafka_poll(rk, 10);
-		}
-	}
-}
-
-
-static zhandle_t* initialize_zookeeper(const char * zookeeper, const int debug)
-{
-	zhandle_t *zh;
-	if (debug)
-	{
-		zoo_set_debug_level(ZOO_LOG_LEVEL_DEBUG);
-	}
-	zh = zookeeper_init(zookeeper, watcher, 10000, 0, 0, 0);
-	if (zh == NULL)
-	{
-		fprintf(stderr, "Zookeeper connection not established.");
-		exit(1);
-	}
-	return zh;
-}
-
-
-static void sig_usr1 (int sig) {
-	rd_kafka_dump(stdout, rk);
-}
-
-int main (int argc, char **argv) {
-	rd_kafka_topic_t *rkt;
-	char *zookeeper = "localhost:2181";
-	zhandle_t *zh = NULL;
-	char brokers[1024];
-	char mode = 'C';
-	char *topic = NULL;
-	int partition = RD_KAFKA_PARTITION_UA;
-	int opt;
-	rd_kafka_conf_t *conf;
-	rd_kafka_topic_conf_t *topic_conf;
-	char errstr[512];
-	const char *debug = NULL;
-	int64_t start_offset = 0;
-	int do_conf_dump = 0;
-
-	memset(brokers, 0, sizeof(brokers));
-	quiet = !isatty(STDIN_FILENO);
-
-	/* Kafka configuration */
-	conf = rd_kafka_conf_new();
-
-	/* Topic configuration */
-	topic_conf = rd_kafka_topic_conf_new();
-
-	while ((opt = getopt(argc, argv, "PCLt:p:k:z:qd:o:eX:A")) != -1) {
-		switch (opt) {
-		case 'P':
-		case 'C':
-                case 'L':
-			mode = opt;
-			break;
-		case 't':
-			topic = optarg;
-			break;
-		case 'p':
-			partition = atoi(optarg);
-			break;
-		case 'k':
-			zookeeper = optarg;
-			break;
-		case 'z':
-			if (rd_kafka_conf_set(conf, "compression.codec",
-					      optarg,
-					      errstr, sizeof(errstr)) !=
-			    RD_KAFKA_CONF_OK) {
-				fprintf(stderr, "%% %s\n", errstr);
-				exit(1);
-			}
-			break;
-		case 'o':
-			if (!strcmp(optarg, "end"))
-				start_offset = RD_KAFKA_OFFSET_END;
-			else if (!strcmp(optarg, "beginning"))
-				start_offset = RD_KAFKA_OFFSET_BEGINNING;
-			else if (!strcmp(optarg, "stored"))
-				start_offset = RD_KAFKA_OFFSET_STORED;
-			else
-				start_offset = strtoll(optarg, NULL, 10);
-			break;
-		case 'e':
-			exit_eof = 1;
-			break;
-		case 'd':
-			debug = optarg;
-			break;
-		case 'q':
-			quiet = 1;
-			break;
-		case 'A':
-			output = OUTPUT_RAW;
-			break;
-		case 'X':
-		{
-			char *name, *val;
-			rd_kafka_conf_res_t res;
-
-			if (!strcmp(optarg, "list") ||
-			    !strcmp(optarg, "help")) {
-				rd_kafka_conf_properties_show(stdout);
-				exit(0);
-			}
-
-			if (!strcmp(optarg, "dump")) {
-				do_conf_dump = 1;
-				continue;
-			}
-
-			name = optarg;
-			if (!(val = strchr(name, '='))) {
-				fprintf(stderr, "%% Expected "
-					"-X property=value, not %s\n", name);
-				exit(1);
-			}
-
-			*val = '\0';
-			val++;
-
-			res = RD_KAFKA_CONF_UNKNOWN;
-			/* Try "topic." prefixed properties on topic
-			 * conf first, and then fall through to global if
-			 * it didn't match a topic configuration property. */
-			if (!strncmp(name, "topic.", strlen("topic.")))
-				res = rd_kafka_topic_conf_set(topic_conf,
-							      name+
-							      strlen("topic."),
-							      val,
-							      errstr,
-							      sizeof(errstr));
-
-			if (res == RD_KAFKA_CONF_UNKNOWN)
-				res = rd_kafka_conf_set(conf, name, val,
-							errstr, sizeof(errstr));
-
-			if (res != RD_KAFKA_CONF_OK) {
-				fprintf(stderr, "%% %s\n", errstr);
-				exit(1);
-			}
-		}
-		break;
-
-		default:
-			goto usage;
-		}
-	}
-
-
-	if (do_conf_dump) {
-		const char **arr;
-		size_t cnt;
-		int pass;
-
-		for (pass = 0 ; pass < 2 ; pass++) {
-			int i;
-
-			if (pass == 0) {
-				arr = rd_kafka_conf_dump(conf, &cnt);
-				printf("# Global config\n");
-			} else {
-				printf("# Topic config\n");
-				arr = rd_kafka_topic_conf_dump(topic_conf,
-							       &cnt);
-			}
-
-			for (i = 0 ; i < cnt ; i += 2)
-				printf("%s = %s\n",
-				       arr[i], arr[i+1]);
-
-			printf("\n");
-
-			rd_kafka_conf_dump_free(arr, cnt);
-		}
-
-		exit(0);
-	}
-
-
-	if (optind != argc || (mode != 'L' && !topic)) {
-	usage:
-		fprintf(stderr,
-			"Usage: %s -C|-P|-L -t <topic> "
-			"[-p <partition>] [-b <host1:port1,host2:port2,..>]\n"
-			"\n"
-			"librdkafka version %s (0x%08x)\n"
-			"\n"
-			" Options:\n"
-			"  -C | -P         Consumer or Producer mode\n"
-                        "  -L              Metadata list mode\n"
-			"  -t <topic>      Topic to fetch / produce\n"
-			"  -p <num>        Partition (random partitioner)\n"
-			"  -k <zookeepers> Zookeeper address (localhost:2181)\n"
-			"  -z <codec>      Enable compression:\n"
-			"                  none|gzip|snappy\n"
-			"  -o <offset>     Start offset (consumer)\n"
-			"  -e              Exit consumer when last message\n"
-			"                  in partition has been received.\n"
-			"  -d [facs..]     Enable debugging contexts:\n"
-			"  -q              Be quiet\n"
-			"  -A              Raw payload output (consumer)\n"
-			"                  %s\n"
-			"  -X <prop=name> Set arbitrary librdkafka "
-			"configuration property\n"
-			"               Properties prefixed with \"topic.\" "
-			"will be set on topic object.\n"
-			"               Use '-X list' to see the full list\n"
-			"               of supported properties.\n"
-			"\n"
-			" In Consumer mode:\n"
-			"  writes fetched messages to stdout\n"
-			" In Producer mode:\n"
-			"  reads messages from stdin and sends to broker\n"
-                        " In List mode:\n"
-                        "  queries broker for metadata information, "
-                        "topic is optional.\n"
-			"\n"
-			"\n"
-			"\n",
-			argv[0],
-			rd_kafka_version_str(), rd_kafka_version(),
-			RD_KAFKA_DEBUG_CONTEXTS);
-		exit(1);
-	}
-
-
-	signal(SIGINT, stop);
-	signal(SIGUSR1, sig_usr1);
-
-	if (debug &&
-	    rd_kafka_conf_set(conf, "debug", debug, errstr, sizeof(errstr)) !=
-	    RD_KAFKA_CONF_OK) {
-		fprintf(stderr, "%% Debug configuration failed: %s: %s\n",
-			errstr, debug);
-		exit(1);
-	}
-
-        /* Set logger */
-        rd_kafka_conf_set_log_cb(conf, logger);
-
-	/** Initialize zookeeper */
-	zh = initialize_zookeeper(zookeeper, debug != NULL);
-
-	/* Add brokers */
-	set_brokerlist_from_zookeeper(zh, brokers);
-        if (rd_kafka_conf_set(conf, "metadata.broker.list",
-                              brokers, errstr, sizeof(errstr)) !=
-            RD_KAFKA_CONF_OK) {
-                fprintf(stderr, "%% Failed to set brokers: %s\n", errstr);
-                exit(1);
-        }
-
-	if (debug) {
-		printf("Broker list from zookeeper cluster %s: %s\n", zookeeper, brokers);
-	}
-
-	if (mode == 'P') {
-		/*
-		 * Producer
-		 */
-		char buf[2048];
-		int sendcnt = 0;
-
-		/* Set up a message delivery report callback.
-		 * It will be called once for each message, either on successful
-		 * delivery to broker, or upon failure to deliver to broker. */
-		rd_kafka_conf_set_dr_cb(conf, msg_delivered);
-
-		/* Create Kafka handle */
-		if (!(rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf,
-					errstr, sizeof(errstr)))) {
-			fprintf(stderr,
-				"%% Failed to create new producer: %s\n",
-				errstr);
-			exit(1);
-		}
-
-		/* Create topic */
-		rkt = rd_kafka_topic_new(rk, topic, topic_conf);
-
-		if (!quiet)
-			fprintf(stderr,
-				"%% Type stuff and hit enter to send\n");
-
-		while (run && fgets(buf, sizeof(buf), stdin)) {
-			size_t len = strlen(buf);
-			if (buf[len-1] == '\n')
-				buf[--len] = '\0';
-
-			/* Send/Produce message. */
-			if (rd_kafka_produce(rkt, partition,
-					     RD_KAFKA_MSG_F_COPY,
-					     /* Payload and length */
-					     buf, len,
-					     /* Optional key and its length */
-					     NULL, 0,
-					     /* Message opaque, provided in
-					      * delivery report callback as
-					      * msg_opaque. */
-					     NULL) == -1) {
-				fprintf(stderr,
-					"%% Failed to produce to topic %s "
-					"partition %i: %s\n",
-					rd_kafka_topic_name(rkt), partition,
-					rd_kafka_err2str(
-						rd_kafka_errno2err(errno)));
-				/* Poll to handle delivery reports */
-				rd_kafka_poll(rk, 0);
-				continue;
-			}
-
-			if (!quiet)
-				fprintf(stderr, "%% Sent %zd bytes to topic "
-					"%s partition %i\n",
-				len, rd_kafka_topic_name(rkt), partition);
-			sendcnt++;
-			/* Poll to handle delivery reports */
-			rd_kafka_poll(rk, 0);
-		}
-
-		/* Poll to handle delivery reports */
-		rd_kafka_poll(rk, 0);
-
-		/* Wait for messages to be delivered */
-		while (run && rd_kafka_outq_len(rk) > 0)
-			rd_kafka_poll(rk, 100);
-
-		/* Destroy the handle */
-		rd_kafka_destroy(rk);
-
-	} else if (mode == 'C') {
-		/*
-		 * Consumer
-		 */
-
-		/* Create Kafka handle */
-		if (!(rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf,
-					errstr, sizeof(errstr)))) {
-			fprintf(stderr,
-				"%% Failed to create new consumer: %s\n",
-				errstr);
-			exit(1);
-		}
-
-		/* Create topic */
-		rkt = rd_kafka_topic_new(rk, topic, topic_conf);
-
-		/* Start consuming */
-		if (rd_kafka_consume_start(rkt, partition, start_offset) == -1){
-			fprintf(stderr, "%% Failed to start consuming: %s\n",
-				rd_kafka_err2str(rd_kafka_errno2err(errno)));
-			exit(1);
-		}
-
-		while (run) {
-			rd_kafka_message_t *rkmessage;
-
-			/* Consume single message.
-			 * See rdkafka_performance.c for high speed
-			 * consuming of messages. */
-			rkmessage = rd_kafka_consume(rkt, partition, 1000);
-			if (!rkmessage) /* timeout */
-				continue;
-
-			msg_consume(rkmessage, NULL);
-
-			/* Return message to rdkafka */
-			rd_kafka_message_destroy(rkmessage);
-		}
-
-		/* Stop consuming */
-		rd_kafka_consume_stop(rkt, partition);
-
-		rd_kafka_topic_destroy(rkt);
-
-		rd_kafka_destroy(rk);
-
-	} else if (mode == 'L') {
-		rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;
-
-		/* Create Kafka handle */
-		if (!(rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf,
-					errstr, sizeof(errstr)))) {
-			fprintf(stderr,
-				"%% Failed to create new producer: %s\n",
-				errstr);
-			exit(1);
-		}
-
-		/* Create topic */
-		if (topic)
-			rkt = rd_kafka_topic_new(rk, topic, topic_conf);
-		else
-			rkt = NULL;
-
-		while (run) {
-			const struct rd_kafka_metadata *metadata;
-
-			/* Fetch metadata */
-			err = rd_kafka_metadata(rk, rkt ? 0 : 1, rkt,
-						&metadata, 5000);
-			if (err != RD_KAFKA_RESP_ERR_NO_ERROR) {
-				fprintf(stderr,
-					"%% Failed to acquire metadata: %s\n",
-					rd_kafka_err2str(err));
-				run = 0;
-				break;
-			}
-
-			metadata_print(topic, metadata);
-
-			rd_kafka_metadata_destroy(metadata);
-			run = 0;
-		}
-
-		/* Destroy the handle */
-		rd_kafka_destroy(rk);
-
-		/* Exit right away, don't wait for background cleanup, we haven't
-		 * done anything important anyway. */
-		exit(err ? 2 : 0);
-	}
-
-	/* Let background threads clean up and terminate cleanly. */
-	rd_kafka_wait_destroyed(2000);
-
-	/** Free the zookeeper data. */
-	zookeeper_close(zh);
-
-	return 0;
-}

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/lds-gen.py
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/lds-gen.py b/thirdparty/librdkafka-0.11.1/lds-gen.py
deleted file mode 100755
index 1136580..0000000
--- a/thirdparty/librdkafka-0.11.1/lds-gen.py
+++ /dev/null
@@ -1,38 +0,0 @@
-#!/usr/bin/env python
-#
-#
-# Generate linker script to only expose symbols of the public API
-#
-
-import sys
-import re
-
-
-if __name__ == '__main__':
-
-    funcs = list()
-    last_line = ''
-
-    for line in sys.stdin:
-        m = re.match(r'^(\S+.*\s+\**)?(rd_kafka_\S+)\s*\(', line)
-        if m:
-            sym = m.group(2)
-            # Ignore static (unused) functions
-            m2 = re.match(r'(RD_UNUSED|__attribute__\(\(unused\)\))', last_line)
-            if not m2:
-                funcs.append(sym)
-            last_line = ''
-        else:
-            last_line = line
-
-    print('# Automatically generated by lds-gen.py - DO NOT EDIT')
-    print('{\n global:')
-    if len(funcs) == 0:
-        print('    *;')
-    else:
-        for f in sorted(funcs):
-            print('    %s;' % f)
-
-        print('local:\n    *;')
-
-    print('};')

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/mainpage.doxy
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/mainpage.doxy b/thirdparty/librdkafka-0.11.1/mainpage.doxy
deleted file mode 100644
index 97f2456..0000000
--- a/thirdparty/librdkafka-0.11.1/mainpage.doxy
+++ /dev/null
@@ -1,35 +0,0 @@
-/**
- * @mainpage librdkafka documentation
- *
- * librdkafka is the Apache Kafka C/C++ client library.
- *
- * @section intro Introduction
- *
- * For an introduction and manual to librdkafka see INTRODUCTION.md
- *
- * @section conf Configuration
- *
- * librdkafka is highly configurable to meet any deployment demands.
- * It is usually safe to leave most configuration properties to their default
- * values.
- *
- * See CONFIGURATION.md for the full list of supported configuration properties.
- *
- * @remark Application developers are recommended to provide a non-hardcoded
- *         interface to librdkafka's string-based name-value configuration
- *         property interface, allowing users to configure any librdkafka
- *         property directly without alterations to the application.
- *         This allows for seamless upgrades where linking to a new version
- *         of librdkafka automatically provides new configuration
- *         based features.
-
- *
- * @section c_api C API
- *
- * The C API is documented in rdkafka.h
- *
- * @section cpp_api C++ API
- *
- * The C++ API is documented in rdkafkacpp.h
- */
-
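
A minimal sketch of the pass-through configuration interface the remark above
recommends: the application forwards user-supplied name=value pairs to
rd_kafka_conf_set() untouched, so new librdkafka properties become available
without application changes. The surrounding application structure is
hypothetical; only the rd_kafka_conf_set() call is the documented API.

    /* Illustrative sketch. */
    #include <stdio.h>
    #include <librdkafka/rdkafka.h>

    static int apply_user_property (rd_kafka_conf_t *conf,
                                    const char *name, const char *value) {
            char errstr[512];

            if (rd_kafka_conf_set(conf, name, value,
                                  errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) {
                    fprintf(stderr, "%% %s\n", errstr);
                    return -1;
            }
            return 0;
    }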

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/mklove/Makefile.base
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/mklove/Makefile.base b/thirdparty/librdkafka-0.11.1/mklove/Makefile.base
deleted file mode 100755
index 5f2b775..0000000
--- a/thirdparty/librdkafka-0.11.1/mklove/Makefile.base
+++ /dev/null
@@ -1,193 +0,0 @@
-# Base Makefile providing various standard targets
-# Part of mklove suite but may be used independently.
-
-MKL_RED?=	\033[031m
-MKL_GREEN?=	\033[032m
-MKL_YELLOW?=	\033[033m
-MKL_BLUE?=	\033[034m
-MKL_CLR_RESET?=	\033[0m
-
-DEPS=		$(OBJS:%.o=%.d)
-
-# TOPDIR is the parent of the mklove directory ("TOPDIR/mklove/.."), i.e. the project top directory.
-# We do it with two dir calls instead of /.. to support mklove being symlinked.
-MKLOVE_DIR := $(dir $(lastword $(MAKEFILE_LIST)))
-TOPDIR = $(MKLOVE_DIR:mklove/=.)
-
-
-# Convert LIBNAME ("libxyz") to "xyz"
-LIBNAME0=$(LIBNAME:lib%=%)
-
-# Silence lousy default ARFLAGS (rv)
-ARFLAGS=
-
-ifndef MKL_MAKEFILE_CONFIG
--include $(TOPDIR)/Makefile.config
-endif
-
-_UNAME_S := $(shell uname -s)
-ifeq ($(_UNAME_S),Darwin)
-	LIBFILENAME=$(LIBNAME).$(LIBVER)$(SOLIB_EXT)
-	LIBFILENAMELINK=$(LIBNAME)$(SOLIB_EXT)
-else
-	LIBFILENAME=$(LIBNAME)$(SOLIB_EXT).$(LIBVER)
-	LIBFILENAMELINK=$(LIBNAME)$(SOLIB_EXT)
-endif
-
-INSTALL?=		install
-INSTALL_PROGRAM?=	$(INSTALL)
-INSTALL_DATA?=		$(INSTALL) -m 644
-
-prefix?=	/usr/local
-exec_prefix?=	$(prefix)
-bindir?=	$(exec_prefix)/bin
-sbindir?=	$(exec_prefix)/sbin
-libexecdir?=	$(exec_prefix)/libexec/  # append PKGNAME on install
-datarootdir?=	$(prefix)/share
-datadir?=	$(datarootdir)		 # append PKGNAME on install
-sysconfdir?=	$(prefix)/etc
-sharedstatedir?=$(prefix)/com
-localestatedir?=$(prefix)/var
-runstatedir?=	$(localestatedir)/run
-includedir?=	$(prefix)/include
-docdir?=	$(datarootdir)/doc/$(PKGNAME)
-infodir?=	$(datarootdir)/info
-libdir?=	$(prefix)/lib
-localedir?=	$(datarootdir)/locale
-pkgconfigdir?=	$(libdir)/pkgconfig
-mandir?=	$(datarootdir)/man
-man1dir?=	$(mandir)/man1
-man2dir?=	$(mandir)/man2
-man3dir?=	$(mandir)/man3
-man4dir?=	$(mandir)/man4
-man5dir?=	$(mandir)/man5
-man6dir?=	$(mandir)/man6
-man7dir?=	$(mandir)/man7
-man8dir?=	$(mandir)/man8
-
-
-# Checks that mklove is set up and ready for building
-mklove-check:
-	@if [ ! -f "$(TOPDIR)/Makefile.config" ]; then \
-		printf "$(MKL_RED)$(TOPDIR)/Makefile.config missing: please run ./configure$(MKL_CLR_RESET)\n" ; \
-		exit 1 ; \
-	fi
-
-%.o: %.c
-	$(CC) -MD -MP $(CPPFLAGS) $(CFLAGS) -c $< -o $@
-
-%.o: %.cpp
-	$(CXX) -MD -MP $(CPPFLAGS) $(CXXFLAGS) -c $< -o $@
-
-
-lib: $(LIBFILENAME) $(LIBNAME).a $(LIBFILENAMELINK) lib-gen-pkg-config
-
-$(LIBNAME).lds: #overridable
-
-$(LIBFILENAME): $(OBJS) $(LIBNAME).lds
-	@printf "$(MKL_YELLOW)Creating shared library $@$(MKL_CLR_RESET)\n"
-	$(CC) $(LDFLAGS) $(LIB_LDFLAGS) $(OBJS) -o $@ $(LIBS)
-
-$(LIBNAME).a:	$(OBJS)
-	@printf "$(MKL_YELLOW)Creating static library $@$(MKL_CLR_RESET)\n"
-	$(AR) rcs$(ARFLAGS) $@ $(OBJS)
-
-$(LIBFILENAMELINK): $(LIBFILENAME)
-	@printf "$(MKL_YELLOW)Creating $@ symlink$(MKL_CLR_RESET)\n"
-	rm -f "$@" && ln -s "$^" "$@"
-
-
-# pkg-config .pc file definition
-ifeq ($(GEN_PKG_CONFIG),y)
-define _PKG_CONFIG_DEF
-prefix=$(prefix)
-libdir=$(libdir)
-includedir=$(includedir)
-
-Name: $(LIBNAME)
-Description: $(MKL_APP_DESC_ONELINE)
-Version: $(MKL_APP_VERSION)
-Cflags: -I$${includedir}
-Libs: -L$${libdir} -l$(LIBNAME0)
-Libs.private: $(patsubst -L%,,$(LIBS))
-endef
-
-export _PKG_CONFIG_DEF
-
-$(LIBNAME0).pc: $(TOPDIR)/Makefile.config
-	@printf "$(MKL_YELLOW)Generating pkg-config file $@$(MKL_CLR_RESET)\n"
-	@echo "$$_PKG_CONFIG_DEF" > $@
-
-lib-gen-pkg-config: $(LIBNAME0).pc
-
-lib-clean-pkg-config:
-	rm -f $(LIBNAME0).pc
-else
-lib-gen-pkg-config:
-lib-clean-pkg-config:
-endif
-
-
-$(BIN): $(OBJS)
-	@printf "$(MKL_YELLOW)Creating program $@$(MKL_CLR_RESET)\n"
-	$(CC) $(CPPFLAGS) $(LDFLAGS) $(OBJS) -o $@ $(LIBS)
-
-
-file-check:
-	@printf "$(MKL_YELLOW)Checking $(LIBNAME) integrity$(MKL_CLR_RESET)\n"
-	@RET=true ; \
-	for f in $(CHECK_FILES) ; do \
-		printf "%-30s " $$f ; \
-		if [ -f "$$f" ]; then \
-			printf "$(MKL_GREEN)OK$(MKL_CLR_RESET)\n" ; \
-		else \
-			printf "$(MKL_RED)MISSING$(MKL_CLR_RESET)\n" ; \
-			RET=false ; \
-		fi ; \
-	done ; \
-	$$RET
-
-
-lib-install:
-	@printf "$(MKL_YELLOW)Install $(LIBNAME) to $$DESTDIR$(prefix)$(MKL_CLR_RESET)\n"
-	$(INSTALL) -d $$DESTDIR$(includedir)/$(PKGNAME) ; \
-	$(INSTALL) -d $$DESTDIR$(libdir) ; \
-	$(INSTALL) $(HDRS) $$DESTDIR$(includedir)/$(PKGNAME) ; \
-	$(INSTALL) $(LIBNAME).a $$DESTDIR$(libdir) ; \
-	$(INSTALL) $(LIBFILENAME) $$DESTDIR$(libdir) ; \
-	[ -f "$(LIBNAME0).pc" ] && ( \
-		$(INSTALL) -d $$DESTDIR$(pkgconfigdir) ; \
-		$(INSTALL) -m 0644 $(LIBNAME0).pc $$DESTDIR$(pkgconfigdir) \
-	) ; \
-	(cd $$DESTDIR$(libdir) && ln -sf $(LIBFILENAME) $(LIBFILENAMELINK))
-
-lib-uninstall:
-	@printf "$(MKL_YELLOW)Uninstall $(LIBNAME) from $$DESTDIR$(prefix)$(MKL_CLR_RESET)\n"
-	for hdr in $(HDRS) ; do \
-		rm -f $$DESTDIR$(includedir)/$(PKGNAME)/$$hdr ; done
-	rm -f $$DESTDIR$(libdir)/$(LIBNAME).a
-	rm -f $$DESTDIR$(libdir)/$(LIBFILENAME)
-	rm -f $$DESTDIR$(libdir)/$(LIBFILENAMELINK)
-	rmdir $$DESTDIR$(includedir)/$(PKGNAME) || true
-
-
-
-bin-install:
-	@printf "$(MKL_YELLOW)Install $(BIN) to $$DESTDIR$(prefix)$(MKL_CLR_RESET)\n"
-	$(INSTALL) -d $$DESTDIR$(bindir) && \
-	$(INSTALL) $(BIN) $$DESTDIR$(bindir) 
-
-bin-uninstall:
-	@printf "$(MKL_YELLOW)Uninstall $(BIN) from $$DESTDIR$(prefix)$(MKL_CLR_RESET)\n"
-	rm -f $$DESTDIR$(bindir)/$(BIN)
-
-
-generic-clean:
-	rm -f $(OBJS) $(DEPS)
-
-lib-clean: generic-clean lib-clean-pkg-config
-	rm -f $(LIBNAME)*.a $(LIBFILENAME) $(LIBFILENAMELINK) \
-		$(LIBNAME).lds
-
-bin-clean: generic-clean
-	rm -f $(BIN)

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/mklove/modules/configure.atomics
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/mklove/modules/configure.atomics b/thirdparty/librdkafka-0.11.1/mklove/modules/configure.atomics
deleted file mode 100644
index 31639a7..0000000
--- a/thirdparty/librdkafka-0.11.1/mklove/modules/configure.atomics
+++ /dev/null
@@ -1,144 +0,0 @@
-#!/bin/bash
-#
-# Checks for atomic ops:
-#  compiler builtin (__sync_..) and portable libatomic's (__atomic_..)
-# Will also provide abstraction by defining the prefix to use.
-#
-# Sets:
-#  HAVE_ATOMICS
-#  HAVE_ATOMICS_32
-#  HAVE_ATOMICS_64
-#  HAVE_ATOMICS_32_ATOMIC   __atomic interface
-#  HAVE_ATOMICS_32_SYNC     __sync interface
-#  HAVE_ATOMICS_64_ATOMIC   __atomic interface
-#  HAVE_ATOMICS_64_SYNC     __sync interface
-#  WITH_LIBATOMIC
-#  LIBS
-#
-#  ATOMIC_OP(OP1,OP2,PTR,VAL)
-#  ATOMIC_OP32(OP1,OP2,PTR,VAL)
-#  ATOMIC_OP64(OP1,OP2,PTR,VAL)
-#   where op* is 'add,sub,fetch'
-#   e.g:  ATOMIC_OP32(add, fetch, &i, 10)
-#         becomes __atomic_add_fetch(&i, 10, ..) or
-#                 __sync_add_and_fetch(&i, 10)
-#
-
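
For reference, a small C sketch of the two builtin families this module probes
for; ATOMIC_OP32(add, fetch, &i, 10) expands to one of these depending on which
interface is detected (the HAVE_ATOMICS_32_* defines are the ones set below):

    /* Illustrative sketch, not part of the configure module. */
    #include <inttypes.h>

    int32_t add10 (int32_t *i) {
    #if defined(HAVE_ATOMICS_32_ATOMIC)
            return __atomic_add_fetch(i, 10, __ATOMIC_SEQ_CST);
    #elif defined(HAVE_ATOMICS_32_SYNC)
            return __sync_add_and_fetch(i, 10);
    #else
            return *i += 10;  /* non-atomic fallback, illustrative only */
    #endif
    }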
-function checks {
-
-
-    # We prefer the newer __atomic stuff, but 64-bit atomics might
-    # require linking with -latomic, so we need to perform these tests
-    # in the proper order:
-    #   __atomic 32
-    #   __atomic 32 -latomic
-    #   __sync 32
-    #
-    #   __atomic 64
-    #   __atomic 64 -latomic
-    #   __sync 64
-
-    local _libs=
-    local _a32="__atomic_ ## OP1 ## _ ## OP2(PTR, VAL, __ATOMIC_SEQ_CST)"
-    local _a64="__atomic_ ## OP1 ## _ ## OP2(PTR, VAL, __ATOMIC_SEQ_CST)"
-
-    # 32-bit:
-    # Try fully builtin __atomic
-    if ! mkl_compile_check __atomic_32 HAVE_ATOMICS_32 cont CC "" \
-        "
-#include <inttypes.h>
-int32_t foo (int32_t i) {
-  return __atomic_add_fetch(&i, 1, __ATOMIC_SEQ_CST);
-}"
-        then
-        # Try __atomic with -latomic
-        if mkl_compile_check --ldflags="-latomic" __atomic_32_lib HAVE_ATOMICS_32 \
-            cont CC "" \
-            "
-#include <inttypes.h>
-int32_t foo (int32_t i) {
-  return __atomic_add_fetch(&i, 1, __ATOMIC_SEQ_CST);
-}"
-        then
-            _libs="-latomic"
-            mkl_allvar_set "__atomic_32_lib" "HAVE_ATOMICS_32_ATOMIC" "y"
-        else
-            # Try __sync interface
-            if mkl_compile_check __sync_32 HAVE_ATOMICS_32 disable CC "" \
-                "
-#include <inttypes.h>
-int32_t foo (int32_t i) {
-  return __sync_add_and_fetch(&i, 1);
-}"
-                then
-                _a32="__sync_ ## OP1 ## _and_ ## OP2(PTR, VAL)"
-                mkl_allvar_set "__sync_32" "HAVE_ATOMICS_32_SYNC" "y"
-            else
-                _a32=""
-            fi
-        fi
-    else
-        mkl_allvar_set "__atomic_32" "HAVE_ATOMICS_32_ATOMIC" "y"
-    fi
-
-
-    if [[ ! -z $_a32 ]]; then
-        mkl_define_set "atomic_32" "ATOMIC_OP32(OP1,OP2,PTR,VAL)" "code:$_a32"
-    fi
-
-
-
-    # 64-bit:
-    # Try fully builtin __atomic
-    if ! mkl_compile_check __atomic_64 HAVE_ATOMICS_64 cont CC "" \
-        "
-#include <inttypes.h>
-int64_t foo (int64_t i) {
-  return __atomic_add_fetch(&i, 1, __ATOMIC_SEQ_CST);
-}"
-        then
-        # Try __atomic with -latomic
-        if mkl_compile_check --ldflags="-latomic" __atomic_64_lib HAVE_ATOMICS_64 \
-            cont CC "" \
-            "
-#include <inttypes.h>
-int64_t foo (int64_t i) {
-  return __atomic_add_fetch(&i, 1, __ATOMIC_SEQ_CST);
-}"
-        then
-            _libs="-latomic"
-            mkl_allvar_set "__atomic_64_lib" "HAVE_ATOMICS_64_ATOMIC" "y"
-        else
-            # Try __sync interface
-            if mkl_compile_check __sync_64 HAVE_ATOMICS_64 disable CC "" \
-                "
-#include <inttypes.h>
-int64_t foo (int64_t i) {
-  return __sync_add_and_fetch(&i, 1);
-}"
-                then
-                _a64="__sync_ ## OP1 ## _and_ ## OP2 (PTR, VAL)"
-                mkl_allvar_set "__sync_64" "HAVE_ATOMICS_64_SYNC" "y"
-            else
-                _a64=""
-            fi
-        fi
-    else
-        mkl_allvar_set "__atomic_64" "HAVE_ATOMICS_64_ATOMIC" "y"
-    fi
-
-
-    if [[ ! -z $_a64 ]]; then
-        mkl_define_set "atomic_64" "ATOMIC_OP64(OP1,OP2,PTR,VAL)" "code:$_a64"
-
-        # Define generic ATOMIC() macro identical to 64-bit atomics"
-        mkl_define_set "atomic_64" "ATOMIC_OP(OP1,OP2,PTR,VAL)" "code:$_a64"
-    fi
-
-
-    if [[ ! -z $_libs ]]; then
-        mkl_mkvar_append LDFLAGS LDFLAGS "-Wl,--as-needed"
-        mkl_mkvar_append LIBS LIBS "$_libs"
-    fi
-
-}
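For reference, the ATOMIC_OP32()/ATOMIC_OP64() abstraction produced by the module above resolves to one of the two builtin families it probes for. A minimal C sketch of both expansions, following the add/fetch example in the header comment (function and variable names here are illustrative only):

    #include <inttypes.h>

    /* What ATOMIC_OP32(add, fetch, &v, 10) becomes when the __atomic
     * interface was detected (HAVE_ATOMICS_32_ATOMIC): */
    int32_t add10_atomic (int32_t *v) {
            return __atomic_add_fetch(v, 10, __ATOMIC_SEQ_CST);
    }

    /* ...and when only the older __sync interface is available
     * (HAVE_ATOMICS_32_SYNC): */
    int32_t add10_sync (int32_t *v) {
            return __sync_add_and_fetch(v, 10);
    }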

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/mklove/modules/configure.base
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/mklove/modules/configure.base b/thirdparty/librdkafka-0.11.1/mklove/modules/configure.base
deleted file mode 100644
index df9c779..0000000
--- a/thirdparty/librdkafka-0.11.1/mklove/modules/configure.base
+++ /dev/null
@@ -1,1772 +0,0 @@
-#!/bin/bash
-#
-#
-# mklove base configure module, implements the mklove configure framework
-#
-
-MKL_MODULES="base"
-MKL_CACHEVARS=""
-MKL_MKVARS=""
-MKL_DEFINES=""
-MKL_CHECKS=""
-MKL_LOAD_STACK=""
-
-MKL_IDNEXT=1
-
-MKL_OUTMK=_mklout.mk
-MKL_OUTH=_mklout.h
-MKL_OUTDBG=config.log
-
-MKL_GENERATORS="base:mkl_generate_late_vars"
-MKL_CLEANERS=""
-
-MKL_FAILS=""
-MKL_LATE_VARS=""
-
-MKL_OPTS_SET=""
-
-MKL_RED=""
-MKL_GREEN=""
-MKL_YELLOW=""
-MKL_BLUE=""
-MKL_CLR_RESET=""
-
-
-MKL_NO_DOWNLOAD=0
-
-if [[ -z "$MKL_REPO_URL" ]]; then
-    MKL_REPO_URL="http://github.com/edenhill/mklove/raw/master"
-fi
-
-
-
-# Default mklove directory to PWD/mklove
-[[ -z "$MKLOVE_DIR" ]] && MKLOVE_DIR=mklove
-
-
-###########################################################################
-#
-# Variable types:
-#   env      - Standard environment variables.
-#   var      - mklove runtime variable, cached or not.
-#   mkvar    - Makefile variables, also sets runvar
-#   define   - config.h variables/defines
-#
-###########################################################################
-
-# Low level variable assignment
-# Arguments:
-#  variable name
-#  variable value
-function mkl_var0_set {
-    export "$1"="$2"
-}
-
-# Sets a runtime variable (only used during configure)
-# If cache=1 these variables are cached to config.cache.
-# Arguments:
-#  variable name
-#  variable value
-#  [ "cache" ]
-function mkl_var_set {
-    mkl_var0_set "$1" "$2"
-    if [[ $3 == "cache" ]]; then
-        if ! mkl_in_list "$MKL_CACHEVARS" "$1" ; then
-            MKL_CACHEVARS="$MKL_CACHEVARS $1"
-        fi
-    fi
-}
-
-# Unsets a mkl variable
-# Arguments:
-#  variable name
-function mkl_var_unset {
-    unset $1
-}
-
-# Appends to a mkl variable (space delimited)
-# Arguments:
-#  variable name
-#  variable value
-function mkl_var_append {
-    if [[ -z ${!1} ]]; then
-        mkl_var_set "$1" "$2"
-    else
-        mkl_var0_set "$1" "${!1} $2"
-    fi
-}
-
-
-# Prepends to a mkl variable (space delimited)
-# Arguments:
-#  variable name
-#  variable value
-function mkl_var_prepend {
-    if [[ -z ${!1} ]]; then
-        mkl_var_set "$1" "$2"
-    else
-        mkl_var0_set "$1" "$2 ${!1}"
-    fi
-}
-
-# Shift the first word off a variable.
-# Arguments:
-#  variable name
-function mkl_var_shift {
-    local n="${!1}"
-    mkl_var0_set "$1" "${n#* }"
-    return 0
-}
-
-
-# Returns the contents of mkl variable
-# Arguments:
-#  variable name
-function mkl_var_get {
-    echo "${!1}"
-}
-
-
-
-
-# Set environment variable (runtime)
-# These variables are not cached nor written to any of the output files,
-# it's simply a helper wrapper for standard envs.
-# Arguments:
-#  varname
-#  varvalue
-function mkl_env_set {
-    mkl_var0_set "$1" "$2"
-}
-
-# Append to environment variable
-# Arguments:
-#  varname
-#  varvalue
-#  [ separator (" ") ]
-function mkl_env_append {
-    local sep=" "
-    if [[ -z ${!1} ]]; then
-        mkl_env_set "$1" "$2"
-    else
-        [ ! -z ${3} ] && sep="$3"
-        mkl_var0_set "$1" "${!1}${sep}$2"
-    fi
-
-}
-
-# Prepend to environment variable
-# Arguments:
-#  varname
-#  varvalue
-#  [ separator (" ") ]
-function mkl_env_prepend {
-    local sep=" "
-    if [[ -z ${!1} ]]; then
-        mkl_env_set "$1" "$2"
-    else
-        [ ! -z ${3} ] && sep="$3"
-        mkl_var0_set "$1" "$2${sep}${!1}"
-    fi
-
-}
-
-
-
-
-# Set a make variable (Makefile.config)
-# Arguments:
-#  config name
-#  variable name
-#  value
-function mkl_mkvar_set {
-    if [[ ! -z $2 ]]; then
-        mkl_env_set "$2" "$3"
-        mkl_in_list "$MKL_MKVARS" "$2"|| mkl_env_append MKL_MKVARS $2
-    fi
-}
-
-
-# Prepends to a make variable (Makefile.config)
-# Arguments:
-#  config name
-#  variable name
-#  value
-function mkl_mkvar_prepend {
-    if [[ ! -z $2 ]]; then
-        mkl_env_prepend "$2" "$3"
-        mkl_in_list "$MKL_MKVARS" "$2"|| mkl_env_append MKL_MKVARS $2
-    fi
-}
-
-
-# Appends to a make variable (Makefile.config)
-# Arguments:
-#  config name
-#  variable name
-#  value
-function mkl_mkvar_append {
-    if [[ ! -z $2 ]]; then
-        mkl_env_append "$2" "$3"
-        mkl_in_list "$MKL_MKVARS" "$2"|| mkl_env_append MKL_MKVARS $2
-    fi
-}
-
-
-# Prepends to a make variable (Makefile.config)
-# Arguments:
-#  config name
-#  variable name
-#  value
-function mkl_mkvar_prepend {
-    if [[ ! -z $2 ]]; then
-        mkl_env_prepend "$2" "$3"
-        mkl_in_list "$MKL_MKVARS" "$2"|| mkl_env_append MKL_MKVARS $2
-    fi
-}
-
-# Return mkvar variable value
-# Arguments:
-#  variable name
-function mkl_mkvar_get {
-    [[ -z ${!1} ]] && return 1
-    echo ${!1}
-    return 0
-}
-
-
-
-# Defines a config header define (config.h)
-# Arguments:
-#  config name
-#  define name
-#  define value (optional, default: 1)
-#   if value starts with code: then no "" are added
-function mkl_define_set {
-
-    if [[ -z $2 ]]; then
-        return 0
-    fi
-
-    local stmt=""
-    local defid=
-    if [[ $2 = *\(* ]]; then
-        # macro
-        defid="def_${2%%(*}"
-    else
-        # define
-        defid="def_$2"
-    fi
-
-    [[ -z $1 ]] || stmt="// $1\n"
-
-    local val="$3"
-    if [[ -z "$val" ]]; then
-        val="$(mkl_def $2 1)"
-    fi
-
-    # Define as code, string or integer?
-    if [[ $val == code:* ]]; then
-        # Code block, copy verbatim without quotes, strip code: prefix
-        val=${val#code:}
-    elif [[ ! ( "$val" =~ ^[0-9]+([lL]?[lL][dDuU]?)?$ || \
-        "$val" =~ ^0x[0-9a-fA-F]+([lL]?[lL][dDuU]?)?$ ) ]]; then
-        # String: quote
-        val="\"$val\""
-    fi
-    # else: unquoted integer/hex
-
-    stmt="${stmt}#define $2 $val"
-    mkl_env_set "$defid" "$stmt"
-    mkl_env_append MKL_DEFINES "$defid"
-}
-
-
-
-
-
-# Sets "all" configuration variables, that is:
-# for name set: Makefile variable, config.h define
-# Will convert value "y"|"n" to 1|0 for config.h
-# Arguments:
-#  config name
-#  variable name
-#  value
-function mkl_allvar_set {
-    mkl_mkvar_set "$1" "$2" "$3"
-    local val=$3
-    if [[ $3 = "y" ]]; then
-        val=1
-    elif [[ $3 = "n" ]]; then
-        val=0
-    fi
-    mkl_define_set "$1" "$2" "$val"
-}
-
-
-
-
-###########################################################################
-#
-#
-# Check failure functionality
-#
-#
-###########################################################################
-
-
-# Summarize all fatal failures and then exits.
-function mkl_fail_summary {
-    echo "
-
-"
-
-    local pkg_cmd=""
-    local install_pkgs=""
-    mkl_err "###########################################################"
-    mkl_err "###                  Configure failed                   ###"
-    mkl_err "###########################################################"
-    mkl_err "### Accumulated failures:                               ###"
-    mkl_err "###########################################################"
-    local n
-    for n in $MKL_FAILS ; do
-        local conf=$(mkl_var_get MKL_FAIL__${n}__conf)
-        mkl_err  " $conf ($(mkl_var_get MKL_FAIL__${n}__define)) $(mkl_meta_get $conf name)"
-        if mkl_meta_exists $conf desc; then
-            mkl_err0 "      desc: $MKL_YELLOW$(mkl_meta_get $conf desc)$MKL_CLR_RESET"
-        fi
-        mkl_err0 "    module: $(mkl_var_get MKL_FAIL__${n}__module)"
-        mkl_err0 "    action: $(mkl_var_get MKL_FAIL__${n}__action)"
-        mkl_err0 "    reason:
-$(mkl_var_get MKL_FAIL__${n}__reason)
-"
-        # Dig up some metadata to assist the user
-        case $MKL_DISTRO in
-            Debian|Ubuntu|*)
-                local debs=$(mkl_meta_get $conf "deb")
-                pkg_cmd="sudo apt-get install"
-                if [[ ${#debs} > 0 ]]; then
-                    install_pkgs="$install_pkgs $debs"
-                fi
-                ;;
-        esac
-    done
-
-    if [[ ! -z $install_pkgs ]]; then
-        mkl_err "###########################################################"
-        mkl_err "### Installing the following packages might help:       ###"
-        mkl_err "###########################################################"
-        mkl_err0 "$pkg_cmd $install_pkgs"
-        mkl_err0 ""
-    fi
-    exit 1
-}
-
-
-# Checks if there were failures.
-# Returns 0 if there were no failures, else calls failure summary and exits.
-function mkl_check_fails {
-    if [[ ${#MKL_FAILS} = 0 ]]; then
-        return 0
-    fi
-    mkl_fail_summary
-}
-
-# A check has failed but we want to carry on (and we should!).
-# We fail it all later.
-# Arguments:
-#  config name
-#  define name
-#  action
-#  reason
-function mkl_fail {
-    local n="$(mkl_env_esc "$1")"
-    mkl_var_set "MKL_FAIL__${n}__conf" "$1"
-    mkl_var_set "MKL_FAIL__${n}__module" $MKL_MODULE
-    mkl_var_set "MKL_FAIL__${n}__define" $2
-    mkl_var_set "MKL_FAIL__${n}__action" "$3"
-    if [[ -z $(mkl_var_get "MKL_FAIL__${n}__reason") ]]; then
-        mkl_var_set "MKL_FAIL__${n}__reason" "$4"
-    else
-        mkl_var_append "MKL_FAIL__${n}__reason" "
-And also:
-$4"
-    fi
-    mkl_in_list "$MKL_FAILS" "$n" || mkl_var_append MKL_FAILS "$n"
-}
-
-
-# A check failed, handle it
-# Arguments:
-#  config name
-#  define name
-#  action (fail|disable|ignore|cont)
-#  reason
-function mkl_check_failed {
-    # Override action based on require directives, unless the action is
-    # set to cont (for fallthrough to subsequent tests).
-    local action="$3"
-    if [[ $3 != "cont" ]]; then
-        action=$(mkl_meta_get "MOD__$MKL_MODULE" "override_action" $3)
-    fi
-
-    # --fail-fatal option
-    [[ $MKL_FAILFATAL ]] && action="fail"
-
-    mkl_check_done "$1" "$2" "$action" "failed"
-
-    mkl_dbg "Check $1 ($2, action $action (originally $3)) failed: $4"
-
-
-    case $action in
-        fail)
-            # Check failed fatally, fail everything eventually
-            mkl_fail "$1" "$2" "$3" "$4$extra"
-            return 1
-            ;;
-
-        disable)
-            # Check failed, disable
-            [[ ! -z $2 ]] && mkl_mkvar_set "$1" "$2" "n"
-            return 1
-            ;;
-        ignore)
-            # Check failed but we ignore the results and set it anyway.
-            [[ ! -z $2 ]] && mkl_define_set "$1" "$2" "1"
-            [[ ! -z $2 ]] && mkl_mkvar_set "$1" "$2" "y"
-            return 1
-            ;;
-        cont)
-            # Check failed but we ignore the results and do nothing.
-            return 0
-            ;;
-    esac
-}
-
-
-
-
-###########################################################################
-#
-#
-# Output generators
-#
-#
-###########################################################################
-
-# Generate late variables.
-# Late variables are those referenced in command line option defaults
-# but then never set by --option.
-function mkl_generate_late_vars {
-    local n
-    for n in $MKL_LATE_VARS ; do
-        local func=${n%:*}
-        local safeopt=${func#opt_}
-        local val=${n#*:}
-        if mkl_in_list "$MKL_OPTS_SET" "$safeopt" ; then
-            # Skip options set explicitly with --option
-            continue
-        fi
-        # Expand variable references "\$foo" by calling eval
-        # and pass it opt_... function.
-        $func "$(eval echo $val)"
-    done
-}
-
-# Generate output files.
-# Must be called following a successful configure run.
-function mkl_generate {
-    local mf=
-    for mf in $MKL_GENERATORS ; do
-        MKL_MODULE=${mf%:*}
-        local func=${mf#*:}
-        $func || exit 1
-    done
-
-    mkl_write_mk "# Automatically generated by $0 $*"
-    mkl_write_mk "# Config variables"
-    mkl_write_mk "#"
-    mkl_write_mk "# Generated by:"
-    mkl_write_mk "# $MKL_CONFIGURE_ARGS"
-    mkl_write_mk ""
-
-    # This variable is used by Makefile.base to avoid multiple inclusions.
-    mkl_write_mk "MKL_MAKEFILE_CONFIG=y"
-
-    # Export colors to Makefile.config
-    mkl_write_mk "MKL_RED=\t${MKL_RED}"
-    mkl_write_mk "MKL_GREEN=\t${MKL_GREEN}"
-    mkl_write_mk "MKL_YELLOW=\t${MKL_YELLOW}"
-    mkl_write_mk "MKL_BLUE=\t${MKL_BLUE}"
-    mkl_write_mk "MKL_CLR_RESET=\t${MKL_CLR_RESET}"
-
-    local n=
-    for n in $MKL_MKVARS ; do
-	# Some special variables should be prefixable by the caller, so
-	# define them in the makefile as appends.
-	local op="="
-	case $n in
-	    CFLAGS|CPPFLAGS|CXXFLAGS|LDFLAGS|LIBS)
-		op="+="
-		;;
-	esac
-        mkl_write_mk "$n$op\t${!n}"
-    done
-    mkl_write_mk "# End of config variables"
-
-    MKL_OUTMK_FINAL=Makefile.config
-    mv $MKL_OUTMK $MKL_OUTMK_FINAL
-
-    echo "Generated $MKL_OUTMK_FINAL"
-
-    # Generate config.h
-    mkl_write_h "// Automatically generated by $0 $*"
-    mkl_write_h "#pragma once"
-    for n in $MKL_DEFINES ; do
-        mkl_write_h "${!n}"
-    done
-
-    MKL_OUTH_FINAL=config.h
-    mv $MKL_OUTH $MKL_OUTH_FINAL
-
-    echo "Generated $MKL_OUTH_FINAL"
-}
-
-# Remove file noisily, if it exists
-function mkl_rm {
-    if [[ -f $fname ]]; then
-        echo "Removing $fname"
-        rm -f "$fname"
-    fi
-}
-
-# Remove files generated by configure
-function mkl_clean {
-    for fname in Makefile.config config.h config.cache config.log ; do
-        mkl_rm "$fname"
-    done
-
-    local mf=
-    for mf in $MKL_CLEANERS ; do
-        MKL_MODULE=${mf%:*}
-        local func=${mf#*:}
-        $func || exit 1
-    done
-
-}
-
-
-# Print summary of successful configure run
-function mkl_summary {
-
-    echo "
-Configuration summary:"
-    local n=
-    for n in $MKL_MKVARS ; do
-        # Skip the boring booleans
-        if [[ $n == WITH_* || $n == WITHOUT_* || $n == HAVE_* || $n == def_* ]]; then
-            continue
-        fi
-        printf "  %-24s %s\n" "$n" "${!n}"
-    done
-}
-
-
-
-# Write to mk file
-# Argument:
-#  string ..
-function mkl_write_mk {
-    echo -e "$*" >> $MKL_OUTMK
-}
-
-# Write to header file
-# Argument:
-#  string ..
-function mkl_write_h {
-    echo -e "$*" >> $MKL_OUTH
-}
-
-
-
-###########################################################################
-#
-#
-# Logging and debugging
-#
-#
-###########################################################################
-
-# Debug print
-# Only visible on terminal if MKL_DEBUG is set.
-# Always written to config.log
-# Argument:
-#  string ..
-function mkl_dbg {
-    if [[ ! -z $MKL_DEBUG ]]; then
-        echo -e "${MKL_BLUE}DBG:$$: $*${MKL_CLR_RESET}" 1>&2
-    fi
-    echo "DBG: $*" >> $MKL_OUTDBG
-}
-
-# Error print (with color)
-# Always printed to terminal and config.log
-# Argument:
-#  string ..
-function mkl_err {
-    echo -e "${MKL_RED}$*${MKL_CLR_RESET}" 1>&2
-    echo "$*" >> $MKL_OUTDBG
-}
-
-# Same as mkl_err but without coloring
-# Argument:
-#  string ..
-function mkl_err0 {
-    echo -e "$*" 1>&2
-    echo "$*" >> $MKL_OUTDBG
-}
-
-# Standard print
-# Always printed to terminal and config.log
-# Argument:
-#  string ..
-function mkl_info {
-    echo -e "$*" 1>&2
-    echo -e "$*" >> $MKL_OUTDBG
-}
-
-
-
-
-
-
-
-###########################################################################
-#
-#
-# Misc helpers
-#
-#
-###########################################################################
-
-# Returns the absolute path (but not necessarily canonical) of the first argument
-function mkl_abspath {
-    echo $1 | sed -e "s|^\([^/]\)|$PWD/\1|"
-}
-
-# Returns true (0) if function $1 exists, else false (1)
-function mkl_func_exists {
-    declare -f "$1" > /dev/null
-    return $?
-}
-
-# Rename function.
-# Returns 0 on success or 1 if old function (origname) was not defined.
-# Arguments:
-#   origname
-#   newname
-function mkl_func_rename {
-    if ! mkl_func_exists $1 ; then
-        return 1
-    fi
-    local orig=$(declare -f $1)
-    local new="$2${orig#$1}"
-    eval "$new"
-    unset -f "$1"
-    return 0
-}
-
-
-# Push module function for later call by mklove.
-# The function is renamed to an internal name.
-# Arguments:
-#  list variable name
-#  module name
-#  function name
-function mkl_func_push {
-    local newfunc="__mkl__f_${2}_$(( MKL_IDNEXT++ ))"
-    if mkl_func_rename "$3" "$newfunc" ; then
-        mkl_var_append "$1" "$2:$newfunc"
-    fi
-}
-
-
-
-# Returns value, or the default string if value is empty.
-# Arguments:
-#  value
-#  default
-function mkl_def {
-    if [[ ! -z $1 ]]; then
-        echo $1
-    else
-        echo $2
-    fi
-}
-
-
-# Render a string (e.g., evaluate its $varrefs)
-# Arguments:
-#  string
-function mkl_render {
-    if [[ $* == *\$* ]]; then
-        eval "echo $*"
-    else
-        echo "$*"
-    fi
-}
-
-# Escape a string so that it becomes suitable for being an env variable.
-# This is a destructive operation and the original string cannot be restored.
-function mkl_env_esc {
-    echo $* | LC_ALL=C sed -e 's/[^a-zA-Z0-9_]/_/g'
-}
-
-# Convert arguments to upper case
-function mkl_upper {
-    echo "$*" | tr '[:lower:]' '[:upper:]'
-}
-
-# Convert arguments to lower case
-function mkl_lower {
-    echo "$*" | tr '[:upper:]' '[:lower:]'
-}
-
-
-# Checks if element is in list
-# Arguments:
-#   list
-#   element
-function mkl_in_list {
-    local n
-    for n in $1 ; do
-        [[ $n == $2 ]] && return 0
-    done
-    return 1
-}
-
-
-
-
-###########################################################################
-#
-#
-# Cache functionality
-#
-#
-###########################################################################
-
-
-# Write cache file
-function mkl_cache_write {
-    [[ ! -z "$MKL_NOCACHE" ]] && return 0
-    echo "# mklove configure cache file generated at $(date)" > config.cache
-    for n in $MKL_CACHEVARS ; do
-        echo "$n=${!n}" >> config.cache
-    done
-    echo "Generated config.cache"
-}
-
-
-# Read cache file
-function mkl_cache_read {
-    [[ ! -z "$MKL_NOCACHE" ]] && return 0
-    [ -f config.cache ] || return 1
-
-    echo "using cache file config.cache"
-
-    local ORIG_IFS=$IFS
-    IFS="$IFS="
-    while read -r n v ; do
-        [[ -z $n || $n = \#* || -z $v ]] && continue
-        mkl_var_set $n $v cache
-    done < config.cache
-    IFS=$ORIG_IFS
-}
-
-
-###########################################################################
-#
-#
-# Config name meta data
-#
-#
-###########################################################################
-
-# Set metadata for config name
-# This metadata is used by mkl in various situations
-# Arguments:
-#   config name
-#   metadata key
-#   metadata value (appended)
-function mkl_meta_set {
-    local metaname="mkl__$1__$2"
-    eval "$metaname=\"\$$metaname $3\""
-}
-
-# Returns metadata for config name
-# Arguments:
-#   config name
-#   metadata key
-#   default (optional)
-function mkl_meta_get {
-    local metaname="mkl__$1__$2"
-    if [[ ! -z ${!metaname} ]]; then
-        echo ${!metaname}
-    else
-        echo "$3"
-    fi
-}
-
-# Checks if metadata exists
-# Arguments:
-#   config name
-#   metadata key
-function mkl_meta_exists {
-    local metaname="mkl__$1__$2"
-    if [[ ! -z ${!metaname} ]]; then
-        return 0
-    else
-        return 1
-    fi
-}
-
-
-
-
-
-###########################################################################
-#
-#
-# Check framework
-#
-#
-###########################################################################
-
-
-# Print that a check is beginning to run
-# Returns 0 if a cached result was used (do not continue with your tests),
-# else 1.
-#
-# If the check should not be cachable then specify argument 3 as "no-cache",
-# this is useful when a check not only checks but actually sets config
-# variables itself (which is not recommended, but desired sometimes).
-#
-# Arguments:
-#  [ --verb "verb.." ]  (replace "checking for")
-#  config name
-#  define name
-#  action  (fail,cont,disable or no-cache)
-#  [ display name ]
-function mkl_check_begin {
-    local verb="checking for"
-    if [[ $1 == "--verb" ]]; then
-        verb="$2"
-        shift
-        shift
-    fi
-
-    local name=$(mkl_meta_get $1 name "$4")
-    [[ -z $name ]] && name="x:$1"
-
-    echo -n "$verb $name..."
-    if [[ $3 != "no-cache" ]]; then
-        local status=$(mkl_var_get "MKL_STATUS_$1")
-        # Check cache (from previous run or this one).
-        # Only use the cached value if the cached check succeeded:
-        # it is more likely that a failed check has been fixed than the other
-        # way around.
-        if [[ ! -z $status && ( $status = "ok" ) ]]; then
-            mkl_check_done "$1" "$2" "$3" $status "cached"
-            return 0
-        fi
-    fi
-    return 1
-}
-
-# Print that a check is done
-# Arguments:
-#  config name
-#  define name
-#  action
-#  status (ok|failed)
-#  extra-info (optional)
-function mkl_check_done {
-    # Clean up configname to be a safe varname
-    local cname=${1//-/_}
-    mkl_var_set "MKL_STATUS_$cname" "$4" cache
-
-    local extra=""
-    if [[ $4 = "failed" ]]; then
-        local clr=$MKL_YELLOW
-        extra=" ($3)"
-        case "$3" in
-            fail)
-                clr=$MKL_RED
-                ;;
-            cont)
-                extra=""
-                ;;
-        esac
-        echo -e " $clr$4$MKL_CLR_RESET${extra}"
-    else
-        [[ ! -z $2 ]] && mkl_define_set "$cname" "$2" "1"
-        [[ ! -z $2 ]] && mkl_mkvar_set  "$cname" "$2" "y"
-        [ ! -z "$5" ] && extra=" ($5)"
-        echo -e " $MKL_GREEN$4${MKL_CLR_RESET}$extra"
-    fi
-}
-
-
-# Perform configure check by compiling source snippet
-# Arguments:
-#  [--ldflags="..." ]  (appended after "compiler arguments" below)
-#  config name
-#  define name
-#  action (fail|disable)
-#  compiler (CC|CXX)
-#  compiler arguments (optional "", example: "-lzookeeper")
-#  source snippet
-function mkl_compile_check {
-    local ldf=
-    if [[ $1 == --ldflags=* ]]; then
-	ldf=${1#*=}
-	shift
-    fi
-    mkl_check_begin "$1" "$2" "$3" "$1 (by compile)" && return $?
-
-    local cflags=
-
-    if [[ $4 = "CXX" ]]; then
-        local ext=cpp
-        cflags="$(mkl_mkvar_get CXXFLAGS)"
-    else
-        local ext=c
-        cflags="$(mkl_mkvar_get CFLAGS)"
-    fi
-
-    local srcfile=$(mktemp _mkltmpXXXXXX)
-    mv "$srcfile" "${srcfile}.$ext"
-    srcfile="$srcfile.$ext"
-    echo "$6" > $srcfile
-    echo "
-int main () { return 0; }
-" >> $srcfile
-
-    local cmd="${!4} $cflags $(mkl_mkvar_get CPPFLAGS) -Wall -Werror $srcfile -o ${srcfile}.o $ldf $(mkl_mkvar_get LDFLAGS) $5";
-    mkl_dbg "Compile check $1 ($2): $cmd"
-
-    local output
-    output=$($cmd 2>&1)
-
-    if [[ $? != 0 ]] ; then
-        mkl_dbg "compile check for $1 ($2) failed: $cmd: $output"
-        mkl_check_failed "$1" "$2" "$3" "compile check failed:
-CC: $4
-flags: $5
-$cmd:
-$output
-source: $6"
-        local ret=1
-    else
-        mkl_check_done "$1" "$2" "$3" "ok"
-        local ret=0
-    fi
-
-    # OSX XCode toolchain creates dSYM directories when -g is set,
-    # delete them specifically.
-    rm -rf "$srcfile" "${srcfile}.o" "$srcfile*dSYM"
-
-    return $ret
-}
-
-
-# Try to link with a library.
-# Arguments:
-#  config name
-#  define name
-#  action (fail|disable)
-#  linker flags (e.g. "-lpthreads")
-function mkl_link_check {
-    mkl_check_begin "$1" "$2" "$3" "$1 (by linking)" && return $?
-
-    local srcfile=$(mktemp _mktmpXXXXXX)
-    echo "int main () { return 0; }" > $srcfile
-
-    local cmd="${CC} $(mkl_mkvar_get LDFLAGS) -c $srcfile -o ${srcfile}.o $4";
-    mkl_dbg "Link check $1 ($2): $cmd"
-
-    local output
-    output=$($cmd 2>&1)
-
-    if [[ $? != 0 ]] ; then
-        mkl_dbg "link check for $1 ($2) failed: $output"
-        mkl_check_failed "$1" "$2" "$3" "compile check failed:
-$output"
-        local ret=1
-    else
-        mkl_check_done "$1" "$2" "$3" "ok" "$4"
-        local ret=0
-    fi
-
-    rm -f $srcfile*
-    return $ret
-}
-
-
-
-# Tries to figure out if we can use a static library or not.
-# Arguments:
-#  library name   (e.g. -lrdkafka)
-#  compiler flags (optional "", e.g: "-lyajl")
-# Returns/outputs:
-#  New list of compiler flags
-function mkl_lib_check_static {
-    local libname=$1
-    local libs=$2
-    local arfile_var=STATIC_LIB_${libname#-l}
-
-    mkl_dbg "Check $libname for static library (libs $libs, arfile variable $arfile_var=${!arfile_var})"
-
-    # If STATIC_LIB_<libname_without_-l> specifies an existing .a file we
-    # use that instead.
-    if [[ -f ${!arfile_var} ]]; then
-	libs=$(echo $libs | sed -e "s|$libname|${!arfile_var}|g")
-    elif [[ $HAS_LDFLAGS_STATIC == y ]]; then
-        libs=$(echo $libs | sed -e "s|$libname|${LDFLAGS_STATIC} $libname ${LDFLAGS_DYNAMIC}|g")
-    else
-        mkl_dbg "$libname: Neither $arfile_var specified or static linker flags supported: static linking probably won't work"
-    fi
-
-    echo $libs
-}
-
-
-# Checks that the specified lib is available through a number of methods.
-# compiler flags are automatically appended to "LIBS" mkvar on success.
-#
-# If STATIC_LIB_<libname_without_-l> is set to the path of an <libname>.a file
-# it will be used instead of -l<libname>.
-#
-# Arguments:
-#  [--static=<lib>]  (allows static linking (--enable-static) for the
-#                     library provided, e.g.: --static=-lrdkafka "librdkafka"..)
-#  [--libname=<lib>] (library name if different from config name, such as
-#                     when the libname includes a dash)
-#  config name (library name (for pkg-config))
-#  define name
-#  action (fail|disable|cont)
-#  compiler (CC|CXX)
-#  compiler flags (optional "", e.g: "-lyajl")
-#  source snippet
-function mkl_lib_check {
-
-    local is_static=0
-    local staticopt=
-    if [[ $1 == --static* ]]; then
-        staticopt=$1
-        shift
-    fi
-
-    local libnameopt=
-    local libname=$1
-    if [[ $1 == --libname* ]]; then
-        libnameopt=$1
-        libname="${libnameopt#*=}"
-        shift
-    fi
-
-    # pkg-config result (0=ok)
-    local pkg_conf_failed=1
-    if [[ $WITH_PKGCONFIG == "y" ]]; then
-        # Let pkg-config populate CFLAGS, et.al.
-        mkl_pkg_config_check $staticopt $libnameopt "$1" "" cont
-        pkg_conf_failed=$?
-    fi
-
-    local libs=""
-    if [[ $pkg_conf_failed ]]; then
-        libs="$5"
-        if [[ $WITH_STATIC_LINKING == y && ! -z $staticopt ]]; then
-            libs=$(mkl_lib_check_static "${staticopt#*=}" "$libs")
-            is_static=1
-        fi
-    fi
-
-    if ! mkl_compile_check "$1" "$2" "$3" "$4" "$libs" "$6"; then
-        return 1
-    fi
-
-    if [[ $pkg_conf_failed == 1 ]]; then
-        if [[ $is_static == 1 ]]; then
-            mkl_mkvar_prepend "$1" LIBS "$libs"
-        else
-            mkl_mkvar_append "$1" LIBS "$libs"
-        fi
-    fi
-
-    return 0
-}
-
-
-# Check for library with pkg-config
-# Automatically sets CFLAGS and LIBS from pkg-config information.
-# Arguments:
-#  [--static=<lib>]  (allows static linking (--enable-static) for the
-#                     library provided, e.g.: --static=-lrdkafka "librdkafka"..)
-#  [--libname=<lib>] (library name if different from config name, such as
-#                     when the libname includes a dash)
-#  config name
-#  define name
-#  action (fail|disable|ignore)
-function mkl_pkg_config_check {
-
-    local staticopt=
-    if [[ $1 == --static* ]]; then
-        staticopt=$1
-        shift
-    fi
-
-    local libname=$1
-    if [[ $1 == --libname* ]]; then
-        libname="${libnameopt#*=}"
-        shift
-    fi
-
-    local cname="${1}_PKGCONFIG"
-    mkl_check_begin "$cname" "$2" "no-cache" "$1 (by pkg-config)" && return $?
-
-    local cflags=
-    local cmd="${PKG_CONFIG} --short-errors --cflags $libname"
-    mkl_dbg "pkg-config check $libname ($2): $cmd"
-
-    cflags=$($cmd 2>&1)
-    if [[ $? != 0 ]]; then
-        mkl_dbg "'$cmd' failed: $cflags"
-        mkl_check_failed "$cname" "$2" "$3" "'$cmd' failed:
-$cflags"
-        return 1
-    fi
-
-    local libs=
-    libs=$(${PKG_CONFIG} --short-errors --libs $libname 2>&1)
-    if [[ $? != 0 ]]; then
-        mkl_dbg "${PKG_CONFIG} --libs $libname failed: $libs"
-        mkl_check_failed "$cname" "$2" "$3" "pkg-config --libs failed"
-        return 1
-    fi
-
-    mkl_mkvar_append $1 "CFLAGS" "$cflags"
-
-    if [[ $WITH_STATIC_LINKING == y && ! -z $staticopt ]]; then
-        libs=$(mkl_lib_check_static "${staticopt#*=}" "$libs")
-        mkl_mkvar_prepend "$1" LIBS "$libs"
-    else
-        mkl_mkvar_append "$1" LIBS "$libs"
-    fi
-
-    mkl_check_done "$1" "$2" "$3" "ok"
-
-    return 0
-}
-
-
-# Check that a command runs and exits succesfully.
-# Arguments:
-#  config name
-#  define name (optional, can be empty)
-#  action
-#  command
-function mkl_command_check {
-    mkl_check_begin "$1" "$2" "$3" "$1 (by command)" && return $?
-
-    local out=
-    out=$($4 2>&1)
-    if [[ $? != 0 ]]; then
-        mkl_dbg "$1: $2: $4 failed: $out"
-        mkl_check_failed "$1" "$2" "$3" "command '$4' failed:
-$out"
-        return 1
-    fi
-
-    mkl_check_done "$1" "$2" "$3" "ok"
-
-    return 0
-}
-
-
-# Check that a program is executable, but will not execute it.
-# Arguments:
-#  config name
-#  define name (optional, can be empty)
-#  action
-#  program name  (e.g, objdump)
-function mkl_prog_check {
-    mkl_check_begin --verb "checking executable" "$1" "$2" "$3" "$1" && return $?
-
-    local out=
-    out=$(command -v "$4" 2>&1)
-    if [[ $? != 0 ]]; then
-        mkl_dbg "$1: $2: $4 is not executable: $out"
-        mkl_check_failed "$1" "$2" "$3" "$4 is not executable"
-        return 1
-    fi
-
-    mkl_check_done "$1" "$2" "$3" "ok"
-
-    return 0
-}
-
-
-
-
-# Checks that the check for the given config name passed.
-# This does not behave like the other checks, if the given config name passed
-# its test then nothing is printed. Else the configure will fail.
-# Arguments:
-#  checked config name
-function mkl_config_check {
-    local status=$(mkl_var_get "MKL_STATUS_$1")
-    [[ $status = "ok" ]] && return 0
-    mkl_fail $1 "" "fail" "$MKL_MODULE requires $1"
-    return 1
-}
-
-
-# Checks that all provided config names are set.
-# Arguments:
-#  config name
-#  define name
-#  action
-#  check_config_name1
-#  check_config_name2..
-function mkl_config_check_all {
-    local cname=
-    local res="ok"
-    echo start this now for $1
-    for cname in ${@:4}; do
-        local st=$(mkl_var_get "MKL_STATUS_$cname")
-        [[ $status = "ok" ]] && continue
-        mkl_fail $1 $2 $3 "depends on $cname"
-        res="failed"
-    done
-
-    echo "try res $res"
-    mkl_check_done "$1" "$2" "$3" $res
-}
-
-
-# Check environment variable
-# Arguments:
-#  config name
-#  define name
-#  action
-#  environment variable
-function mkl_env_check {
-    mkl_check_begin "$1" "$2" "$3" "$1 (by env $4)" && return $?
-
-    if [[ -z ${!4} ]]; then
-        mkl_check_failed "$1" "$2" "$3" "environment variable $4 not set"
-        return 1
-    fi
-
-    mkl_check_done "$1" "$2" "$3" "ok" "${!4}"
-
-    return 0
-}
-
-
-# Run all checks
-function mkl_checks_run {
-    # Set up common variables
-    mkl_allvar_set "" MKL_APP_NAME $(mkl_meta_get description name)
-    mkl_allvar_set "" MKL_APP_DESC_ONELINE "$(mkl_meta_get description oneline)"
-
-    # Call checks functions in dependency order
-    local mf
-    for mf in $MKL_CHECKS ; do
-        MKL_MODULE=${mf%:*}
-        local func=${mf#*:}
-
-        if mkl_func_exists $func ; then
-            $func
-        else
-            mkl_err "Check function $func from $MKL_MODULE disappeared ($mf)"
-        fi
-        unset MKL_MODULE
-    done
-}
-
-
-# Check for color support in terminal.
-# If the terminal supports colors, the function will alter
-#  MKL_RED
-#  MKL_GREEN
-#  MKL_YELLOW
-#  MKL_BLUE
-#  MKL_CLR_RESET
-function mkl_check_terminal_color_support {
-    local use_color=false
-    local has_tput=false
-
-    if [[ -z ${TERM} ]]; then
-        # tput and dircolors require $TERM
-        mkl_dbg "\$TERM is not set! Cannot check for color support in terminal."
-        return 1
-    elif hash tput 2>/dev/null; then
-        has_tput=true
-        [[ $(tput colors 2>/dev/null) -ge 8 ]] && use_color=true
-        mkl_dbg "tput reports color support: ${use_color}"
-    elif hash dircolors 2>/dev/null; then
-        # Enable color support only on colorful terminals.
-        # dircolors --print-database uses its own built-in database
-        # instead of using /etc/DIR_COLORS. Try to use the external file
-        # first to take advantage of user additions.
-        local safe_term=${TERM//[^[:alnum:]]/?}
-        local match_lhs=""
-        [[ -f ~/.dir_colors   ]] && match_lhs="${match_lhs}$(<~/.dir_colors)"
-        [[ -f /etc/DIR_COLORS ]] && match_lhs="${match_lhs}$(</etc/DIR_COLORS)"
-        [[ -z ${match_lhs}    ]] && match_lhs=$(dircolors --print-database)
-        [[ $'\n'${match_lhs} == *$'\n'"TERM "${safe_term}* ]] && use_color=true
-        mkl_dbg "dircolors reports color support: ${use_color}"
-    fi
-
-    if ${use_color}; then
-        if ${has_tput}; then
-            # In theory, user could have set different escape sequences
-            # Because tput is available we can use it to query the right values ...
-            mkl_dbg "Using color escape sequences from tput"
-            MKL_RED=$(tput setaf 1)
-            MKL_GREEN=$(tput setaf 2)
-            MKL_YELLOW=$(tput setaf 3)
-            MKL_BLUE=$(tput setaf 4)
-            MKL_CLR_RESET=$(tput sgr0)
-        else
-            mkl_dbg "Using hard-coded ANSI color escape sequences"
-            MKL_RED="\033[031m"
-            MKL_GREEN="\033[032m"
-            MKL_YELLOW="\033[033m"
-            MKL_BLUE="\033[034m"
-            MKL_CLR_RESET="\033[0m"
-        fi
-    else
-        mkl_dbg "Did not detect color support in \"$TERM\" terminal!"
-    fi
-
-    return 0
-}
-
-
-
-
-###########################################################################
-#
-#
-# Module functionality
-#
-#
-###########################################################################
-
-# Downloads module from repository.
-# Arguments:
-#  module name
-# Returns:
-#  module file name
-function mkl_module_download {
-    local modname="$1"
-    local url="$MKL_REPO_URL/modules/configure.$modname"
-    local tmpfile=""
-
-    fname="${MKLOVE_DIR}/modules/configure.$modname"
-
-    if [[ $url != http*://* ]]; then
-        # Local path, just copy file.
-        if [[ ! -f $url ]]; then
-            mkl_err "Module $modname not found at $url"
-            return 1
-        fi
-
-        if ! cp "$url" "$fname" ; then
-            mkl_err "Failed to copy $url to $fname"
-            return 1
-        fi
-
-        echo "$fname"
-        return 0
-    fi
-
-    # Download
-    mkl_info "${MKL_BLUE}downloading missing module $modname from $url${MKL_CLR_RESET}"
-
-    tmpfile=$(mktemp _mkltmpXXXXXX)
-    local out=
-    out=$(wget -nv -O "$tmpfile" "$url" 2>&1)
-
-    if [[ $? -ne 0 ]]; then
-        rm -f "$tmpfile"
-        mkl_err "Failed to download $modname:"
-        mkl_err0 $out
-        return 1
-    fi
-
-    # Move downloaded file into place replacing the old file.
-    mv "$tmpfile" "$fname" || return 1
-
-    # "Return" filename
-    echo "$fname"
-
-    return 0
-}
-
-
-# Load module by name or filename
-# Arguments:
-#   "require"|"try"
-#   filename
-# [ module arguments ]
-function mkl_module_load {
-    local try=$1
-    shift
-    local fname=$1
-    shift
-    local modname=${fname#*configure.}
-    local bypath=1
-
-    # Check if already loaded
-    if mkl_in_list "$MKL_MODULES" "$modname"; then
-        return 0
-    fi
-
-    if [[ $fname = $modname ]]; then
-        # Module specified by name, find the file.
-        bypath=0
-        for fname in configure.$modname \
-            ${MKLOVE_DIR}/modules/configure.$modname ; do
-            [[ -s $fname ]] && break
-        done
-    fi
-
-    # Calling module
-    local cmod=$MKL_MODULE
-    [[ -z $cmod ]] && cmod="base"
-
-    if [[ ! -s $fname ]]; then
-        # Attempt to download module, if permitted
-        if [[ $MKL_NO_DOWNLOAD != 0 || $bypath == 1 ]]; then
-            mkl_err "Module $modname not found at $fname (required by $cmod) and downloads disabled"
-            if [[ $try = "require" ]]; then
-                mkl_fail "$modname" "none" "fail" \
-                    "Module $modname not found (required by $cmod) and downloads disabled"
-            fi
-            return 1
-        fi
-
-        fname=$(mkl_module_download "$modname")
-        if [[ $? -ne 0 ]]; then
-            mkl_err "Module $modname not found (required by $cmod)"
-            if [[ $try = "require" ]]; then
-                mkl_fail "$modname" "none" "fail" \
-                    "Module $modname not found (required by $cmod)"
-                return 1
-            fi
-        fi
-
-        # Now downloaded, try loading the module again.
-        mkl_module_load $try "$fname" "$@"
-        return $?
-    fi
-
-    # Set current module
-    local save_MKL_MODULE=$MKL_MODULE
-    MKL_MODULE=$modname
-
-    mkl_dbg "Loading module $modname (required by $cmod) from $fname"
-
-    # Source module file (positional arguments are available to module)
-    source $fname
-
-    # Restore current module (might be recursive)
-    MKL_MODULE=$save_MKL_MODULE
-
-    # Add module to list of modules
-    mkl_var_append MKL_MODULES $modname
-
-    # Rename module's special functions so we can call them separately later.
-    mkl_func_rename "options" "${modname}_options"
-    mkl_func_push MKL_CHECKS "$modname" "checks"
-    mkl_func_push MKL_GENERATORS "$modname" "generate"
-    mkl_func_push MKL_CLEANERS "$modname" "clean"
-}
-
-
-# Require and load module
-# Must only be called from module file outside any function.
-# Arguments:
-#  [ --try ]    Don't fail if the module doesn't exist
-#  module1
-#  [ "must" "pass" ]
-#  [ module arguments ... ]
-function mkl_require {
-    local try="require"
-    if [[ $1 = "--try" ]]; then
-        local try="try"
-        shift
-    fi
-
-    local mod=$1
-    shift
-    local override_action=
-
-    # Check for cyclic dependencies
-    if mkl_in_list "$MKL_LOAD_STACK" "$mod"; then
-        mkl_err "Cyclic dependency detected while loading $mod module:"
-        local cmod=
-        local lmod=$mod
-        for cmod in $MKL_LOAD_STACK ; do
-            mkl_err "  $lmod required by $cmod"
-            lmod=$cmod
-        done
-        mkl_fail base "" fail "Cyclic dependency detected while loading module $mod"
-        return 1
-    fi
-
-    mkl_var_prepend MKL_LOAD_STACK "$mod"
-
-
-    if [[ "$1 $2" == "must pass" ]]; then
-        shift
-        shift
-        override_action="fail"
-    fi
-
-    if [[ ! -z $override_action ]]; then
-        mkl_meta_set "MOD__$mod" "override_action" "$override_action"
-    fi
-
-
-    mkl_module_load $try $mod "$@"
-    local ret=$?
-
-    mkl_var_shift MKL_LOAD_STACK
-
-    return $ret
-}
-
-
-
-###########################################################################
-#
-#
-# Usage options
-#
-#
-###########################################################################
-
-
-MKL_USAGE="Usage: ./configure [OPTIONS...]
-
- mklove configure script - mklove, not autoconf
- Copyright (c) 2014-2015 Magnus Edenhill - https://github.com/edenhill/mklove
-"
-
-function mkl_usage {
-    echo "$MKL_USAGE"
-    local name=$(mkl_meta_get description name)
-
-    if [[ ! -z ${name} ]]; then
-	echo " $name - $(mkl_meta_get description oneline)
- $(mkl_meta_get description copyright)
-"
-    fi
-
-    local og
-    for og in $MKL_USAGE_GROUPS ; do
-        og="MKL_USAGE_GROUP__$og"
-        echo "${!og}"
-    done
-
-    echo "Honoured environment variables:
-  CC, CPP, CXX, CFLAGS, CPPFLAGS, CXXFLAGS, LDFLAGS, LIBS,
-  LD, NM, OBJDUMP, STRIP, PKG_CONFIG, PKG_CONFIG_PATH,
-  STATIC_LIB_<libname>=.../libname.a
-
-"
-
-}
-
-
-
-# Add usage option informative text
-# Arguments:
-#  text
-function mkl_usage_info {
-    MKL_USAGE="$MKL_USAGE
-$1"
-}
-
-
-# Add option to usage output
-# Arguments:
-#  option group ("Standard", "Cross-Compilation", etc..)
-#  variable name
-#  option ("--foo=feh")
-#  help
-#  default (optional)
-#  assignvalue (optional, default:"y")
-#  function block (optional)
-function mkl_option {
-    local optgroup=$1
-    local varname=$2
-
-    # Fixed width between option name and help in usage output
-    local pad="                                   "
-    if [[ ${#3} -lt ${#pad} ]]; then
-        pad=${pad:0:$(expr ${#pad} - ${#3})}
-    else
-        pad=""
-    fi
-
-    # Add to usage output
-    local optgroup_safe=$(mkl_env_esc $optgroup)
-    if ! mkl_in_list "$MKL_USAGE_GROUPS" "$optgroup_safe" ; then
-        mkl_env_append MKL_USAGE_GROUPS "$optgroup_safe"
-        mkl_env_set "MKL_USAGE_GROUP__$optgroup_safe" "$optgroup options:
-"
-    fi
-
-    local defstr=""
-    [[ ! -z $5 ]] && defstr=" [$5]"
-    mkl_env_append "MKL_USAGE_GROUP__$optgroup_safe" "  $3 $pad $4$defstr
-"
-
-    local optname="${3#--}"
-    local safeopt=
-    local optval=""
-    if [[ $3 == *=* ]]; then
-        optname="${optname%=*}"
-        optval="${3#*=}"
-    fi
-
-    safeopt=$(mkl_env_esc $optname)
-
-    mkl_meta_set "MKL_OPT_ARGS" "$safeopt" "$optval"
-
-    #
-    # Optional variable scoping by prefix: "env:", "mk:", "def:"
-    #
-    local setallvar="mkl_allvar_set ''"
-    local setmkvar="mkl_mkvar_set ''"
-
-    if [[ $varname = env:* ]]; then
-        # Set environment variable (during configure runtime only)
-        varname=${varname#*:}
-        setallvar=mkl_env_set
-        setmkvar=mkl_env_set
-    elif [[ $varname = mk:* ]]; then
-        # Set Makefile.config variable
-        varname=${varname#*:}
-        setallvar="mkl_mkvar_append ''"
-        setmkvar="mkl_mkvar_append ''"
-    elif [[ $varname = def:* ]]; then
-        # Set config.h define
-        varname=${varname#*:}
-        setallvar="mkl_define_set ''"
-        setmkvar="mkl_define_set ''"
-    fi
-
-
-    if [[ ! -z $7 ]]; then
-        # Function block specified.
-        eval "function opt_$safeopt { $7 }"
-    else
-    # Add default implementation of function simply setting the value.
-    # Application may override this by redefining the function after calling
-    # mkl_option.
-        if [[ $optval = "PATH" ]]; then
-        # PATH argument: make it an absolute path.
-        # Only set the make variable (not config.h)
-            eval "function opt_$safeopt { $setmkvar $varname \"\$(mkl_abspath \$(mkl_render \$1))\"; }"
-        else
-        # Standard argument: simply set the value
-            if [[ -z "$6" ]]; then
-                eval "function opt_$safeopt { $setallvar $varname \"\$1\"; }"
-            else
-                eval "function opt_$safeopt { $setallvar $varname \"$6\"; }"
-            fi
-        fi
-    fi
-
-    # If default value is provided and does not start with "$" (variable ref)
-    # then set it right away.
-    # $ variable refs are set after all checks have run during the
-    # generating step.
-    if [[ ${#5} != 0 ]] ; then
-        if [[ $5 = *\$* ]]; then
-            mkl_var_append "MKL_LATE_VARS" "opt_$safeopt:$5"
-        else
-            opt_$safeopt $5
-        fi
-    fi
-
-    if [[ ! -z $varname ]]; then
-        # Add variable to list
-        MKL_CONFVARS="$MKL_CONFVARS $varname"
-    fi
-
-}
-
-
-
-# Adds a toggle (--enable-X, --disable-X) option.
-# Arguments:
-#  option group   ("Standard", ..)
-#  variable name  (WITH_FOO)
-#  option         (--enable-foo)
-#  help           ("foo.." ("Enable" and "Disable" will be prepended))
-#  default        (y or n)
-
-function mkl_toggle_option {
-
-    # Add option argument
-    mkl_option "$1" "$2" "$3" "$4" "$5"
-
-    # Add corresponding "--disable-foo" option for "--enable-foo".
-    local disname="${3/--enable/--disable}"
-    local dishelp="${4/Enable/Disable}"
-    mkl_option "$1" "$2" "$disname" "$dishelp" "" "n"
-}
-
-# Adds a toggle (--enable-X, --disable-X) option with builtin checker.
-# This is the library version.
-# Arguments:
-#  option group   ("Standard", ..)
-#  config name    (foo, must be same as pkg-config name)
-#  variable name  (WITH_FOO)
-#  action         (fail or disable)
-#  option         (--enable-foo)
-#  help           (defaults to "Enable <config name>")
-#  linker flags   (-lfoo)
-#  default        (y or n)
-
-function mkl_toggle_option_lib {
-
-    local help="$6"
-    [[ -z "$help" ]] && help="Enable $2"
-
-    # Add option argument
-    mkl_option "$1" "$3" "$5" "$help" "$8"
-
-    # Add corresponding "--disable-foo" option for "--enable-foo".
-    local disname="${5/--enable/--disable}"
-    local dishelp="${help/Enable/Disable}"
-    mkl_option "$1" "$3" "$disname" "$dishelp" "" "n"
-
-    # Create checks
-    eval "function _tmp_func { mkl_lib_check \"$2\" \"$3\" \"$4\" CC \"$7\"; }"
-    mkl_func_push MKL_CHECKS "$MKL_MODULE" _tmp_func
-}
-
-
-
-
-
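A note on the output side of the framework above: mkl_define_set() emits values prefixed with "code:" verbatim, leaves plain integer/hex values unquoted and quotes everything else, mkl_allvar_set() maps y/n to 1/0 for the define, and mkl_generate() writes the collected defines to config.h after a "#pragma once". A generated config.h fragment might therefore look roughly like the following C header sketch (the specific names and the string value are illustrative, not taken from an actual run):

    // Automatically generated by ./configure ...
    #pragma once
    // __atomic_32
    #define HAVE_ATOMICS_32 1
    // atomic_32
    #define ATOMIC_OP32(OP1,OP2,PTR,VAL) __atomic_ ## OP1 ## _ ## OP2(PTR, VAL, __ATOMIC_SEQ_CST)
    #define MKL_APP_DESC_ONELINE "example one-line description"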

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/mklove/modules/configure.builtin
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/mklove/modules/configure.builtin b/thirdparty/librdkafka-0.11.1/mklove/modules/configure.builtin
deleted file mode 100644
index 546cbb2..0000000
--- a/thirdparty/librdkafka-0.11.1/mklove/modules/configure.builtin
+++ /dev/null
@@ -1,62 +0,0 @@
-#!/bin/bash
-#
-# mklove builtin checks and options
-# Sets:
-#  prefix, etc..
-
-
-mkl_option "Standard" prefix "--prefix=PATH" \
-    "Install arch-independent files in PATH" "/usr/local"
-mkl_option "Standard" exec_prefix "--exec-prefix=PATH" \
-    "Install arch-dependent files in PATH" "\$prefix"
-mkl_option "Standard" bindir "--bindir=PATH" "User executables" "\$exec_prefix/bin"
-mkl_option "Standard" sbindir "--sbindir=PATH" "System admin executables" \
-    "\$exec_prefix/sbin"
-mkl_option "Standard" libexecdir "--libexecdir=PATH" "Program executables" \
-    "\$exec_prefix/libexec"
-mkl_option "Standard" datadir "--datadir=PATH" "Read-only arch-independent data" \
-    "\$prefix/share"
-mkl_option "Standard" sysconfdir "--sysconfdir=PATH" "Configuration data" \
-    "\$prefix/etc"
-mkl_option "Standard" sharedstatedir "--sharedstatedir=PATH" \
-    "Modifiable arch-independent data" "\$prefix/com"
-mkl_option "Standard" localstatedir "--localstatedir=PATH" \
-    "Modifiable local state data" "\$prefix/var"
-mkl_option "Standard" libdir "--libdir=PATH" "Libraries" "\$exec_prefix/lib"
-mkl_option "Standard" includedir "--includedir=PATH" "C/C++ header files" \
-    "\$prefix/include"
-mkl_option "Standard" infodir "--infodir=PATH" "Info documentation" "\$prefix/info"
-mkl_option "Standard" mandir "--mandir=PATH" "Manual pages" "\$prefix/man"
-
-mkl_option "Configure tool" "" "--list-modules" "List loaded mklove modules"
-mkl_option "Configure tool" "" "--list-checks" "List checks"
-mkl_option "Configure tool" env:MKL_FAILFATAL "--fail-fatal" "All failures are fatal"
-mkl_option "Configure tool" env:MKL_NOCACHE "--no-cache" "Dont use or generate config.cache"
-mkl_option "Configure tool" env:MKL_DEBUG "--debug" "Enable configure debugging"
-mkl_option "Configure tool" env:MKL_CLEAN "--clean" "Remove generated configure files"
-mkl_option "Configure tool" "" "--reconfigure" "Rerun configure with same arguments as last run"
-mkl_option "Configure tool" env:MKL_NO_DOWNLOAD "--no-download" "Disable downloads of required mklove modules"
-mkl_option "Configure tool" env:MKL_UPDATE_MODS "--update-modules" "Update modules from global repository"
-mkl_option "Configure tool" env:MKL_REPO_URL "--repo-url=URL_OR_PATH" "Override mklove modules repo URL" "$MKL_REPO_URL"
-mkl_option "Configure tool" "" "--help" "Show configure usage"
-
-
-mkl_toggle_option "Compatibility" "mk:MKL_MAINT_MODE" "--enable-maintainer-mode" "Maintainer mode (no-op)"
-
-mkl_option "Configure tool" "mk:PROGRAM_PREFIX" "--program-prefix=PFX" "Program prefix"
-
-mkl_option "Compatibility" "mk:DISABL_DEP_TRACK" "--disable-dependency-tracking" "Disable dependency tracking (no-op)"
-mkl_option "Compatibility" "mk:DISABL_SILENT_RULES" "--disable-silent-rules" "Verbose build output (no-op)"
-
-
-function checks {
-
-    if [[ ! -z $libdir ]]; then
-	mkl_mkvar_append "libdir" LDFLAGS "-L${libdir}"
-    fi
-
-    if [[ ! -z $includedir ]]; then
-	mkl_mkvar_append "includedir" CPPFLAGS "-I${includedir}"
-    fi
-
-}

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/mklove/modules/configure.cc
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/mklove/modules/configure.cc b/thirdparty/librdkafka-0.11.1/mklove/modules/configure.cc
deleted file mode 100644
index 79ce7fb..0000000
--- a/thirdparty/librdkafka-0.11.1/mklove/modules/configure.cc
+++ /dev/null
@@ -1,178 +0,0 @@
-#!/bin/bash
-#
-# Compiler detection
-# Sets:
-#  CC, CXX, CFLAGS, CPPFLAGS, LDFLAGS, ARFLAGS, PKG_CONFIG, INSTALL, MBITS
-
-
-mkl_require host
-
-function checks {
-
-    # C compiler
-    mkl_meta_set "ccenv" "name" "C compiler from CC env"
-    if ! mkl_command_check "ccenv" "WITH_CC" cont "$CC --version"; then
-        if mkl_command_check "gcc" "WITH_GCC" cont "gcc --version"; then
-            CC=gcc
-        elif mkl_command_check "clang" "WITH_CLANG" cont "clang --version"; then
-            CC=clang
-        elif mkl_command_check "cc" "WITH_CC" fail "cc --version"; then
-            CC=cc
-        fi
-    fi
-    export CC="${CC}"
-    mkl_mkvar_set CC CC "$CC"
-
-    if [[ $MKL_CC_WANT_CXX == 1 ]]; then
-    # C++ compiler
-        mkl_meta_set "cxxenv" "name" "C++ compiler from CXX env"
-        if ! mkl_command_check "cxxenv" "WITH_CXX" cont "$CXX --version" ; then
-            mkl_meta_set "gxx" "name" "C++ compiler (g++)"
-            mkl_meta_set "clangxx" "name" "C++ compiler (clang++)"
-            mkl_meta_set "cxx" "name" "C++ compiler (c++)"
-            if mkl_command_check "gxx" "WITH_GXX" cont "g++ --version"; then
-                CXX=g++
-            elif mkl_command_check "clangxx" "WITH_CLANGXX" cont "clang++ --version"; then
-                CXX=clang++
-            elif mkl_command_check "cxx" "WITH_CXX" fail "c++ --version"; then
-                CXX=c++
-            fi
-        fi
-        export CXX="${CXX}"
-        mkl_mkvar_set "CXX" CXX $CXX
-    fi
-
-    # Handle machine bits, if specified.
-    if [[ ! -z "$MBITS" ]]; then
-	mkl_meta_set mbits_m name "mbits compiler flag (-m$MBITS)"
-	if mkl_compile_check mbits_m "" fail CC "-m$MBITS"; then
-	    mkl_mkvar_append CPPFLAGS CPPFLAGS "-m$MBITS"
-	    mkl_mkvar_append LDFLAGS LDFLAGS "-m$MBITS"
-	fi
-	if [[ -z "$ARFLAGS" && $MBITS == 64 && $MKL_DISTRO == "SunOS" ]]; then
-	    # Turn on 64-bit archives on SunOS
-	    mkl_mkvar_append ARFLAGS ARFLAGS "S"
-	fi
-    fi
-
-    # Provide prefix and checks for various other build tools.
-    local t=
-    for t in LD:ld NM:nm OBJDUMP:objdump STRIP:strip ; do
-        local tenv=${t%:*}
-        t=${t#*:}
-	local tval="${!tenv}"
-
-        [[ -z $tval ]] && tval="$t"
-
-        if mkl_prog_check "$t" "" disable "$tval" ; then
-            if [[ $tval != ${!tenv} ]]; then
-		export "$tenv"="$tval"
-	    fi
-            mkl_mkvar_set $tenv $tenv "$tval"
-        fi
-    done
-
-    # Compiler and linker flags
-    [[ ! -z $CFLAGS ]]   && mkl_mkvar_set "CFLAGS" "CFLAGS" "$CFLAGS"
-    [[ ! -z $CPPFLAGS ]] && mkl_mkvar_set "CPPFLAGS" "CPPFLAGS" "$CPPFLAGS"
-    [[ ! -z $CXXFLAGS ]] && mkl_mkvar_set "CXXFLAGS" "CXXFLAGS" "$CXXFLAGS"
-    [[ ! -z $LDFLAGS ]]  && mkl_mkvar_set "LDFLAGS" "LDFLAGS" "$LDFLAGS"
-    [[ ! -z $ARFLAGS ]]  && mkl_mkvar_set "ARFLAGS" "ARFLAGS" "$ARFLAGS"
-
-    if [[ $MKL_NO_DEBUG_SYMBOLS != "y" ]]; then
-        # Add debug symbol flag (-g)
-        # OSX 10.9 requires -gstrict-dwarf for some reason.
-        mkl_meta_set cc_g_dwarf name "debug symbols compiler flag (-g...)"
-        if [[ $MKL_DISTRO == "osx" ]]; then
-            if mkl_compile_check cc_g_dwarf "" cont CC "-gstrict-dwarf"; then
-                mkl_mkvar_append CPPFLAGS CPPFLAGS "-gstrict-dwarf"
-            else
-                mkl_mkvar_append CPPFLAGS CPPFLAGS "-g"
-            fi
-        else
-            mkl_mkvar_append CPPFLAGS CPPFLAGS "-g"
-        fi
-    fi
-
-
-    # pkg-config
-    if [ -z "$PKG_CONFIG" ]; then
-        PKG_CONFIG=pkg-config
-    fi
-
-    if mkl_command_check "pkgconfig" "WITH_PKGCONFIG" cont "$PKG_CONFIG --version"; then
-        export PKG_CONFIG
-    fi
-    mkl_mkvar_set "pkgconfig" PKG_CONFIG $PKG_CONFIG
-
-    [[ ! -z "$PKG_CONFIG_PATH" ]] && mkl_env_append PKG_CONFIG_PATH "$PKG_CONFIG_PATH"
-
-    # install
-    if [ -z "$INSTALL" ]; then
-	if [[ $MKL_DISTRO == "SunOS" ]]; then
-	    mkl_meta_set ginstall name "GNU install"
-	    if mkl_command_check ginstall "" ignore "ginstall --version"; then
-		INSTALL=ginstall
-	    else
-		INSTALL=install
-	    fi
-        else
-            INSTALL=install
-	fi
-    fi
-
-    if mkl_command_check "install" "WITH_INSTALL" cont "$INSTALL --version"; then
-        export INSTALL
-    fi
-    mkl_mkvar_set "install" INSTALL $INSTALL
-
-
-    # Enable profiling if desired
-    if [[ $WITH_PROFILING == y ]]; then
-        mkl_allvar_set "" "WITH_PROFILING" "y"
-        mkl_mkvar_append CPPFLAGS CPPFLAGS "-pg"
-        mkl_mkvar_append LDFLAGS LDFLAGS   "-pg"
-    fi
-
-    # Optimization
-    if [[ $WITHOUT_OPTIMIZATION == n ]]; then
-        mkl_mkvar_append CPPFLAGS CPPFLAGS "-O2"
-    else
-        mkl_mkvar_append CPPFLAGS CPPFLAGS "-O0"
-    fi
-
-    # Static linking
-    if [[ $WITH_STATIC_LINKING == y ]]; then
-        # LDFLAGS_STATIC is the LDFLAGS needed to enable static linking
-        # of subsequent libraries, while
-        # LDFLAGS_DYNAMIC is the LDFLAGS needed to enable dynamic linking.
-        if [[ $MKL_DISTRO != "osx" ]]; then
-            mkl_mkvar_set staticlinking LDFLAGS_STATIC  "-Wl,-Bstatic"
-            mkl_mkvar_set staticlinking LDFLAGS_DYNAMIC "-Wl,-Bdynamic"
-            mkl_mkvar_set staticlinking HAS_LDFLAGS_STATIC y
-        else
-            # OSX linker can't enable/disable static linking so we'll
-            # need to find the .a through STATIC_LIB_libname env var
-            mkl_mkvar_set staticlinking HAS_LDFLAGS_STATIC n
-        fi
-    fi
-}
-
-
-mkl_option "Compiler" "env:CC" "--cc=CC" "Build using C compiler CC" "\$CC"
-mkl_option "Compiler" "env:CXX" "--cxx=CXX" "Build using C++ compiler CXX" "\$CXX"
-mkl_option "Compiler" "ARCH" "--arch=ARCH" "Build for architecture" "$(uname -m)"
-mkl_option "Compiler" "CPU" "--cpu=CPU" "Build and optimize for specific CPU" "generic"
-mkl_option "Compiler" "MBITS" "--mbits=BITS" "Machine bits (32 or 64)" ""
-
-for n in CFLAGS CPPFLAGS CXXFLAGS LDFLAGS ARFLAGS; do
-    mkl_option "Compiler" "mk:$n" "--$n=$n" "Add $n flags"
-done
-
-mkl_option "Compiler" "env:PKG_CONFIG_PATH" "--pkg-config-path" "Extra paths for pkg-config"
-
-mkl_option "Compiler" "WITH_PROFILING" "--enable-profiling" "Enable profiling"
-mkl_option "Compiler" "WITH_STATIC_LINKING" "--enable-static" "Enable static linking"
-mkl_option "Compiler" "WITHOUT_OPTIMIZATION" "--disable-optimization" "Disable optimization flag to compiler" "n"
-mkl_option "Compiler" "env:MKL_NO_DEBUG_SYMBOLS" "--disable-debug-symbols" "Disable debugging symbols" "n"
-mkl_option "Compiler" "env:MKL_WANT_WERROR" "--enable-werror" "Enable compiler warnings as errors" "n"
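Worth noting about the compiler-flag probes above (the -m$MBITS and debug-symbol checks): they call mkl_compile_check with an empty source snippet, so the translation unit actually compiled is just the stub that mkl_compile_check appends, roughly as sketched below (the temporary file name and -m64 are placeholders):

    /* _mkltmpXXXXXX.c -- generated probe source; the snippet is empty,
     * only the stub appended by mkl_compile_check remains: */
    int main () { return 0; }

    /* compiled along the lines of:
     *   $CC $CFLAGS $CPPFLAGS -Wall -Werror _mkltmpXXXXXX.c -o _mkltmpXXXXXX.c.o $LDFLAGS -m64
     * and on success the flag is appended to CPPFLAGS/LDFLAGS via mkl_mkvar_append. */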


[31/51] [partial] nifi-minifi-cpp git commit: MINIFICPP-512 - upgrade to librdkafka 0.11.4

Posted by ph...@apache.org.
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdkafka_broker.h
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdkafka_broker.h b/thirdparty/librdkafka-0.11.1/src/rdkafka_broker.h
deleted file mode 100644
index a30f2bd..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdkafka_broker.h
+++ /dev/null
@@ -1,328 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012,2013 Magnus Edenhill
- * All rights reserved.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met: 
- * 
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer. 
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution. 
- * 
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#pragma once
-
-#include "rdkafka_feature.h"
-
-
-extern const char *rd_kafka_broker_state_names[];
-extern const char *rd_kafka_secproto_names[];
-
-struct rd_kafka_broker_s { /* rd_kafka_broker_t */
-	TAILQ_ENTRY(rd_kafka_broker_s) rkb_link;
-
-	int32_t             rkb_nodeid;
-#define RD_KAFKA_NODEID_UA -1
-
-	rd_sockaddr_list_t *rkb_rsal;
-	time_t              rkb_t_rsal_last;
-        const rd_sockaddr_inx_t  *rkb_addr_last; /* Last used connect address */
-
-	rd_kafka_transport_t *rkb_transport;
-
-	uint32_t            rkb_corrid;
-	int                 rkb_connid;    /* Connection id, increased by
-					    * one for each connection by
-					    * this broker. Used as a safe-guard
-					    * to help troubleshooting buffer
-					    * problems across disconnects. */
-
-	rd_kafka_q_t       *rkb_ops;
-
-        mtx_t               rkb_lock;
-
-        int                 rkb_blocking_max_ms; /* Maximum IO poll blocking
-                                                  * time. */
-
-        /* Toppars handled by this broker */
-	TAILQ_HEAD(, rd_kafka_toppar_s) rkb_toppars;
-	int                 rkb_toppar_cnt;
-
-        /* Underflowed toppars that are eligible for fetching. */
-        CIRCLEQ_HEAD(, rd_kafka_toppar_s) rkb_fetch_toppars;
-        int                 rkb_fetch_toppar_cnt;
-        rd_kafka_toppar_t  *rkb_fetch_toppar_next;  /* Next 'first' toppar
-                                                     * in fetch list.
-                                                     * This is used for
-                                                     * round-robin. */
-
-
-        rd_kafka_cgrp_t    *rkb_cgrp;
-
-	rd_ts_t             rkb_ts_fetch_backoff;
-	int                 rkb_fetching;
-
-	enum {
-		RD_KAFKA_BROKER_STATE_INIT,
-		RD_KAFKA_BROKER_STATE_DOWN,
-		RD_KAFKA_BROKER_STATE_CONNECT,
-		RD_KAFKA_BROKER_STATE_AUTH,
-
-		/* Any state >= STATE_UP means the Kafka protocol layer
-		 * is operational (to some degree). */
-		RD_KAFKA_BROKER_STATE_UP,
-                RD_KAFKA_BROKER_STATE_UPDATE,
-		RD_KAFKA_BROKER_STATE_APIVERSION_QUERY,
-		RD_KAFKA_BROKER_STATE_AUTH_HANDSHAKE
-	} rkb_state;
-
-        rd_ts_t             rkb_ts_state;        /* Timestamp of last
-                                                  * state change */
-        rd_interval_t       rkb_timeout_scan_intvl;  /* Waitresp timeout scan
-                                                      * interval. */
-
-        rd_atomic32_t       rkb_blocking_request_cnt; /* The number of
-                                                       * in-flight blocking
-                                                       * requests.
-                                                       * A blocking request is
-                                                       * one that is known to
-                                                       * possibly block on the
-                                                       * broker for longer than
-                                                       * the typical processing
-                                                       * time, e.g.:
-                                                       * JoinGroup, SyncGroup */
-
-	int                 rkb_features;    /* Protocol features supported
-					      * by this broker.
-					      * See RD_KAFKA_FEATURE_* in
-					      * rdkafka_proto.h */
-
-        struct rd_kafka_ApiVersion *rkb_ApiVersions; /* Broker's supported APIs
-                                                      * (MUST be sorted) */
-	size_t                      rkb_ApiVersions_cnt;
-	rd_interval_t               rkb_ApiVersion_fail_intvl; /* Controls how long
-								* the fallback proto
-								* will be used after
-								* ApiVersionRequest
-								* failure. */
-
-	rd_kafka_confsource_t  rkb_source;
-	struct {
-		rd_atomic64_t tx_bytes;
-		rd_atomic64_t tx;    /* Kafka-messages (not payload msgs) */
-		rd_atomic64_t tx_err;
-		rd_atomic64_t tx_retries;
-		rd_atomic64_t req_timeouts;  /* Accumulated value */
-
-		rd_atomic64_t rx_bytes;
-		rd_atomic64_t rx;    /* Kafka messages (not payload msgs) */
-		rd_atomic64_t rx_err;
-		rd_atomic64_t rx_corrid_err; /* CorrId misses */
-		rd_atomic64_t rx_partial;    /* Partial messages received
-                                              * and dropped. */
-                rd_atomic64_t zbuf_grow;     /* Compression/decompression buffer grows needed */
-                rd_atomic64_t buf_grow;      /* rkbuf grows needed */
-                rd_atomic64_t wakeups;       /* Poll wakeups */
-	} rkb_c;
-
-        int                 rkb_req_timeouts;  /* Current value */
-
-	rd_ts_t             rkb_ts_metadata_poll; /* Next metadata poll time */
-	int                 rkb_metadata_fast_poll_cnt; /* Perform fast
-							 * metadata polls. */
-	thrd_t              rkb_thread;
-
-	rd_refcnt_t         rkb_refcnt;
-
-        rd_kafka_t         *rkb_rk;
-
-	rd_kafka_buf_t     *rkb_recv_buf;
-
-	int                 rkb_max_inflight;   /* Maximum number of in-flight
-						 * requests to broker.
-						 * Compared to rkb_waitresps length.*/
-	rd_kafka_bufq_t     rkb_outbufs;
-	rd_kafka_bufq_t     rkb_waitresps;
-	rd_kafka_bufq_t     rkb_retrybufs;
-
-	rd_avg_t            rkb_avg_int_latency;/* Current internal latency period*/
-	rd_avg_t            rkb_avg_rtt;        /* Current RTT period */
-	rd_avg_t            rkb_avg_throttle;   /* Current throttle period */
-
-        /* These are all protected by rkb_lock */
-	char                rkb_name[RD_KAFKA_NODENAME_SIZE];  /* Displ name */
-	char                rkb_nodename[RD_KAFKA_NODENAME_SIZE]; /* host:port*/
-        uint16_t            rkb_port;                          /* TCP port */
-        char               *rkb_origname;                      /* Original
-                                                                * host name */
-
-
-        /* Logging name is a copy of rkb_name, protected by its own mutex */
-        char               *rkb_logname;
-        mtx_t               rkb_logname_lock;
-
-        int                 rkb_wakeup_fd[2];     /* Wake-up fds (r/w) to wake
-                                                   * up from IO-wait when
-                                                   * queues have content. */
-        int                 rkb_toppar_wakeup_fd; /* Toppar msgq wakeup fd,
-                                                   * this is rkb_wakeup_fd[1]
-                                                   * if enabled. */
-        rd_interval_t       rkb_connect_intvl;    /* Reconnect throttling */
-
-	rd_kafka_secproto_t rkb_proto;
-
-	int                 rkb_down_reported;    /* Down event reported */
-#if WITH_SASL_CYRUS
-	rd_kafka_timer_t    rkb_sasl_kinit_refresh_tmr;
-#endif
-
-
-	struct {
-		char msg[512];
-		int  err;  /* errno */
-	} rkb_err;
-};
-
-#define rd_kafka_broker_keep(rkb)   rd_refcnt_add(&(rkb)->rkb_refcnt)
-#define rd_kafka_broker_lock(rkb)   mtx_lock(&(rkb)->rkb_lock)
-#define rd_kafka_broker_unlock(rkb) mtx_unlock(&(rkb)->rkb_lock)
-
-
-/**
- * @brief Broker comparator
- */
-static RD_UNUSED RD_INLINE int rd_kafka_broker_cmp (const void *_a,
-                                                    const void *_b) {
-        const rd_kafka_broker_t *a = _a, *b = _b;
-        return (int)(a - b);
-}
-
-
-/**
- * @returns true if broker supports \p features, else false.
- */
-static RD_UNUSED
-int rd_kafka_broker_supports (rd_kafka_broker_t *rkb, int features) {
-	int r;
-	rd_kafka_broker_lock(rkb);
-	r = (rkb->rkb_features & features) == features;
-	rd_kafka_broker_unlock(rkb);
-	return r;
-}
-
-int16_t rd_kafka_broker_ApiVersion_supported (rd_kafka_broker_t *rkb,
-                                              int16_t ApiKey,
-                                              int16_t minver, int16_t maxver,
-                                              int *featuresp);
-
-int rd_kafka_broker_get_state (rd_kafka_broker_t *rkb);
-
-rd_kafka_broker_t *rd_kafka_broker_find_by_nodeid (rd_kafka_t *rk,
-						   int32_t nodeid);
-rd_kafka_broker_t *rd_kafka_broker_find_by_nodeid0 (rd_kafka_t *rk,
-                                                    int32_t nodeid,
-                                                    int state);
-#define rd_kafka_broker_find_by_nodeid(rk,nodeid) \
-        rd_kafka_broker_find_by_nodeid0(rk,nodeid,-1)
-
-/**
- * Filter out brokers that are currently in a blocking request.
- */
-static RD_INLINE RD_UNUSED int
-rd_kafka_broker_filter_non_blocking (rd_kafka_broker_t *rkb, void *opaque) {
-        return rd_atomic32_get(&rkb->rkb_blocking_request_cnt) > 0;
-}
-
-/**
- * Filter out brokers that can't do GroupCoordinator requests right now.
- */
-static RD_INLINE RD_UNUSED int
-rd_kafka_broker_filter_can_group_query (rd_kafka_broker_t *rkb, void *opaque) {
-        return rd_atomic32_get(&rkb->rkb_blocking_request_cnt) > 0 ||
-		!(rkb->rkb_features & RD_KAFKA_FEATURE_BROKER_GROUP_COORD);
-}
-
-rd_kafka_broker_t *rd_kafka_broker_any (rd_kafka_t *rk, int state,
-                                        int (*filter) (rd_kafka_broker_t *rkb,
-                                                       void *opaque),
-                                        void *opaque);
-
-rd_kafka_broker_t *rd_kafka_broker_any_usable (rd_kafka_t *rk, int timeout_ms,
-                                               int do_lock);
-
-rd_kafka_broker_t *rd_kafka_broker_prefer (rd_kafka_t *rk, int32_t broker_id, int state);
-
-int rd_kafka_brokers_add0 (rd_kafka_t *rk, const char *brokerlist);
-void rd_kafka_broker_set_state (rd_kafka_broker_t *rkb, int state);
-
-void rd_kafka_broker_fail (rd_kafka_broker_t *rkb,
-			   int level, rd_kafka_resp_err_t err,
-			   const char *fmt, ...);
-
-void rd_kafka_broker_destroy_final (rd_kafka_broker_t *rkb);
-
-#define rd_kafka_broker_destroy(rkb)                                    \
-        rd_refcnt_destroywrapper(&(rkb)->rkb_refcnt,                    \
-                                 rd_kafka_broker_destroy_final(rkb))
-
-
-void rd_kafka_broker_update (rd_kafka_t *rk, rd_kafka_secproto_t proto,
-                             const struct rd_kafka_metadata_broker *mdb);
-rd_kafka_broker_t *rd_kafka_broker_add (rd_kafka_t *rk,
-					rd_kafka_confsource_t source,
-					rd_kafka_secproto_t proto,
-					const char *name, uint16_t port,
-					int32_t nodeid);
-
-void rd_kafka_broker_connect_up (rd_kafka_broker_t *rkb);
-void rd_kafka_broker_connect_done (rd_kafka_broker_t *rkb, const char *errstr);
-
-int rd_kafka_send (rd_kafka_broker_t *rkb);
-int rd_kafka_recv (rd_kafka_broker_t *rkb);
-
-void rd_kafka_dr_msgq (rd_kafka_itopic_t *rkt,
-		       rd_kafka_msgq_t *rkmq, rd_kafka_resp_err_t err);
-
-void rd_kafka_broker_buf_enq1 (rd_kafka_broker_t *rkb,
-                               rd_kafka_buf_t *rkbuf,
-                               rd_kafka_resp_cb_t *resp_cb,
-                               void *opaque);
-
-void rd_kafka_broker_buf_enq_replyq (rd_kafka_broker_t *rkb,
-                                     rd_kafka_buf_t *rkbuf,
-                                     rd_kafka_replyq_t replyq,
-                                     rd_kafka_resp_cb_t *resp_cb,
-                                     void *opaque);
-
-void rd_kafka_broker_buf_retry (rd_kafka_broker_t *rkb, rd_kafka_buf_t *rkbuf);
-
-
-rd_kafka_broker_t *rd_kafka_broker_internal (rd_kafka_t *rk);
-
-void msghdr_print (rd_kafka_t *rk,
-		   const char *what, const struct msghdr *msg,
-		   int hexdump);
-
-const char *rd_kafka_broker_name (rd_kafka_broker_t *rkb);
-void rd_kafka_broker_wakeup (rd_kafka_broker_t *rkb);
-
-int rd_kafka_brokers_get_state_version (rd_kafka_t *rk);
-int rd_kafka_brokers_wait_state_change (rd_kafka_t *rk, int stored_version,
-					int timeout_ms);
-void rd_kafka_brokers_broadcast_state_change (rd_kafka_t *rk);
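
The header removed above pairs rd_kafka_broker_keep() with a
rd_kafka_broker_destroy() wrapper around rd_kafka_broker_destroy_final(),
so a broker object is only freed when its last reference is released. The
standalone sketch below (not from librdkafka; all names are invented for
illustration) shows that keep()/destroy() refcount idiom with a plain C11
atomic counter standing in for rd_refcnt_t.

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct broker_like_s {
            atomic_int refcnt;
            char       name[64];
    } broker_like_t;

    static broker_like_t *broker_like_new (const char *name) {
            broker_like_t *b = calloc(1, sizeof(*b));
            atomic_init(&b->refcnt, 1);      /* creator's reference */
            snprintf(b->name, sizeof(b->name), "%s", name);
            return b;
    }

    /* analogous to rd_kafka_broker_keep() */
    #define broker_like_keep(b)  atomic_fetch_add(&(b)->refcnt, 1)

    /* analogous to the rd_kafka_broker_destroy() wrapper: releasing the
     * last reference performs the final destruction */
    static void broker_like_destroy (broker_like_t *b) {
            if (atomic_fetch_sub(&b->refcnt, 1) == 1)
                    free(b);
    }

    int main (void) {
            broker_like_t *b = broker_like_new("broker0:9092");
            broker_like_keep(b);     /* e.g. a buffer holding a broker ref */
            broker_like_destroy(b);  /* buffer done with it */
            broker_like_destroy(b);  /* creator done: object freed here */
            return 0;
    }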

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdkafka_buf.c
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdkafka_buf.c b/thirdparty/librdkafka-0.11.1/src/rdkafka_buf.c
deleted file mode 100644
index 9b50737..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdkafka_buf.c
+++ /dev/null
@@ -1,428 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2015, Magnus Edenhill
- * All rights reserved.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met: 
- * 
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer. 
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution. 
- * 
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "rdkafka_int.h"
-#include "rdkafka_buf.h"
-#include "rdkafka_broker.h"
-
-void rd_kafka_buf_destroy_final (rd_kafka_buf_t *rkbuf) {
-
-        switch (rkbuf->rkbuf_reqhdr.ApiKey)
-        {
-        case RD_KAFKAP_Metadata:
-                if (rkbuf->rkbuf_u.Metadata.topics)
-                        rd_list_destroy(rkbuf->rkbuf_u.Metadata.topics);
-                if (rkbuf->rkbuf_u.Metadata.reason)
-                        rd_free(rkbuf->rkbuf_u.Metadata.reason);
-                if (rkbuf->rkbuf_u.Metadata.rko)
-                        rd_kafka_op_reply(rkbuf->rkbuf_u.Metadata.rko,
-                                          RD_KAFKA_RESP_ERR__DESTROY);
-                if (rkbuf->rkbuf_u.Metadata.decr) {
-                        /* Decrease metadata cache's full_.._sent state. */
-                        mtx_lock(rkbuf->rkbuf_u.Metadata.decr_lock);
-                        rd_kafka_assert(NULL,
-                                        (*rkbuf->rkbuf_u.Metadata.decr) > 0);
-                        (*rkbuf->rkbuf_u.Metadata.decr)--;
-                        mtx_unlock(rkbuf->rkbuf_u.Metadata.decr_lock);
-                }
-                break;
-        }
-
-        if (rkbuf->rkbuf_response)
-                rd_kafka_buf_destroy(rkbuf->rkbuf_response);
-
-        rd_kafka_replyq_destroy(&rkbuf->rkbuf_replyq);
-        rd_kafka_replyq_destroy(&rkbuf->rkbuf_orig_replyq);
-
-        rd_buf_destroy(&rkbuf->rkbuf_buf);
-
-        if (rkbuf->rkbuf_rktp_vers)
-                rd_list_destroy(rkbuf->rkbuf_rktp_vers);
-
-        if (rkbuf->rkbuf_rkb)
-                rd_kafka_broker_destroy(rkbuf->rkbuf_rkb);
-
-        rd_refcnt_destroy(&rkbuf->rkbuf_refcnt);
-
-	rd_free(rkbuf);
-}
-
-
-
-/**
- * @brief Pushes \p buf of size \p len as a new segment on the buffer.
- *
- * \p buf will NOT be freed by the buffer.
- */
-void rd_kafka_buf_push0 (rd_kafka_buf_t *rkbuf, const void *buf, size_t len,
-                         int allow_crc_calc, void (*free_cb) (void *)) {
-        rd_buf_push(&rkbuf->rkbuf_buf, buf, len, free_cb);
-
-        if (allow_crc_calc && (rkbuf->rkbuf_flags & RD_KAFKA_OP_F_CRC))
-                rkbuf->rkbuf_crc = rd_crc32_update(rkbuf->rkbuf_crc, buf, len);
-}
-
-
-
-/**
- * @brief Create a new buffer with \p segcnt initial segments and \p size bytes
- *        of initial backing memory.
- *        The underlying buffer will grow as needed.
- *
- * If \p rk is non-NULL (typical case):
- * Additional space for the Kafka protocol headers is inserted automatically.
- */
-rd_kafka_buf_t *rd_kafka_buf_new0 (int segcnt, size_t size, int flags) {
-        rd_kafka_buf_t *rkbuf;
-
-        rkbuf = rd_calloc(1, sizeof(*rkbuf));
-
-        rkbuf->rkbuf_flags = flags;
-
-        rd_buf_init(&rkbuf->rkbuf_buf, segcnt, size);
-        rd_kafka_msgq_init(&rkbuf->rkbuf_msgq);
-        rd_refcnt_init(&rkbuf->rkbuf_refcnt, 1);
-
-        return rkbuf;
-}
-
-
-/**
- * @brief Create new request buffer with the request-header written (will
- *        need to be updated with Length, etc, later)
- */
-rd_kafka_buf_t *rd_kafka_buf_new_request (rd_kafka_broker_t *rkb, int16_t ApiKey,
-                                          int segcnt, size_t size) {
-        rd_kafka_buf_t *rkbuf;
-
-        /* Make room for common protocol request headers */
-        size += RD_KAFKAP_REQHDR_SIZE +
-                RD_KAFKAP_STR_SIZE(rkb->rkb_rk->rk_client_id);
-        segcnt += 1; /* headers */
-
-        rkbuf = rd_kafka_buf_new0(segcnt, size, 0);
-
-        rkbuf->rkbuf_rkb = rkb;
-        rd_kafka_broker_keep(rkb);
-
-        rkbuf->rkbuf_reqhdr.ApiKey = ApiKey;
-
-        /* Write request header, will be updated later. */
-        /* Length: updated later */
-        rd_kafka_buf_write_i32(rkbuf, 0);
-        /* ApiKey */
-        rd_kafka_buf_write_i16(rkbuf, rkbuf->rkbuf_reqhdr.ApiKey);
-        /* ApiVersion: updated later */
-        rd_kafka_buf_write_i16(rkbuf, 0);
-        /* CorrId: updated later */
-        rd_kafka_buf_write_i32(rkbuf, 0);
-
-        /* ClientId */
-        rd_kafka_buf_write_kstr(rkbuf, rkb->rkb_rk->rk_client_id);
-
-        return rkbuf;
-}
-
-
-
-
-/**
- * @brief Create new read-only rkbuf shadowing a memory region.
- *
- * @remark \p free_cb (possibly NULL) will be used to free \p ptr when
- *         buffer refcount reaches 0.
- * @remark the buffer may only be read from, not written to.
- */
-rd_kafka_buf_t *rd_kafka_buf_new_shadow (const void *ptr, size_t size,
-                                         void (*free_cb) (void *)) {
-	rd_kafka_buf_t *rkbuf;
-
-	rkbuf = rd_calloc(1, sizeof(*rkbuf));
-
-        rkbuf->rkbuf_reqhdr.ApiKey = RD_KAFKAP_None;
-
-        rd_buf_init(&rkbuf->rkbuf_buf, 1, 0);
-        rd_buf_push(&rkbuf->rkbuf_buf, ptr, size, free_cb);
-
-        rkbuf->rkbuf_totlen  = size;
-
-        /* Initialize reader slice */
-        rd_slice_init_full(&rkbuf->rkbuf_reader, &rkbuf->rkbuf_buf);
-
-	rd_kafka_msgq_init(&rkbuf->rkbuf_msgq);
-
-        rd_refcnt_init(&rkbuf->rkbuf_refcnt, 1);
-
-	return rkbuf;
-}
-
-
-
-void rd_kafka_bufq_enq (rd_kafka_bufq_t *rkbufq, rd_kafka_buf_t *rkbuf) {
-	TAILQ_INSERT_TAIL(&rkbufq->rkbq_bufs, rkbuf, rkbuf_link);
-	(void)rd_atomic32_add(&rkbufq->rkbq_cnt, 1);
-	(void)rd_atomic32_add(&rkbufq->rkbq_msg_cnt,
-                            rd_atomic32_get(&rkbuf->rkbuf_msgq.rkmq_msg_cnt));
-}
-
-void rd_kafka_bufq_deq (rd_kafka_bufq_t *rkbufq, rd_kafka_buf_t *rkbuf) {
-	TAILQ_REMOVE(&rkbufq->rkbq_bufs, rkbuf, rkbuf_link);
-	rd_kafka_assert(NULL, rd_atomic32_get(&rkbufq->rkbq_cnt) > 0);
-	(void)rd_atomic32_sub(&rkbufq->rkbq_cnt, 1);
-	(void)rd_atomic32_sub(&rkbufq->rkbq_msg_cnt,
-                          rd_atomic32_get(&rkbuf->rkbuf_msgq.rkmq_msg_cnt));
-}
-
-void rd_kafka_bufq_init(rd_kafka_bufq_t *rkbufq) {
-	TAILQ_INIT(&rkbufq->rkbq_bufs);
-	rd_atomic32_init(&rkbufq->rkbq_cnt, 0);
-	rd_atomic32_init(&rkbufq->rkbq_msg_cnt, 0);
-}
-
-/**
- * Concat all buffers from 'src' to tail of 'dst'
- */
-void rd_kafka_bufq_concat (rd_kafka_bufq_t *dst, rd_kafka_bufq_t *src) {
-	TAILQ_CONCAT(&dst->rkbq_bufs, &src->rkbq_bufs, rkbuf_link);
-	(void)rd_atomic32_add(&dst->rkbq_cnt, rd_atomic32_get(&src->rkbq_cnt));
-	(void)rd_atomic32_add(&dst->rkbq_msg_cnt, rd_atomic32_get(&src->rkbq_msg_cnt));
-	rd_kafka_bufq_init(src);
-}
-
-/**
- * Purge the wait-response queue.
- * NOTE: 'rkbufq' must be a temporary queue and not one of rkb_waitresps
- *       or rkb_outbufs since buffers may be re-enqueued on those queues.
- *       'rkbufq' needs to be bufq_init():ed before reuse after this call.
- */
-void rd_kafka_bufq_purge (rd_kafka_broker_t *rkb,
-                          rd_kafka_bufq_t *rkbufq,
-                          rd_kafka_resp_err_t err) {
-	rd_kafka_buf_t *rkbuf, *tmp;
-
-	rd_kafka_assert(rkb->rkb_rk, thrd_is_current(rkb->rkb_thread));
-
-	rd_rkb_dbg(rkb, QUEUE, "BUFQ", "Purging bufq with %i buffers",
-		   rd_atomic32_get(&rkbufq->rkbq_cnt));
-
-	TAILQ_FOREACH_SAFE(rkbuf, &rkbufq->rkbq_bufs, rkbuf_link, tmp) {
-                rd_kafka_buf_callback(rkb->rkb_rk, rkb, err, NULL, rkbuf);
-        }
-}
-
-
-/**
- * @brief Update bufq for connection reset:
- *
- * - Purge connection-setup API requests from the queue.
- * - Reset any partially sent buffer's offset. (issue #756)
- *
- * Request types purged:
- *   ApiVersion
- *   SaslHandshake
- */
-void rd_kafka_bufq_connection_reset (rd_kafka_broker_t *rkb,
-				     rd_kafka_bufq_t *rkbufq) {
-	rd_kafka_buf_t *rkbuf, *tmp;
-
-	rd_kafka_assert(rkb->rkb_rk, thrd_is_current(rkb->rkb_thread));
-
-	rd_rkb_dbg(rkb, QUEUE, "BUFQ",
-		   "Updating %d buffers on connection reset",
-		   rd_atomic32_get(&rkbufq->rkbq_cnt));
-
-	TAILQ_FOREACH_SAFE(rkbuf, &rkbufq->rkbq_bufs, rkbuf_link, tmp) {
-		switch (rkbuf->rkbuf_reqhdr.ApiKey)
-		{
-		case RD_KAFKAP_ApiVersion:
-		case RD_KAFKAP_SaslHandshake:
-			rd_kafka_bufq_deq(rkbufq, rkbuf);
-			rd_kafka_buf_callback(rkb->rkb_rk, rkb,
-					      RD_KAFKA_RESP_ERR__DESTROY,
-					      NULL, rkbuf);
-			break;
-                default:
-                        /* Reset buffer send position */
-                        rd_slice_seek(&rkbuf->rkbuf_reader, 0);
-                        break;
-		}
-        }
-}
-
-
-void rd_kafka_bufq_dump (rd_kafka_broker_t *rkb, const char *fac,
-			 rd_kafka_bufq_t *rkbq) {
-	rd_kafka_buf_t *rkbuf;
-	int cnt = rd_kafka_bufq_cnt(rkbq);
-	rd_ts_t now;
-
-	if (!cnt)
-		return;
-
-	now = rd_clock();
-
-	rd_rkb_dbg(rkb, BROKER, fac, "bufq with %d buffer(s):", cnt);
-
-	TAILQ_FOREACH(rkbuf, &rkbq->rkbq_bufs, rkbuf_link) {
-		rd_rkb_dbg(rkb, BROKER, fac,
-			   " Buffer %s (%"PRIusz" bytes, corrid %"PRId32", "
-			   "connid %d, retry %d in %lldms, timeout in %lldms",
-			   rd_kafka_ApiKey2str(rkbuf->rkbuf_reqhdr.ApiKey),
-			   rkbuf->rkbuf_totlen, rkbuf->rkbuf_corrid,
-			   rkbuf->rkbuf_connid, rkbuf->rkbuf_retries,
-			   rkbuf->rkbuf_ts_retry ?
-			   (now - rkbuf->rkbuf_ts_retry) / 1000LL : 0,
-			   rkbuf->rkbuf_ts_timeout ?
-			   (now - rkbuf->rkbuf_ts_timeout) / 1000LL : 0);
-	}
-}
-
-
-
-
-/**
- * Retry failed request, depending on the error.
- * @remark \p rkb may be NULL
- * Returns 1 if the request was scheduled for retry, else 0.
- */
-int rd_kafka_buf_retry (rd_kafka_broker_t *rkb, rd_kafka_buf_t *rkbuf) {
-
-        if (unlikely(!rkb ||
-		     rkb->rkb_source == RD_KAFKA_INTERNAL ||
-		     rd_kafka_terminating(rkb->rkb_rk) ||
-		     rkbuf->rkbuf_retries + 1 >
-		     rkb->rkb_rk->rk_conf.max_retries))
-                return 0;
-
-	/* Try again */
-	rkbuf->rkbuf_ts_sent = 0;
-	rkbuf->rkbuf_retries++;
-	rd_kafka_buf_keep(rkbuf);
-	rd_kafka_broker_buf_retry(rkb, rkbuf);
-	return 1;
-}
-
-
-/**
- * @brief Handle RD_KAFKA_OP_RECV_BUF.
- */
-void rd_kafka_buf_handle_op (rd_kafka_op_t *rko, rd_kafka_resp_err_t err) {
-        rd_kafka_buf_t *request, *response;
-
-        request = rko->rko_u.xbuf.rkbuf;
-        rko->rko_u.xbuf.rkbuf = NULL;
-
-        /* NULL on op_destroy() */
-	if (request->rkbuf_replyq.q) {
-		int32_t version = request->rkbuf_replyq.version;
-                /* Current queue usage is done, but retain original replyq for
-                 * future retries, stealing
-                 * the current reference. */
-                request->rkbuf_orig_replyq = request->rkbuf_replyq;
-                rd_kafka_replyq_clear(&request->rkbuf_replyq);
-		/* Callback might need to version check so we retain the
-		 * version across the clear() call which clears it. */
-		request->rkbuf_replyq.version = version;
-	}
-
-	if (!request->rkbuf_cb) {
-		rd_kafka_buf_destroy(request);
-		return;
-	}
-
-        /* Let buf_callback() do destroy()s */
-        response = request->rkbuf_response; /* May be NULL */
-        request->rkbuf_response = NULL;
-
-        rd_kafka_buf_callback(request->rkbuf_rkb->rkb_rk,
-			      request->rkbuf_rkb, err,
-                              response, request);
-}
-
-
-
-/**
- * Call request.rkbuf_cb(), but:
- *  - if the rkbuf has a rkbuf_replyq the buffer is enqueued on that queue
- *    with op type RD_KAFKA_OP_RECV_BUF.
- *  - else call rkbuf_cb().
- *
- * \p response may be NULL.
- *
- * Will decrease refcount for both response and request, eventually.
- */
-void rd_kafka_buf_callback (rd_kafka_t *rk,
-			    rd_kafka_broker_t *rkb, rd_kafka_resp_err_t err,
-                            rd_kafka_buf_t *response, rd_kafka_buf_t *request){
-
-        /* Decide if the request should be retried.
-         * This is always done in the originating broker thread. */
-        if (unlikely(err && err != RD_KAFKA_RESP_ERR__DESTROY &&
-		     rd_kafka_buf_retry(rkb, request))) {
-		/* refcount for retry was increased in buf_retry() so we can
-		 * let go of this caller's refcounts. */
-		rd_kafka_buf_destroy(request);
-		if (response)
-			rd_kafka_buf_destroy(response);
-                return;
-	}
-
-        if (err != RD_KAFKA_RESP_ERR__DESTROY && request->rkbuf_replyq.q) {
-                rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_RECV_BUF);
-
-		rd_kafka_assert(NULL, !request->rkbuf_response);
-		request->rkbuf_response = response;
-
-                /* Increment refcnt since rko_rkbuf will be decref:ed
-                 * if replyq_enq() fails and we don't want the rkbuf gone in that
-                 * case. */
-                rd_kafka_buf_keep(request);
-                rko->rko_u.xbuf.rkbuf = request;
-
-                rko->rko_err = err;
-
-                /* Copy original replyq for future retries, with its own
-                 * queue reference. */
-                rd_kafka_replyq_copy(&request->rkbuf_orig_replyq,
-                                     &request->rkbuf_replyq);
-
-	        rd_kafka_replyq_enq(&request->rkbuf_replyq, rko, 0);
-
-		rd_kafka_buf_destroy(request); /* from keep above */
-		return;
-        }
-
-        if (request->rkbuf_cb)
-                request->rkbuf_cb(rk, rkb, err, response, request,
-                                  request->rkbuf_opaque);
-
-        rd_kafka_buf_destroy(request);
-	if (response)
-		rd_kafka_buf_destroy(response);
-}
-

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdkafka_buf.h
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdkafka_buf.h b/thirdparty/librdkafka-0.11.1/src/rdkafka_buf.h
deleted file mode 100644
index 5aa2876..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdkafka_buf.h
+++ /dev/null
@@ -1,819 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2015, Magnus Edenhill
- * All rights reserved.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met: 
- * 
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer. 
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution. 
- * 
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-#pragma once
-
-#include "rdkafka_int.h"
-#include "rdcrc32.h"
-#include "rdlist.h"
-#include "rdbuf.h"
-
-
-typedef struct rd_kafka_broker_s rd_kafka_broker_t;
-
-#define RD_KAFKA_HEADERS_IOV_CNT   2
-
-
-/**
- * Temporary buffer with memory-aligned writes to accommodate
- * effective and platform-safe struct writes.
- */
-typedef struct rd_tmpabuf_s {
-	size_t size;
-	size_t of;
-	char  *buf;
-	int    failed;
-	int    assert_on_fail;
-} rd_tmpabuf_t;
-
-/**
- * @brief Allocate new tmpabuf with \p size bytes pre-allocated.
- */
-static RD_UNUSED void
-rd_tmpabuf_new (rd_tmpabuf_t *tab, size_t size, int assert_on_fail) {
-	tab->buf = rd_malloc(size);
-	tab->size = size;
-	tab->of = 0;
-	tab->failed = 0;
-	tab->assert_on_fail = assert_on_fail;
-}
-
-/**
- * @brief Free memory allocated by tmpabuf
- */
-static RD_UNUSED void
-rd_tmpabuf_destroy (rd_tmpabuf_t *tab) {
-	rd_free(tab->buf);
-}
-
-/**
- * @returns 1 if a previous operation failed.
- */
-static RD_UNUSED RD_INLINE int
-rd_tmpabuf_failed (rd_tmpabuf_t *tab) {
-	return tab->failed;
-}
-
-/**
- * @brief Allocate \p size bytes for writing, returning an aligned pointer
- *        to the memory.
- * @returns the allocated pointer (within the tmpabuf) on success or
- *          NULL if the requested number of bytes + alignment is not available
- *          in the tmpabuf.
- */
-static RD_UNUSED void *
-rd_tmpabuf_alloc0 (const char *func, int line, rd_tmpabuf_t *tab, size_t size) {
-	void *ptr;
-
-	if (unlikely(tab->failed))
-		return NULL;
-
-	if (unlikely(tab->of + size > tab->size)) {
-		if (tab->assert_on_fail) {
-			fprintf(stderr,
-				"%s: %s:%d: requested size %zd + %zd > %zd\n",
-				__FUNCTION__, func, line, tab->of, size,
-				tab->size);
-			assert(!*"rd_tmpabuf_alloc: not enough size in buffer");
-		}
-		return NULL;
-	}
-
-        ptr = (void *)(tab->buf + tab->of);
-	tab->of += RD_ROUNDUP(size, 8);
-
-	return ptr;
-}
-
-#define rd_tmpabuf_alloc(tab,size) \
-	rd_tmpabuf_alloc0(__FUNCTION__,__LINE__,tab,size)
-
-/**
- * @brief Write \p buf of \p size bytes to tmpabuf memory in an aligned fashion.
- *
- * @returns the allocated and written-to pointer (within the tmpabuf) on success
- *          or NULL if the requested number of bytes + alignment is not available
- *          in the tmpabuf.
- */
-static RD_UNUSED void *
-rd_tmpabuf_write0 (const char *func, int line,
-		   rd_tmpabuf_t *tab, const void *buf, size_t size) {
-	void *ptr = rd_tmpabuf_alloc0(func, line, tab, size);
-
-	if (ptr)
-		memcpy(ptr, buf, size);
-
-	return ptr;
-}
-#define rd_tmpabuf_write(tab,buf,size) \
-	rd_tmpabuf_write0(__FUNCTION__, __LINE__, tab, buf, size)
-
-
-/**
- * @brief Wrapper for rd_tmpabuf_write() that takes a nul-terminated string.
- */
-static RD_UNUSED char *
-rd_tmpabuf_write_str0 (const char *func, int line,
-		       rd_tmpabuf_t *tab, const char *str) {
-	return rd_tmpabuf_write0(func, line, tab, str, strlen(str)+1);
-}
-#define rd_tmpabuf_write_str(tab,str) \
-	rd_tmpabuf_write_str0(__FUNCTION__, __LINE__, tab, str)
-
-
-
-/**
- * @name Read buffer interface
- *
- * Memory reading helper macros to be used when parsing network responses.
- *
- * Assumptions:
- *   - an 'err_parse:' goto-label must be available for error bailouts,
- *                     the error code will be set in rkbuf->rkbuf_err
- *   - local `int log_decode_errors` variable set to the logging level
- *     to log parse errors (or 0 to turn off logging).
- */
-
-#define rd_kafka_buf_parse_fail(rkbuf,...) do {				\
-                if (log_decode_errors > 0) {                            \
-			rd_kafka_assert(NULL, rkbuf->rkbuf_rkb);	\
-                        rd_rkb_log(rkbuf->rkbuf_rkb, log_decode_errors, \
-                                   "PROTOERR",                          \
-                                   "Protocol parse failure "            \
-                                   "at %"PRIusz"/%"PRIusz" (%s:%i) "    \
-                                   "(incorrect broker.version.fallback?)", \
-                                   rd_slice_offset(&rkbuf->rkbuf_reader), \
-                                   rd_slice_size(&rkbuf->rkbuf_reader), \
-                                   __FUNCTION__, __LINE__);             \
-                        rd_rkb_log(rkbuf->rkbuf_rkb, log_decode_errors, \
-				   "PROTOERR", __VA_ARGS__);		\
-                }                                                       \
-                (rkbuf)->rkbuf_err = RD_KAFKA_RESP_ERR__BAD_MSG;        \
-                goto err_parse;                                         \
-	} while (0)
-
-
-
-/**
- * Returns the number of remaining bytes available to read.
- */
-#define rd_kafka_buf_read_remain(rkbuf) \
-        rd_slice_remains(&(rkbuf)->rkbuf_reader)
-
-/**
- * Checks that at least 'len' bytes remain to be read in buffer, else fails.
- */
-#define rd_kafka_buf_check_len(rkbuf,len) do {                          \
-                size_t __len0 = (size_t)(len);                          \
-                if (unlikely(__len0 > rd_kafka_buf_read_remain(rkbuf))) { \
-                        rd_kafka_buf_parse_fail(                        \
-                                rkbuf,                                  \
-                                "expected %"PRIusz" bytes > %"PRIusz    \
-                                " remaining bytes",                     \
-                                __len0, rd_kafka_buf_read_remain(rkbuf)); \
-                        (rkbuf)->rkbuf_err = RD_KAFKA_RESP_ERR__BAD_MSG; \
-                        goto err_parse;                                 \
-                }                                                       \
-        } while (0)
-
-/**
- * Skip (as in read and ignore) the next 'len' bytes.
- */
-#define rd_kafka_buf_skip(rkbuf, len) do {                              \
-                size_t __len1 = (size_t)(len);                          \
-                if (__len1 &&                                           \
-                    !rd_slice_read(&(rkbuf)->rkbuf_reader, NULL, __len1)) \
-                        rd_kafka_buf_check_len(rkbuf, __len1);           \
-        } while (0)
-
-/**
- * Skip (as in read and ignore) up to fixed position \p pos.
- */
-#define rd_kafka_buf_skip_to(rkbuf, pos) do {                           \
-                size_t __len1 = (size_t)(pos) -                         \
-                        rd_slice_offset(&(rkbuf)->rkbuf_reader);        \
-                if (__len1 &&                                           \
-                    !rd_slice_read(&(rkbuf)->rkbuf_reader, NULL, __len1)) \
-                        rd_kafka_buf_check_len(rkbuf, __len1);           \
-        } while (0)
-
-
-
-/**
- * Read 'len' bytes and copy to 'dstptr'
- */
-#define rd_kafka_buf_read(rkbuf,dstptr,len) do {                        \
-                size_t __len2 = (size_t)(len);                          \
-                if (!rd_slice_read(&(rkbuf)->rkbuf_reader, dstptr, __len2))  \
-                        rd_kafka_buf_check_len(rkbuf, __len2);          \
-        } while (0)
-
-
-/**
- * @brief Read \p len bytes at slice offset \p offset and copy to \p dstptr
- *        without affecting the current reader position.
- */
-#define rd_kafka_buf_peek(rkbuf,offset,dstptr,len) do {                 \
-                size_t __len2 = (size_t)(len);                          \
-                if (!rd_slice_peek(&(rkbuf)->rkbuf_reader, offset,      \
-                                   dstptr, __len2))                     \
-                        rd_kafka_buf_check_len(rkbuf, (offset)+(__len2)); \
-        } while (0)
-
-
-/**
- * Read a 16,32,64-bit integer and store it in 'dstptr'
- */
-#define rd_kafka_buf_read_i64(rkbuf,dstptr) do {                        \
-                int64_t _v;                                             \
-                rd_kafka_buf_read(rkbuf, &_v, sizeof(_v));              \
-                *(dstptr) = be64toh(_v);                                \
-        } while (0)
-
-#define rd_kafka_buf_peek_i64(rkbuf,of,dstptr) do {                     \
-                int64_t _v;                                             \
-                rd_kafka_buf_peek(rkbuf, of, &_v, sizeof(_v));          \
-                *(dstptr) = be64toh(_v);                                \
-        } while (0)
-
-#define rd_kafka_buf_read_i32(rkbuf,dstptr) do {                        \
-                int32_t _v;                                             \
-                rd_kafka_buf_read(rkbuf, &_v, sizeof(_v));              \
-                *(dstptr) = be32toh(_v);                                \
-        } while (0)
-
-/* Same as .._read_i32 but does a direct assignment.
- * dst is assumed to be a scalar, not a pointer. */
-#define rd_kafka_buf_read_i32a(rkbuf, dst) do {				\
-                int32_t _v;                                             \
-		rd_kafka_buf_read(rkbuf, &_v, 4);			\
-		dst = (int32_t) be32toh(_v);				\
-	} while (0)
-
-#define rd_kafka_buf_read_i16(rkbuf,dstptr) do {                        \
-                int16_t _v;                                             \
-                rd_kafka_buf_read(rkbuf, &_v, sizeof(_v));              \
-                *(dstptr) = be16toh(_v);                                \
-        } while (0)
-
-
-#define rd_kafka_buf_read_i16a(rkbuf, dst) do {				\
-                int16_t _v;                                             \
-		rd_kafka_buf_read(rkbuf, &_v, 2);			\
-                dst = (int16_t)be16toh(_v);				\
-	} while (0)
-
-#define rd_kafka_buf_read_i8(rkbuf, dst) rd_kafka_buf_read(rkbuf, dst, 1)
-
-#define rd_kafka_buf_peek_i8(rkbuf,of,dst) rd_kafka_buf_peek(rkbuf,of,dst,1)
-
-
-/**
- * @brief Read varint and store in int64_t \p dst
- */
-#define rd_kafka_buf_read_varint(rkbuf,dst) do {                        \
-                int64_t _v;                                             \
-                size_t _r = rd_varint_dec_slice(&(rkbuf)->rkbuf_reader, &_v); \
-                if (unlikely(RD_UVARINT_UNDERFLOW(_r)))                 \
-                        rd_kafka_buf_parse_fail(rkbuf,                  \
-                                                "varint parsing failed: " \
-                                                "buffer underflow");    \
-                *(dst) = _v;                                            \
-        } while (0)
-
-/* Read Kafka String representation (2+N).
- * The kstr data will be updated to point to the rkbuf. */
-#define rd_kafka_buf_read_str(rkbuf, kstr) do {                         \
-                int _klen;                                              \
-                rd_kafka_buf_read_i16a(rkbuf, (kstr)->len);             \
-                _klen = RD_KAFKAP_STR_LEN(kstr);                        \
-                if (RD_KAFKAP_STR_LEN0(_klen) == 0)                     \
-                        (kstr)->str = NULL;                             \
-                else if (!((kstr)->str =                                \
-                           rd_slice_ensure_contig(&rkbuf->rkbuf_reader, \
-                                                     _klen)))           \
-                        rd_kafka_buf_check_len(rkbuf, _klen);           \
-        } while (0)
-
-/* Read Kafka String representation (2+N) and write it to the \p tmpabuf
- * with a trailing nul byte. */
-#define rd_kafka_buf_read_str_tmpabuf(rkbuf, tmpabuf, dst) do {		\
-                rd_kafkap_str_t _kstr;					\
-		size_t _slen;						\
-		char *_dst;						\
-		rd_kafka_buf_read_str(rkbuf, &_kstr);			\
-		_slen = RD_KAFKAP_STR_LEN(&_kstr);			\
-		if (!(_dst =						\
-		      rd_tmpabuf_write(tmpabuf, _kstr.str, _slen+1)))	\
-			rd_kafka_buf_parse_fail(			\
-				rkbuf,					\
-				"Not enough room in tmpabuf: "		\
-				"%"PRIusz"+%"PRIusz			\
-				" > %"PRIusz,				\
-				(tmpabuf)->of, _slen+1, (tmpabuf)->size); \
-		_dst[_slen] = '\0';					\
-		dst = (void *)_dst;					\
-	} while (0)
-
-/**
- * Skip a string.
- */
-#define rd_kafka_buf_skip_str(rkbuf) do {			\
-		int16_t _slen;					\
-		rd_kafka_buf_read_i16(rkbuf, &_slen);		\
-		rd_kafka_buf_skip(rkbuf, RD_KAFKAP_STR_LEN0(_slen));	\
-	} while (0)
-
-/* Read Kafka Bytes representation (4+N).
- *  The 'kbytes' will be updated to point to rkbuf data */
-#define rd_kafka_buf_read_bytes(rkbuf, kbytes) do {                     \
-                int _klen;                                              \
-                rd_kafka_buf_read_i32a(rkbuf, _klen);                   \
-                (kbytes)->len = _klen;                                  \
-                if (RD_KAFKAP_BYTES_IS_NULL(kbytes)) {                  \
-                        (kbytes)->data = NULL;                          \
-                        (kbytes)->len = 0;                              \
-                } else if (RD_KAFKAP_BYTES_LEN(kbytes) == 0)            \
-                        (kbytes)->data = "";                            \
-                else if (!((kbytes)->data =                             \
-                           rd_slice_ensure_contig(&(rkbuf)->rkbuf_reader, \
-                                                  _klen)))              \
-                        rd_kafka_buf_check_len(rkbuf, _klen);           \
-        } while (0)
-
-
-/**
- * @brief Read \p size bytes from buffer, setting \p *ptr to the start
- *        of the memory region.
- */
-#define rd_kafka_buf_read_ptr(rkbuf,ptr,size) do {                      \
-                size_t _klen = size;                                    \
-                if (!(*(ptr) = (void *)                                 \
-                      rd_slice_ensure_contig(&(rkbuf)->rkbuf_reader, _klen))) \
-                        rd_kafka_buf_check_len(rkbuf, _klen);           \
-        } while (0)
-
-
-/**
- * @brief Read varint-length Kafka Bytes representation
- */
-#define rd_kafka_buf_read_bytes_varint(rkbuf,kbytes) do {               \
-                int64_t _len2;                                          \
-                size_t _r = rd_varint_dec_slice(&(rkbuf)->rkbuf_reader, \
-                                                &_len2);                \
-                if (unlikely(RD_UVARINT_UNDERFLOW(_r)))                 \
-                        rd_kafka_buf_parse_fail(rkbuf,                  \
-                                                "varint parsing failed: " \
-                                                "buffer underflow");    \
-                (kbytes)->len = (int32_t)_len2;                         \
-                if (RD_KAFKAP_BYTES_IS_NULL(kbytes)) {                  \
-                        (kbytes)->data = NULL;                          \
-                        (kbytes)->len = 0;                              \
-                } else if (RD_KAFKAP_BYTES_LEN(kbytes) == 0)            \
-                        (kbytes)->data = "";                            \
-                else if (!((kbytes)->data =                             \
-                           rd_slice_ensure_contig(&(rkbuf)->rkbuf_reader, \
-                                                  _len2)))              \
-                        rd_kafka_buf_check_len(rkbuf, _len2);           \
-        } while (0)
-
-
-/**
- * Response handling callback.
- *
- * NOTE: Callbacks must check for 'err == RD_KAFKA_RESP_ERR__DESTROY'
- *       which indicates that some entity is terminating (rd_kafka_t, broker,
- *       toppar, queue, etc) and the callback may not be called in the
- *       correct thread. In this case the callback must perform just
- *       the most minimal cleanup and must not trigger any other operations.
- *
- * NOTE: rkb, reply and request may be NULL, depending on error situation.
- */
-typedef void (rd_kafka_resp_cb_t) (rd_kafka_t *rk,
-				   rd_kafka_broker_t *rkb,
-                                   rd_kafka_resp_err_t err,
-                                   rd_kafka_buf_t *reply,
-                                   rd_kafka_buf_t *request,
-                                   void *opaque);
-
-struct rd_kafka_buf_s { /* rd_kafka_buf_t */
-	TAILQ_ENTRY(rd_kafka_buf_s) rkbuf_link;
-
-	int32_t rkbuf_corrid;
-
-	rd_ts_t rkbuf_ts_retry;    /* Absolute send retry time */
-
-	int     rkbuf_flags; /* RD_KAFKA_OP_F */
-
-        rd_buf_t rkbuf_buf;        /**< Send/Recv byte buffer */
-        rd_slice_t rkbuf_reader;   /**< Buffer slice reader for rkbuf_buf */
-
-	int     rkbuf_connid;      /* broker connection id (used when buffer
-				    * was partially sent). */
-        size_t  rkbuf_totlen;      /* recv: total expected length,
-                                    * send: not used */
-
-	rd_crc32_t rkbuf_crc;      /* Current CRC calculation */
-
-	struct rd_kafkap_reqhdr rkbuf_reqhdr;   /* Request header.
-                                                 * These fields are encoded
-                                                 * and written to output buffer
-                                                 * on buffer finalization. */
-	struct rd_kafkap_reshdr rkbuf_reshdr;   /* Response header.
-                                                 * Decoded fields are copied
-                                                 * here from the buffer
-                                                 * to provide an ease-of-use
-                                                 * interface to the header */
-
-	int32_t rkbuf_expected_size;  /* expected size of message */
-
-        rd_kafka_replyq_t   rkbuf_replyq;       /* Enqueue response on replyq */
-        rd_kafka_replyq_t   rkbuf_orig_replyq;  /* Original replyq to be used
-                                                 * for retries from inside
-                                                 * the rkbuf_cb() callback
-                                                 * since rkbuf_replyq will
-                                                 * have been reset. */
-        rd_kafka_resp_cb_t *rkbuf_cb;           /* Response callback */
-        struct rd_kafka_buf_s *rkbuf_response;  /* Response buffer */
-
-        struct rd_kafka_broker_s *rkbuf_rkb;
-
-	rd_refcnt_t rkbuf_refcnt;
-	void   *rkbuf_opaque;
-
-	int     rkbuf_retries;            /* Retries so far. */
-#define RD_KAFKA_BUF_NO_RETRIES  1000000  /* Do not retry */
-
-        int     rkbuf_features;   /* Required feature(s) that must be
-                                   * supported by broker. */
-
-	rd_ts_t rkbuf_ts_enq;
-	rd_ts_t rkbuf_ts_sent;    /* Initially: Absolute time of transmission,
-				   * after response: RTT. */
-	rd_ts_t rkbuf_ts_timeout;
-
-        int64_t rkbuf_offset;     /* Used by OffsetCommit */
-
-	rd_list_t *rkbuf_rktp_vers;    /* Toppar + Op Version map.
-					* Used by FetchRequest. */
-
-	rd_kafka_msgq_t rkbuf_msgq;
-
-        rd_kafka_resp_err_t rkbuf_err;      /* Buffer parsing error code */
-
-        union {
-                struct {
-                        rd_list_t *topics;  /* Requested topics (char *) */
-                        char *reason;       /* Textual reason */
-                        rd_kafka_op_t *rko; /* Originating rko with replyq
-                                             * (if any) */
-                        int all_topics;     /* Full/All topics requested */
-
-                        int *decr;          /* Decrement this integer by one
-                                             * when request is complete:
-                                             * typically points to metadata
-                                             * cache's full_.._sent.
-                                             * Will be performed with
-                                             * decr_lock held. */
-                        mtx_t *decr_lock;
-
-                } Metadata;
-        } rkbuf_u;
-};
-
-
-typedef struct rd_kafka_bufq_s {
-	TAILQ_HEAD(, rd_kafka_buf_s) rkbq_bufs;
-	rd_atomic32_t  rkbq_cnt;
-	rd_atomic32_t  rkbq_msg_cnt;
-} rd_kafka_bufq_t;
-
-#define rd_kafka_bufq_cnt(rkbq) rd_atomic32_get(&(rkbq)->rkbq_cnt)
-
-
-#define rd_kafka_buf_keep(rkbuf) rd_refcnt_add(&(rkbuf)->rkbuf_refcnt)
-#define rd_kafka_buf_destroy(rkbuf)                                     \
-        rd_refcnt_destroywrapper(&(rkbuf)->rkbuf_refcnt,                \
-                                 rd_kafka_buf_destroy_final(rkbuf))
-
-void rd_kafka_buf_destroy_final (rd_kafka_buf_t *rkbuf);
-void rd_kafka_buf_push0 (rd_kafka_buf_t *rkbuf, const void *buf, size_t len,
-                         int allow_crc_calc, void (*free_cb) (void *));
-#define rd_kafka_buf_push(rkbuf,buf,len,free_cb)                        \
-        rd_kafka_buf_push0(rkbuf,buf,len,1/*allow_crc*/,free_cb)
-rd_kafka_buf_t *rd_kafka_buf_new0 (int segcnt, size_t size, int flags);
-#define rd_kafka_buf_new(segcnt,size) \
-        rd_kafka_buf_new0(segcnt,size,0)
-rd_kafka_buf_t *rd_kafka_buf_new_request (rd_kafka_broker_t *rkb, int16_t ApiKey,
-                                          int segcnt, size_t size);
-rd_kafka_buf_t *rd_kafka_buf_new_shadow (const void *ptr, size_t size,
-                                         void (*free_cb) (void *));
-void rd_kafka_bufq_enq (rd_kafka_bufq_t *rkbufq, rd_kafka_buf_t *rkbuf);
-void rd_kafka_bufq_deq (rd_kafka_bufq_t *rkbufq, rd_kafka_buf_t *rkbuf);
-void rd_kafka_bufq_init(rd_kafka_bufq_t *rkbufq);
-void rd_kafka_bufq_concat (rd_kafka_bufq_t *dst, rd_kafka_bufq_t *src);
-void rd_kafka_bufq_purge (rd_kafka_broker_t *rkb,
-                          rd_kafka_bufq_t *rkbufq,
-                          rd_kafka_resp_err_t err);
-void rd_kafka_bufq_connection_reset (rd_kafka_broker_t *rkb,
-				     rd_kafka_bufq_t *rkbufq);
-void rd_kafka_bufq_dump (rd_kafka_broker_t *rkb, const char *fac,
-			 rd_kafka_bufq_t *rkbq);
-
-int rd_kafka_buf_retry (rd_kafka_broker_t *rkb, rd_kafka_buf_t *rkbuf);
-
-void rd_kafka_buf_handle_op (rd_kafka_op_t *rko, rd_kafka_resp_err_t err);
-void rd_kafka_buf_callback (rd_kafka_t *rk,
-			    rd_kafka_broker_t *rkb, rd_kafka_resp_err_t err,
-                            rd_kafka_buf_t *response, rd_kafka_buf_t *request);
-
-
-
-/**
- *
- * Write buffer interface
- *
- */
-
-/**
- * Set request API type version
- */
-static RD_UNUSED RD_INLINE void
-rd_kafka_buf_ApiVersion_set (rd_kafka_buf_t *rkbuf,
-                             int16_t version, int features) {
-        rkbuf->rkbuf_reqhdr.ApiVersion = version;
-        rkbuf->rkbuf_features = features;
-}
-
-
-/**
- * @returns the ApiVersion for a request
- */
-#define rd_kafka_buf_ApiVersion(rkbuf) ((rkbuf)->rkbuf_reqhdr.ApiVersion)
-
-
-
-/**
- * Write (copy) data to buffer at current write-buffer position.
- * There must be enough space allocated in the rkbuf.
- * Returns offset to written destination buffer.
- */
-static RD_INLINE size_t rd_kafka_buf_write (rd_kafka_buf_t *rkbuf,
-                                        const void *data, size_t len) {
-        size_t r;
-
-        r = rd_buf_write(&rkbuf->rkbuf_buf, data, len);
-
-        if (rkbuf->rkbuf_flags & RD_KAFKA_OP_F_CRC)
-                rkbuf->rkbuf_crc = rd_crc32_update(rkbuf->rkbuf_crc, data, len);
-
-        return r;
-}
-
-
-
-/**
- * Write (copy) 'data' to buffer at 'ptr'.
- * There must be enough space to fit 'len'.
- * This will overwrite the buffer at given location and length.
- *
- * NOTE: rd_kafka_buf_update() MUST NOT be called when a CRC calculation
- *       is in progress (between rd_kafka_buf_crc_init() & .._crc_finalize())
- */
-static RD_INLINE void rd_kafka_buf_update (rd_kafka_buf_t *rkbuf, size_t of,
-                                          const void *data, size_t len) {
-        rd_kafka_assert(NULL, !(rkbuf->rkbuf_flags & RD_KAFKA_OP_F_CRC));
-        rd_buf_write_update(&rkbuf->rkbuf_buf, of, data, len);
-}
-
-/**
- * Write int8_t to buffer.
- */
-static RD_INLINE size_t rd_kafka_buf_write_i8 (rd_kafka_buf_t *rkbuf,
-					      int8_t v) {
-        return rd_kafka_buf_write(rkbuf, &v, sizeof(v));
-}
-
-/**
- * Update int8_t in buffer at offset 'of'.
- * 'of' should have been previously returned by `.._buf_write_i8()`.
- */
-static RD_INLINE void rd_kafka_buf_update_i8 (rd_kafka_buf_t *rkbuf,
-					     size_t of, int8_t v) {
-        rd_kafka_buf_update(rkbuf, of, &v, sizeof(v));
-}
-
-/**
- * Write int16_t to buffer.
- * The value will be endian-swapped before write.
- */
-static RD_INLINE size_t rd_kafka_buf_write_i16 (rd_kafka_buf_t *rkbuf,
-					       int16_t v) {
-        v = htobe16(v);
-        return rd_kafka_buf_write(rkbuf, &v, sizeof(v));
-}
-
-/**
- * Update int16_t in buffer at offset 'of'.
- * 'of' should have been previously returned by `.._buf_write_i16()`.
- */
-static RD_INLINE void rd_kafka_buf_update_i16 (rd_kafka_buf_t *rkbuf,
-                                              size_t of, int16_t v) {
-        v = htobe16(v);
-        rd_kafka_buf_update(rkbuf, of, &v, sizeof(v));
-}
-
-/**
- * Write int32_t to buffer.
- * The value will be endian-swapped before write.
- */
-static RD_INLINE size_t rd_kafka_buf_write_i32 (rd_kafka_buf_t *rkbuf,
-                                               int32_t v) {
-        v = htobe32(v);
-        return rd_kafka_buf_write(rkbuf, &v, sizeof(v));
-}
-
-/**
- * Update int32_t in buffer at offset 'of'.
- * 'of' should have been previously returned by `.._buf_write_i32()`.
- */
-static RD_INLINE void rd_kafka_buf_update_i32 (rd_kafka_buf_t *rkbuf,
-                                              size_t of, int32_t v) {
-        v = htobe32(v);
-        rd_kafka_buf_update(rkbuf, of, &v, sizeof(v));
-}
-
-/**
- * Update uint32_t in buffer at offset 'of'.
- * 'of' should have been previously returned by `.._buf_write_i32()`.
- */
-static RD_INLINE void rd_kafka_buf_update_u32 (rd_kafka_buf_t *rkbuf,
-                                              size_t of, uint32_t v) {
-        v = htobe32(v);
-        rd_kafka_buf_update(rkbuf, of, &v, sizeof(v));
-}
-
-
-/**
- * Write int64_t to buffer.
- * The value will be endian-swapped before write.
- */
-static RD_INLINE size_t rd_kafka_buf_write_i64 (rd_kafka_buf_t *rkbuf, int64_t v) {
-        v = htobe64(v);
-        return rd_kafka_buf_write(rkbuf, &v, sizeof(v));
-}
-
-/**
- * Update int64_t in buffer at offset 'of'.
- * 'of' should have been previously returned by `.._buf_write_i64()`.
- */
-static RD_INLINE void rd_kafka_buf_update_i64 (rd_kafka_buf_t *rkbuf,
-                                              size_t of, int64_t v) {
-        v = htobe64(v);
-        rd_kafka_buf_update(rkbuf, of, &v, sizeof(v));
-}
-
-
-/**
- * Write (copy) Kafka string to buffer.
- */
-static RD_INLINE size_t rd_kafka_buf_write_kstr (rd_kafka_buf_t *rkbuf,
-                                                const rd_kafkap_str_t *kstr) {
-        return rd_kafka_buf_write(rkbuf, RD_KAFKAP_STR_SER(kstr),
-				  RD_KAFKAP_STR_SIZE(kstr));
-}
-
-/**
- * Write (copy) char * string to buffer.
- */
-static RD_INLINE size_t rd_kafka_buf_write_str (rd_kafka_buf_t *rkbuf,
-                                               const char *str, size_t len) {
-        size_t r;
-        if (!str)
-                len = RD_KAFKAP_STR_LEN_NULL;
-        else if (len == (size_t)-1)
-                len = strlen(str);
-        r = rd_kafka_buf_write_i16(rkbuf, (int16_t) len);
-        if (str)
-                rd_kafka_buf_write(rkbuf, str, len);
-        return r;
-}
-
-
-/**
- * Push (i.e., no copy) Kafka string to buffer iovec
- */
-static RD_INLINE void rd_kafka_buf_push_kstr (rd_kafka_buf_t *rkbuf,
-                                             const rd_kafkap_str_t *kstr) {
-	rd_kafka_buf_push(rkbuf, RD_KAFKAP_STR_SER(kstr),
-			  RD_KAFKAP_STR_SIZE(kstr), NULL);
-}
-
-
-
-/**
- * Write (copy) Kafka bytes to buffer.
- */
-static RD_INLINE size_t rd_kafka_buf_write_kbytes (rd_kafka_buf_t *rkbuf,
-					          const rd_kafkap_bytes_t *kbytes){
-        return rd_kafka_buf_write(rkbuf, RD_KAFKAP_BYTES_SER(kbytes),
-                                  RD_KAFKAP_BYTES_SIZE(kbytes));
-}
-
-/**
- * Push (i.e., no copy) Kafka bytes to buffer iovec
- */
-static RD_INLINE void rd_kafka_buf_push_kbytes (rd_kafka_buf_t *rkbuf,
-					       const rd_kafkap_bytes_t *kbytes){
-	rd_kafka_buf_push(rkbuf, RD_KAFKAP_BYTES_SER(kbytes),
-			  RD_KAFKAP_BYTES_SIZE(kbytes), NULL);
-}
-
-/**
- * Write (copy) binary bytes to buffer, encapsulated as Kafka bytes.
- */
-static RD_INLINE size_t rd_kafka_buf_write_bytes (rd_kafka_buf_t *rkbuf,
-                                                 const void *payload, size_t size) {
-        size_t r;
-        if (!payload)
-                size = RD_KAFKAP_BYTES_LEN_NULL;
-        r = rd_kafka_buf_write_i32(rkbuf, (int32_t) size);
-        if (payload)
-                rd_kafka_buf_write(rkbuf, payload, size);
-        return r;
-}
-
-
-
-
-/**
- * Write Kafka Message to buffer
- * The number of bytes written is returned in '*outlenp'.
- *
- * Returns the buffer offset of the first byte.
- */
-size_t rd_kafka_buf_write_Message (rd_kafka_broker_t *rkb,
-				   rd_kafka_buf_t *rkbuf,
-				   int64_t Offset, int8_t MagicByte,
-				   int8_t Attributes, int64_t Timestamp,
-				   const void *key, int32_t key_len,
-				   const void *payload, int32_t len,
-				   int *outlenp);
-
-/**
- * Start calculating CRC from this point on, tracking it in rkbuf->rkbuf_crc.
- */
-static RD_INLINE RD_UNUSED void rd_kafka_buf_crc_init (rd_kafka_buf_t *rkbuf) {
-	rd_kafka_assert(NULL, !(rkbuf->rkbuf_flags & RD_KAFKA_OP_F_CRC));
-	rkbuf->rkbuf_flags |= RD_KAFKA_OP_F_CRC;
-	rkbuf->rkbuf_crc = rd_crc32_init();
-}
-
-/**
- * Finalizes CRC calculation and returns the calculated checksum.
- */
-static RD_INLINE RD_UNUSED
-rd_crc32_t rd_kafka_buf_crc_finalize (rd_kafka_buf_t *rkbuf) {
-	rkbuf->rkbuf_flags &= ~RD_KAFKA_OP_F_CRC;
-	return rd_crc32_finalize(rkbuf->rkbuf_crc);
-}
-
-
-
-
-
-/**
- * @brief Check if buffer's replyq.version is outdated.
- * @param rkbuf: may be NULL, for convenience.
- *
- * @returns 1 if this is an outdated buffer, else 0.
- */
-static RD_UNUSED RD_INLINE int
-rd_kafka_buf_version_outdated (const rd_kafka_buf_t *rkbuf, int version) {
-        return rkbuf && rkbuf->rkbuf_replyq.version &&
-                rkbuf->rkbuf_replyq.version < version;
-}


[19/51] [partial] nifi-minifi-cpp git commit: MINIFICPP-512 - upgrade to librdkafka 0.11.4

Posted by ph...@apache.org.
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdkafka_request.c
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdkafka_request.c b/thirdparty/librdkafka-0.11.1/src/rdkafka_request.c
deleted file mode 100644
index 2d023b4..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdkafka_request.c
+++ /dev/null
@@ -1,1848 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2015, Magnus Edenhill
- * All rights reserved.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met: 
- * 
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer. 
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution. 
- * 
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <stdarg.h>
-
-#include "rdkafka_int.h"
-#include "rdkafka_request.h"
-#include "rdkafka_broker.h"
-#include "rdkafka_offset.h"
-#include "rdkafka_topic.h"
-#include "rdkafka_partition.h"
-#include "rdkafka_metadata.h"
-#include "rdkafka_msgset.h"
-
-#include "rdrand.h"
-
-/**
- * Kafka protocol request and response handling.
- * All of this code runs in the broker thread and uses op queues for
- * propagating results back to the various sub-systems operating in
- * other threads.
- */
-
-
-/**
- * @brief Decide action(s) to take based on the returned error code.
- *
- * The optional var-args form a .._ACTION_END-terminated list
- * of action,error tuples which override the general behaviour.
- * It is to be read as: for \p error, return \p action(s).
- */
-int rd_kafka_err_action (rd_kafka_broker_t *rkb,
-			 rd_kafka_resp_err_t err,
-			 rd_kafka_buf_t *rkbuf,
-			 rd_kafka_buf_t *request, ...) {
-	va_list ap;
-        int actions = 0;
-	int exp_act;
-
-	/* Match explicitly defined error mappings first. */
-	va_start(ap, request);
-	while ((exp_act = va_arg(ap, int))) {
-		int exp_err = va_arg(ap, int);
-
-		if (err == exp_err)
-			actions |= exp_act;
-	}
-	va_end(ap);
-
-	if (err && rkb && request)
-                rd_rkb_dbg(rkb, BROKER, "REQERR",
-                           "%sRequest failed: %s: explicit actions 0x%x",
-                           rd_kafka_ApiKey2str(request->rkbuf_reqhdr.ApiKey),
-                           rd_kafka_err2str(err), actions);
-
-	/* Explicit error match. */
-	if (actions)
-		return actions;
-
-	/* Default error matching */
-        switch (err)
-        {
-        case RD_KAFKA_RESP_ERR_NO_ERROR:
-                break;
-        case RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE:
-        case RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION:
-        case RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE:
-        case RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE:
-        case RD_KAFKA_RESP_ERR_GROUP_COORDINATOR_NOT_AVAILABLE:
-        case RD_KAFKA_RESP_ERR_NOT_COORDINATOR_FOR_GROUP:
-        case RD_KAFKA_RESP_ERR__WAIT_COORD:
-                /* Request metadata information update */
-                actions |= RD_KAFKA_ERR_ACTION_REFRESH;
-                break;
-        case RD_KAFKA_RESP_ERR__TIMED_OUT:
-        case RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT:
-                /* Broker-side request handling timeout */
-	case RD_KAFKA_RESP_ERR__TRANSPORT:
-		/* Broker connection down */
-		actions |= RD_KAFKA_ERR_ACTION_RETRY;
-		break;
-        case RD_KAFKA_RESP_ERR__DESTROY:
-	case RD_KAFKA_RESP_ERR_INVALID_SESSION_TIMEOUT:
-        case RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE:
-        default:
-                actions |= RD_KAFKA_ERR_ACTION_PERMANENT;
-                break;
-        }
-
-        return actions;
-}
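For reference, a call-site sketch of the action,error tuple convention described above (each pair is the action(s) to take followed by the error they apply to, terminated by RD_KAFKA_ERR_ACTION_END; this mirrors the call in rd_kafka_handle_Offset further down and is illustrative only):

    actions = rd_kafka_err_action(
            rkb, ErrorCode, rkbuf, request,

            RD_KAFKA_ERR_ACTION_PERMANENT,
            RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART,

            RD_KAFKA_ERR_ACTION_REFRESH|RD_KAFKA_ERR_ACTION_RETRY,
            RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION,

            RD_KAFKA_ERR_ACTION_END);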
-
-
-/**
- * Send GroupCoordinatorRequest
- */
-void rd_kafka_GroupCoordinatorRequest (rd_kafka_broker_t *rkb,
-                                       const rd_kafkap_str_t *cgrp,
-                                       rd_kafka_replyq_t replyq,
-                                       rd_kafka_resp_cb_t *resp_cb,
-                                       void *opaque) {
-        rd_kafka_buf_t *rkbuf;
-
-        rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_GroupCoordinator, 1,
-                                         RD_KAFKAP_STR_SIZE(cgrp));
-        rd_kafka_buf_write_kstr(rkbuf, cgrp);
-
-        rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque);
-}
-
-
-
-
-/**
- * @brief Parses and handles Offset replies.
- *
- * Returns the parsed offsets (and errors) in \p offsets
- *
- * @returns 0 on success, else an error.
- */
-rd_kafka_resp_err_t rd_kafka_handle_Offset (rd_kafka_t *rk,
-                                            rd_kafka_broker_t *rkb,
-                                            rd_kafka_resp_err_t err,
-                                            rd_kafka_buf_t *rkbuf,
-                                            rd_kafka_buf_t *request,
-                                            rd_kafka_topic_partition_list_t
-                                            *offsets) {
-
-        const int log_decode_errors = LOG_ERR;
-        int16_t ErrorCode = 0;
-        int32_t TopicArrayCnt;
-        int actions;
-        int16_t api_version;
-
-        if (err) {
-                ErrorCode = err;
-                goto err;
-        }
-
-        api_version = request->rkbuf_reqhdr.ApiVersion;
-
-        /* NOTE:
-         * Broker may return offsets in a different constellation than
-         * in the original request. */
-
-        rd_kafka_buf_read_i32(rkbuf, &TopicArrayCnt);
-        while (TopicArrayCnt-- > 0) {
-                rd_kafkap_str_t ktopic;
-                int32_t PartArrayCnt;
-                char *topic_name;
-
-                rd_kafka_buf_read_str(rkbuf, &ktopic);
-                rd_kafka_buf_read_i32(rkbuf, &PartArrayCnt);
-
-                RD_KAFKAP_STR_DUPA(&topic_name, &ktopic);
-
-                while (PartArrayCnt-- > 0) {
-                        int32_t kpartition;
-                        int32_t OffsetArrayCnt;
-                        int64_t Offset = -1;
-                        rd_kafka_topic_partition_t *rktpar;
-
-                        rd_kafka_buf_read_i32(rkbuf, &kpartition);
-                        rd_kafka_buf_read_i16(rkbuf, &ErrorCode);
-
-                        if (api_version == 1) {
-                                int64_t Timestamp;
-                                rd_kafka_buf_read_i64(rkbuf, &Timestamp);
-                                rd_kafka_buf_read_i64(rkbuf, &Offset);
-                        } else if (api_version == 0) {
-                                rd_kafka_buf_read_i32(rkbuf, &OffsetArrayCnt);
-                                /* We only request one offset so just grab
-                                 * the first one. */
-                                while (OffsetArrayCnt-- > 0)
-                                        rd_kafka_buf_read_i64(rkbuf, &Offset);
-                        } else {
-                                rd_kafka_assert(NULL, !*"NOTREACHED");
-                        }
-
-                        rktpar = rd_kafka_topic_partition_list_add(
-                                offsets, topic_name, kpartition);
-                        rktpar->err = ErrorCode;
-                        rktpar->offset = Offset;
-                }
-        }
-
-        goto done;
-
- err_parse:
-        ErrorCode = rkbuf->rkbuf_err;
- err:
-        actions = rd_kafka_err_action(
-                rkb, ErrorCode, rkbuf, request,
-                RD_KAFKA_ERR_ACTION_PERMANENT,
-                RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART,
-
-                RD_KAFKA_ERR_ACTION_REFRESH|RD_KAFKA_ERR_ACTION_RETRY,
-                RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION,
-
-                RD_KAFKA_ERR_ACTION_END);
-
-        if (actions & RD_KAFKA_ERR_ACTION_REFRESH) {
-                char tmp[256];
-                /* Re-query for leader */
-                rd_snprintf(tmp, sizeof(tmp),
-                            "OffsetRequest failed: %s",
-                            rd_kafka_err2str(ErrorCode));
-                rd_kafka_metadata_refresh_known_topics(rk, NULL, 1/*force*/,
-                                                       tmp);
-        }
-
-        if (actions & RD_KAFKA_ERR_ACTION_RETRY) {
-                if (rd_kafka_buf_retry(rkb, request))
-                        return RD_KAFKA_RESP_ERR__IN_PROGRESS;
-                /* FALLTHRU */
-        }
-
-done:
-        return ErrorCode;
-}
-
-
-
-
-
-
-/**
- * Send OffsetRequest for toppar 'rktp'.
- */
-void rd_kafka_OffsetRequest (rd_kafka_broker_t *rkb,
-                             rd_kafka_topic_partition_list_t *partitions,
-                             int16_t api_version,
-                             rd_kafka_replyq_t replyq,
-                             rd_kafka_resp_cb_t *resp_cb,
-                             void *opaque) {
-        rd_kafka_buf_t *rkbuf;
-        int i;
-        size_t of_TopicArrayCnt = 0, of_PartArrayCnt = 0;
-        const char *last_topic = "";
-        int32_t topic_cnt = 0, part_cnt = 0;
-
-        rd_kafka_topic_partition_list_sort_by_topic(partitions);
-
-        rkbuf = rd_kafka_buf_new_request(
-                rkb, RD_KAFKAP_Offset, 1,
-                /* ReplicaId+TopicArrayCnt+Topic */
-                4+4+100+
-                /* PartArrayCnt */
-                4 +
-                /* partition_cnt * Partition+Time+MaxNumOffs */
-                (partitions->cnt * (4+8+4)));
-
-        /* ReplicaId */
-        rd_kafka_buf_write_i32(rkbuf, -1);
-        /* TopicArrayCnt */
-        of_TopicArrayCnt = rd_kafka_buf_write_i32(rkbuf, 0); /* updated later */
-
-        for (i = 0 ; i < partitions->cnt ; i++) {
-                const rd_kafka_topic_partition_t *rktpar = &partitions->elems[i];
-
-                if (strcmp(rktpar->topic, last_topic)) {
-                        /* Finish last topic, if any. */
-                        if (of_PartArrayCnt > 0)
-                                rd_kafka_buf_update_i32(rkbuf,
-                                                        of_PartArrayCnt,
-                                                        part_cnt);
-
-                        /* Topic */
-                        rd_kafka_buf_write_str(rkbuf, rktpar->topic, -1);
-                        topic_cnt++;
-                        last_topic = rktpar->topic;
-                        /* New topic so reset partition count */
-                        part_cnt = 0;
-
-                        /* PartitionArrayCnt: updated later */
-                        of_PartArrayCnt = rd_kafka_buf_write_i32(rkbuf, 0);
-                }
-
-                /* Partition */
-                rd_kafka_buf_write_i32(rkbuf, rktpar->partition);
-                part_cnt++;
-
-                /* Time/Offset */
-                rd_kafka_buf_write_i64(rkbuf, rktpar->offset);
-
-                if (api_version == 0) {
-                        /* MaxNumberOfOffsets */
-                        rd_kafka_buf_write_i32(rkbuf, 1);
-                }
-        }
-
-        if (of_PartArrayCnt > 0) {
-                rd_kafka_buf_update_i32(rkbuf, of_PartArrayCnt, part_cnt);
-                rd_kafka_buf_update_i32(rkbuf, of_TopicArrayCnt, topic_cnt);
-        }
-
-        rd_kafka_buf_ApiVersion_set(rkbuf, api_version,
-                                    api_version == 1 ?
-                                    RD_KAFKA_FEATURE_OFFSET_TIME : 0);
-
-        rd_rkb_dbg(rkb, TOPIC, "OFFSET",
-                   "OffsetRequest (v%hd, opv %d) "
-                   "for %"PRId32" topic(s) and %"PRId32" partition(s)",
-                   api_version, rkbuf->rkbuf_replyq.version,
-                   topic_cnt, partitions->cnt);
-
-        rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque);
-}
-
-
-/**
- * Generic handler for OffsetFetch responses.
- * Offsets for included partitions will be propagated through the passed
- * 'offsets' list.
- *
- * \p update_toppar: update toppar's committed_offset
- */
-rd_kafka_resp_err_t
-rd_kafka_handle_OffsetFetch (rd_kafka_t *rk,
-			     rd_kafka_broker_t *rkb,
-			     rd_kafka_resp_err_t err,
-			     rd_kafka_buf_t *rkbuf,
-			     rd_kafka_buf_t *request,
-			     rd_kafka_topic_partition_list_t *offsets,
-			     int update_toppar) {
-        const int log_decode_errors = LOG_ERR;
-        int32_t TopicArrayCnt;
-        int64_t offset = RD_KAFKA_OFFSET_INVALID;
-        rd_kafkap_str_t metadata;
-        int i;
-        int actions;
-        int seen_cnt = 0;
-
-        if (err)
-                goto err;
-
-        /* Set default offset for all partitions. */
-        rd_kafka_topic_partition_list_set_offsets(rkb->rkb_rk, offsets, 0,
-                                                  RD_KAFKA_OFFSET_INVALID,
-						  0 /* !is commit */);
-
-        rd_kafka_buf_read_i32(rkbuf, &TopicArrayCnt);
-        for (i = 0 ; i < TopicArrayCnt ; i++) {
-                rd_kafkap_str_t topic;
-                int32_t PartArrayCnt;
-                char *topic_name;
-                int j;
-
-                rd_kafka_buf_read_str(rkbuf, &topic);
-                rd_kafka_buf_read_i32(rkbuf, &PartArrayCnt);
-
-                RD_KAFKAP_STR_DUPA(&topic_name, &topic);
-
-                for (j = 0 ; j < PartArrayCnt ; j++) {
-                        int32_t partition;
-                        shptr_rd_kafka_toppar_t *s_rktp;
-                        rd_kafka_topic_partition_t *rktpar;
-                        int16_t err2;
-
-                        rd_kafka_buf_read_i32(rkbuf, &partition);
-                        rd_kafka_buf_read_i64(rkbuf, &offset);
-                        rd_kafka_buf_read_str(rkbuf, &metadata);
-                        rd_kafka_buf_read_i16(rkbuf, &err2);
-
-                        rktpar = rd_kafka_topic_partition_list_find(offsets,
-                                                                    topic_name,
-                                                                    partition);
-                        if (!rktpar) {
-				rd_rkb_dbg(rkb, TOPIC, "OFFSETFETCH",
-					   "OffsetFetchResponse: %s [%"PRId32"] "
-					   "not found in local list: ignoring",
-					   topic_name, partition);
-                                continue;
-			}
-
-                        seen_cnt++;
-
-			if (!(s_rktp = rktpar->_private)) {
-				s_rktp = rd_kafka_toppar_get2(rkb->rkb_rk,
-							      topic_name,
-							      partition, 0, 0);
-				/* May be NULL if topic is not locally known */
-				rktpar->_private = s_rktp;
-			}
-
-			/* broker reports invalid offset as -1 */
-			if (offset == -1)
-				rktpar->offset = RD_KAFKA_OFFSET_INVALID;
-			else
-				rktpar->offset = offset;
-                        rktpar->err = err2;
-
-			rd_rkb_dbg(rkb, TOPIC, "OFFSETFETCH",
-				   "OffsetFetchResponse: %s [%"PRId32"] offset %"PRId64,
-				   topic_name, partition, offset);
-
-			if (update_toppar && !err2 && s_rktp) {
-				rd_kafka_toppar_t *rktp = rd_kafka_toppar_s2i(s_rktp);
-				/* Update toppar's committed offset */
-				rd_kafka_toppar_lock(rktp);
-				rktp->rktp_committed_offset = rktpar->offset;
-				rd_kafka_toppar_unlock(rktp);
-			}
-
-
-                        if (rktpar->metadata)
-                                rd_free(rktpar->metadata);
-
-                        if (RD_KAFKAP_STR_IS_NULL(&metadata)) {
-                                rktpar->metadata = NULL;
-                                rktpar->metadata_size = 0;
-                        } else {
-                                rktpar->metadata = RD_KAFKAP_STR_DUP(&metadata);
-                                rktpar->metadata_size =
-                                        RD_KAFKAP_STR_LEN(&metadata);
-                        }
-                }
-        }
-
-
-err:
-        rd_rkb_dbg(rkb, TOPIC, "OFFFETCH",
-                   "OffsetFetch for %d/%d partition(s) returned %s",
-                   seen_cnt,
-                   offsets ? offsets->cnt : -1, rd_kafka_err2str(err));
-
-        actions = rd_kafka_err_action(rkb, err, rkbuf, request,
-				      RD_KAFKA_ERR_ACTION_END);
-
-        if (actions & RD_KAFKA_ERR_ACTION_REFRESH) {
-                /* Re-query for coordinator */
-                rd_kafka_cgrp_op(rkb->rkb_rk->rk_cgrp, NULL,
-                                 RD_KAFKA_NO_REPLYQ,
-				 RD_KAFKA_OP_COORD_QUERY, err);
-                if (request) {
-                        /* Schedule a retry */
-                        rd_kafka_buf_keep(request);
-                        rd_kafka_broker_buf_retry(request->rkbuf_rkb, request);
-                }
-        }
-
-	return err;
-
- err_parse:
-        err = rkbuf->rkbuf_err;
-        goto err;
-}
-
-
-
-/**
- * opaque=rko wrapper for handle_OffsetFetch.
- * rko->rko_payload MUST be a `rd_kafka_topic_partition_list_t *` which will
- * be filled in with fetch offsets.
- *
- * A reply will be sent on 'rko->rko_replyq' with type RD_KAFKA_OP_OFFSET_FETCH.
- *
- * Locality: cgrp's broker thread
- */
-void rd_kafka_op_handle_OffsetFetch (rd_kafka_t *rk,
-				     rd_kafka_broker_t *rkb,
-                                     rd_kafka_resp_err_t err,
-                                     rd_kafka_buf_t *rkbuf,
-                                     rd_kafka_buf_t *request,
-                                     void *opaque) {
-        rd_kafka_op_t *rko = opaque;
-        rd_kafka_op_t *rko_reply;
-        rd_kafka_topic_partition_list_t *offsets;
-
-	RD_KAFKA_OP_TYPE_ASSERT(rko, RD_KAFKA_OP_OFFSET_FETCH);
-
-        if (err == RD_KAFKA_RESP_ERR__DESTROY) {
-                /* Termination, quick cleanup. */
-                rd_kafka_op_destroy(rko);
-                return;
-        }
-
-        offsets = rd_kafka_topic_partition_list_copy(
-                rko->rko_u.offset_fetch.partitions);
-
-        rko_reply = rd_kafka_op_new(RD_KAFKA_OP_OFFSET_FETCH|RD_KAFKA_OP_REPLY);
-        rko_reply->rko_err = err;
-        rko_reply->rko_u.offset_fetch.partitions = offsets;
-        rko_reply->rko_u.offset_fetch.do_free = 1;
-	if (rko->rko_rktp)
-		rko_reply->rko_rktp = rd_kafka_toppar_keep(
-			rd_kafka_toppar_s2i(rko->rko_rktp));
-
-	/* If all partitions already had usable offsets then there
-	 * was no request sent and thus no reply, the offsets list is
-	 * good to go. */
-	if (rkbuf)
-		rd_kafka_handle_OffsetFetch(rkb->rkb_rk, rkb, err, rkbuf,
-					    request, offsets, 0);
-
-	rd_kafka_replyq_enq(&rko->rko_replyq, rko_reply, 0);
-
-        rd_kafka_op_destroy(rko);
-}
-
-
-
-
-
-
-/**
- * Send OffsetFetchRequest for toppar.
- *
- * Any partition with a usable offset will be ignored, if all partitions
- * have usable offsets then no request is sent at all but an empty
- * reply is enqueued on the replyq.
- */
-void rd_kafka_OffsetFetchRequest (rd_kafka_broker_t *rkb,
-                                  int16_t api_version,
-                                  rd_kafka_topic_partition_list_t *parts,
-				  rd_kafka_replyq_t replyq,
-                                  rd_kafka_resp_cb_t *resp_cb,
-                                  void *opaque) {
-	rd_kafka_buf_t *rkbuf;
-        size_t of_TopicCnt;
-        int TopicCnt = 0;
-        ssize_t of_PartCnt = -1;
-        const char *last_topic = NULL;
-        int PartCnt = 0;
-	int tot_PartCnt = 0;
-        int i;
-
-        rkbuf = rd_kafka_buf_new_request(
-                rkb, RD_KAFKAP_OffsetFetch, 1,
-                RD_KAFKAP_STR_SIZE(rkb->rkb_rk->rk_group_id) +
-                4 +
-                (parts->cnt * 32));
-
-
-        /* ConsumerGroup */
-        rd_kafka_buf_write_kstr(rkbuf, rkb->rkb_rk->rk_group_id);
-
-        /* Sort partitions by topic */
-        rd_kafka_topic_partition_list_sort_by_topic(parts);
-
-	/* TopicArrayCnt */
-        of_TopicCnt = rd_kafka_buf_write_i32(rkbuf, 0); /* Updated later */
-
-        for (i = 0 ; i < parts->cnt ; i++) {
-                rd_kafka_topic_partition_t *rktpar = &parts->elems[i];
-
-		/* Ignore partitions with a usable offset. */
-		if (rktpar->offset != RD_KAFKA_OFFSET_INVALID &&
-		    rktpar->offset != RD_KAFKA_OFFSET_STORED) {
-			rd_rkb_dbg(rkb, TOPIC, "OFFSET",
-				   "OffsetFetchRequest: skipping %s [%"PRId32"] "
-				   "with valid offset %s",
-				   rktpar->topic, rktpar->partition,
-				   rd_kafka_offset2str(rktpar->offset));
-			continue;
-		}
-
-                if (last_topic == NULL || strcmp(last_topic, rktpar->topic)) {
-                        /* New topic */
-
-                        /* Finalize previous PartitionCnt */
-                        if (PartCnt > 0)
-                                rd_kafka_buf_update_u32(rkbuf, of_PartCnt,
-                                                        PartCnt);
-
-                        /* TopicName */
-                        rd_kafka_buf_write_str(rkbuf, rktpar->topic, -1);
-                        /* PartitionCnt, finalized later */
-                        of_PartCnt = rd_kafka_buf_write_i32(rkbuf, 0);
-                        PartCnt = 0;
-			last_topic = rktpar->topic;
-                        TopicCnt++;
-                }
-
-                /* Partition */
-                rd_kafka_buf_write_i32(rkbuf,  rktpar->partition);
-                PartCnt++;
-		tot_PartCnt++;
-        }
-
-        /* Finalize previous PartitionCnt */
-        if (PartCnt > 0)
-                rd_kafka_buf_update_u32(rkbuf, of_PartCnt,  PartCnt);
-
-        /* Finalize TopicCnt */
-        rd_kafka_buf_update_u32(rkbuf, of_TopicCnt, TopicCnt);
-
-        rd_kafka_buf_ApiVersion_set(rkbuf, api_version, 0);
-
-	rd_rkb_dbg(rkb, TOPIC, "OFFSET",
-		   "OffsetFetchRequest(v%d) for %d/%d partition(s)",
-                   api_version, tot_PartCnt, parts->cnt);
-
-	if (tot_PartCnt == 0) {
-		/* No partitions need OffsetFetch, enqueue an empty
-		 * response right away. */
-                rkbuf->rkbuf_replyq = replyq;
-                rkbuf->rkbuf_cb     = resp_cb;
-                rkbuf->rkbuf_opaque = opaque;
-		rd_kafka_buf_callback(rkb->rkb_rk, rkb, 0, NULL, rkbuf);
-		return;
-	}
-
-
-
-	rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque);
-}
-
-
-/**
- * @remark \p offsets may be NULL if \p err is set
- */
-rd_kafka_resp_err_t
-rd_kafka_handle_OffsetCommit (rd_kafka_t *rk,
-			      rd_kafka_broker_t *rkb,
-			      rd_kafka_resp_err_t err,
-			      rd_kafka_buf_t *rkbuf,
-			      rd_kafka_buf_t *request,
-			      rd_kafka_topic_partition_list_t *offsets) {
-        const int log_decode_errors = LOG_ERR;
-        int32_t TopicArrayCnt;
-        int16_t ErrorCode = 0, last_ErrorCode = 0;
-	int errcnt = 0;
-        int i;
-	int actions;
-
-        if (err)
-		goto err;
-
-        rd_kafka_buf_read_i32(rkbuf, &TopicArrayCnt);
-        for (i = 0 ; i < TopicArrayCnt ; i++) {
-                rd_kafkap_str_t topic;
-                char *topic_str;
-                int32_t PartArrayCnt;
-                int j;
-
-                rd_kafka_buf_read_str(rkbuf, &topic);
-                rd_kafka_buf_read_i32(rkbuf, &PartArrayCnt);
-
-                RD_KAFKAP_STR_DUPA(&topic_str, &topic);
-
-                for (j = 0 ; j < PartArrayCnt ; j++) {
-                        int32_t partition;
-                        rd_kafka_topic_partition_t *rktpar;
-
-                        rd_kafka_buf_read_i32(rkbuf, &partition);
-                        rd_kafka_buf_read_i16(rkbuf, &ErrorCode);
-
-                        rktpar = rd_kafka_topic_partition_list_find(
-                                offsets, topic_str, partition);
-
-                        if (!rktpar) {
-                                /* Received offset for topic/partition we didn't
-                                 * ask for, this shouldn't really happen. */
-                                continue;
-                        }
-
-                        rktpar->err = ErrorCode;
-			if (ErrorCode) {
-				last_ErrorCode = ErrorCode;
-				errcnt++;
-			}
-                }
-        }
-
-	/* If all partitions failed use error code
-	 * from last partition as the global error. */
-	if (offsets && errcnt == offsets->cnt)
-		err = last_ErrorCode;
-	goto done;
-
- err_parse:
-        err = rkbuf->rkbuf_err;
-
- err:
-        actions = rd_kafka_err_action(
-		rkb, err, rkbuf, request,
-
-		RD_KAFKA_ERR_ACTION_PERMANENT,
-		RD_KAFKA_RESP_ERR_OFFSET_METADATA_TOO_LARGE,
-
-		RD_KAFKA_ERR_ACTION_RETRY,
-		RD_KAFKA_RESP_ERR_GROUP_LOAD_IN_PROGRESS,
-
-		RD_KAFKA_ERR_ACTION_REFRESH|RD_KAFKA_ERR_ACTION_SPECIAL,
-		RD_KAFKA_RESP_ERR_GROUP_COORDINATOR_NOT_AVAILABLE,
-
-		RD_KAFKA_ERR_ACTION_REFRESH|RD_KAFKA_ERR_ACTION_SPECIAL,
-		RD_KAFKA_RESP_ERR_NOT_COORDINATOR_FOR_GROUP,
-
-		RD_KAFKA_ERR_ACTION_REFRESH|RD_KAFKA_ERR_ACTION_RETRY,
-		RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION,
-
-		RD_KAFKA_ERR_ACTION_REFRESH|RD_KAFKA_ERR_ACTION_RETRY,
-		RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID,
-
-		RD_KAFKA_ERR_ACTION_RETRY,
-		RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS,
-
-		RD_KAFKA_ERR_ACTION_PERMANENT,
-		RD_KAFKA_RESP_ERR_INVALID_COMMIT_OFFSET_SIZE,
-
-		RD_KAFKA_ERR_ACTION_PERMANENT,
-		RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED,
-
-		RD_KAFKA_ERR_ACTION_PERMANENT,
-		RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED,
-
-		RD_KAFKA_ERR_ACTION_END);
-
-	if (actions & RD_KAFKA_ERR_ACTION_REFRESH && rk->rk_cgrp) {
-		/* Mark coordinator dead or re-query for coordinator.
-		 * ..dead() will trigger a re-query. */
-		if (actions & RD_KAFKA_ERR_ACTION_SPECIAL)
-			rd_kafka_cgrp_coord_dead(rk->rk_cgrp, err,
-						 "OffsetCommitRequest failed");
-		else
-			rd_kafka_cgrp_coord_query(rk->rk_cgrp,
-						  "OffsetCommitRequest failed");
-	}
-	if (actions & RD_KAFKA_ERR_ACTION_RETRY) {
-		if (rd_kafka_buf_retry(rkb, request))
-			return RD_KAFKA_RESP_ERR__IN_PROGRESS;
-		/* FALLTHRU */
-	}
-
- done:
-	return err;
-}
-
-
-
-
-/**
- * @brief Send OffsetCommitRequest for a list of partitions.
- *
- * @returns 0 if none of the partitions in \p offsets had valid offsets,
- *          else 1.
- */
-int rd_kafka_OffsetCommitRequest (rd_kafka_broker_t *rkb,
-                                   rd_kafka_cgrp_t *rkcg,
-                                   int16_t api_version,
-                                   rd_kafka_topic_partition_list_t *offsets,
-                                   rd_kafka_replyq_t replyq,
-                                   rd_kafka_resp_cb_t *resp_cb,
-                                   void *opaque, const char *reason) {
-	rd_kafka_buf_t *rkbuf;
-        ssize_t of_TopicCnt = -1;
-        int TopicCnt = 0;
-        const char *last_topic = NULL;
-        ssize_t of_PartCnt = -1;
-        int PartCnt = 0;
-	int tot_PartCnt = 0;
-        int i;
-
-        rd_kafka_assert(NULL, offsets != NULL);
-
-        rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_OffsetCommit,
-                                         1, 100 + (offsets->cnt * 128));
-
-        /* ConsumerGroup */
-        rd_kafka_buf_write_kstr(rkbuf, rkcg->rkcg_group_id);
-
-        /* v1,v2 */
-        if (api_version >= 1) {
-                /* ConsumerGroupGenerationId */
-                rd_kafka_buf_write_i32(rkbuf, rkcg->rkcg_generation_id);
-                /* ConsumerId */
-                rd_kafka_buf_write_kstr(rkbuf, rkcg->rkcg_member_id);
-                /* v2: RetentionTime */
-                if (api_version == 2)
-                        rd_kafka_buf_write_i64(rkbuf, -1);
-        }
-
-        /* Sort offsets by topic */
-        rd_kafka_topic_partition_list_sort_by_topic(offsets);
-
-        /* TopicArrayCnt: Will be updated when we know the number of topics. */
-        of_TopicCnt = rd_kafka_buf_write_i32(rkbuf, 0);
-
-        for (i = 0 ; i < offsets->cnt ; i++) {
-                rd_kafka_topic_partition_t *rktpar = &offsets->elems[i];
-
-		/* Skip partitions with invalid offset. */
-		if (rktpar->offset < 0)
-			continue;
-
-                if (last_topic == NULL || strcmp(last_topic, rktpar->topic)) {
-                        /* New topic */
-
-                        /* Finalize previous PartitionCnt */
-                        if (PartCnt > 0)
-                                rd_kafka_buf_update_u32(rkbuf, of_PartCnt,
-                                                        PartCnt);
-
-                        /* TopicName */
-                        rd_kafka_buf_write_str(rkbuf, rktpar->topic, -1);
-                        /* PartitionCnt, finalized later */
-                        of_PartCnt = rd_kafka_buf_write_i32(rkbuf, 0);
-                        PartCnt = 0;
-			last_topic = rktpar->topic;
-                        TopicCnt++;
-                }
-
-                /* Partition */
-                rd_kafka_buf_write_i32(rkbuf,  rktpar->partition);
-                PartCnt++;
-		tot_PartCnt++;
-
-                /* Offset */
-                rd_kafka_buf_write_i64(rkbuf, rktpar->offset);
-
-                /* v1: TimeStamp */
-                if (api_version == 1)
-                        rd_kafka_buf_write_i64(rkbuf, -1);// FIXME: retention time
-
-                /* Metadata */
-		/* Java client 0.9.0 and broker <0.10.0 can't parse
-		 * Null metadata fields, so as a workaround we send an
-		 * empty string if it's Null. */
-		if (!rktpar->metadata)
-			rd_kafka_buf_write_str(rkbuf, "", 0);
-		else
-			rd_kafka_buf_write_str(rkbuf,
-					       rktpar->metadata,
-					       rktpar->metadata_size);
-        }
-
-	if (tot_PartCnt == 0) {
-		/* No topic+partitions had valid offsets to commit. */
-		rd_kafka_replyq_destroy(&replyq);
-		rd_kafka_buf_destroy(rkbuf);
-		return 0;
-	}
-
-        /* Finalize previous PartitionCnt */
-        if (PartCnt > 0)
-                rd_kafka_buf_update_u32(rkbuf, of_PartCnt,  PartCnt);
-
-        /* Finalize TopicCnt */
-        rd_kafka_buf_update_u32(rkbuf, of_TopicCnt, TopicCnt);
-
-        rd_kafka_buf_ApiVersion_set(rkbuf, api_version, 0);
-
-        rd_rkb_dbg(rkb, TOPIC, "OFFSET",
-                   "Enqueue OffsetCommitRequest(v%d, %d/%d partition(s))): %s",
-                   api_version, tot_PartCnt, offsets->cnt, reason);
-
-	rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque);
-
-	return 1;
-
-}
-
-
-
-/**
- * @brief Write "consumer" protocol type MemberState for SyncGroupRequest to
- *        enveloping buffer \p rkbuf.
- */
-static void rd_kafka_group_MemberState_consumer_write (
-        rd_kafka_buf_t *env_rkbuf,
-        const rd_kafka_group_member_t *rkgm) {
-        rd_kafka_buf_t *rkbuf;
-        int i;
-        const char *last_topic = NULL;
-        size_t of_TopicCnt;
-        ssize_t of_PartCnt = -1;
-        int TopicCnt = 0;
-        int PartCnt = 0;
-        rd_slice_t slice;
-
-        rkbuf = rd_kafka_buf_new(1, 100);
-        rd_kafka_buf_write_i16(rkbuf, 0); /* Version */
-        of_TopicCnt = rd_kafka_buf_write_i32(rkbuf, 0); /* Updated later */
-        for (i = 0 ; i < rkgm->rkgm_assignment->cnt ; i++) {
-                const rd_kafka_topic_partition_t *rktpar;
-
-                rktpar = &rkgm->rkgm_assignment->elems[i];
-
-                if (!last_topic || strcmp(last_topic,
-                                          rktpar->topic)) {
-                        if (last_topic)
-                                /* Finalize previous PartitionCnt */
-                                rd_kafka_buf_update_i32(rkbuf, of_PartCnt,
-                                                        PartCnt);
-                        rd_kafka_buf_write_str(rkbuf, rktpar->topic, -1);
-                        /* Updated later */
-                        of_PartCnt = rd_kafka_buf_write_i32(rkbuf, 0);
-                        PartCnt = 0;
-                        last_topic = rktpar->topic;
-                        TopicCnt++;
-                }
-
-                rd_kafka_buf_write_i32(rkbuf, rktpar->partition);
-                PartCnt++;
-        }
-
-        if (of_PartCnt != -1)
-                rd_kafka_buf_update_i32(rkbuf, of_PartCnt, PartCnt);
-        rd_kafka_buf_update_i32(rkbuf, of_TopicCnt, TopicCnt);
-
-        rd_kafka_buf_write_kbytes(rkbuf, rkgm->rkgm_userdata);
-
-        /* Get pointer to binary buffer */
-        rd_slice_init_full(&slice, &rkbuf->rkbuf_buf);
-
-        /* Write binary buffer as Kafka Bytes to enveloping buffer. */
-        rd_kafka_buf_write_i32(env_rkbuf, (int32_t)rd_slice_remains(&slice));
-        rd_buf_write_slice(&env_rkbuf->rkbuf_buf, &slice);
-
-        rd_kafka_buf_destroy(rkbuf);
-}
-
-/**
- * Send SyncGroupRequest
- */
-void rd_kafka_SyncGroupRequest (rd_kafka_broker_t *rkb,
-                                const rd_kafkap_str_t *group_id,
-                                int32_t generation_id,
-                                const rd_kafkap_str_t *member_id,
-                                const rd_kafka_group_member_t
-                                *assignments,
-                                int assignment_cnt,
-                                rd_kafka_replyq_t replyq,
-                                rd_kafka_resp_cb_t *resp_cb,
-                                void *opaque) {
-        rd_kafka_buf_t *rkbuf;
-        int i;
-
-        rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_SyncGroup,
-                                         1,
-                                         RD_KAFKAP_STR_SIZE(group_id) +
-                                         4 /* GenerationId */ +
-                                         RD_KAFKAP_STR_SIZE(member_id) +
-                                         4 /* array size group_assignment */ +
-                                         (assignment_cnt * 100/*guess*/));
-        rd_kafka_buf_write_kstr(rkbuf, group_id);
-        rd_kafka_buf_write_i32(rkbuf, generation_id);
-        rd_kafka_buf_write_kstr(rkbuf, member_id);
-        rd_kafka_buf_write_i32(rkbuf, assignment_cnt);
-
-        for (i = 0 ; i < assignment_cnt ; i++) {
-                const rd_kafka_group_member_t *rkgm = &assignments[i];
-
-                rd_kafka_buf_write_kstr(rkbuf, rkgm->rkgm_member_id);
-                rd_kafka_group_MemberState_consumer_write(rkbuf, rkgm);
-        }
-
-        /* This is a blocking request */
-        rkbuf->rkbuf_flags |= RD_KAFKA_OP_F_BLOCKING;
-        rkbuf->rkbuf_ts_timeout = rd_clock() +
-                (rkb->rkb_rk->rk_conf.group_session_timeout_ms * 1000) +
-                (3*1000*1000/* 3s grace period*/);
-
-        rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque);
-}
-
-/**
- * Handler for SyncGroup responses
- * opaque must be the cgrp handle.
- */
-void rd_kafka_handle_SyncGroup (rd_kafka_t *rk,
-				rd_kafka_broker_t *rkb,
-                                rd_kafka_resp_err_t err,
-                                rd_kafka_buf_t *rkbuf,
-                                rd_kafka_buf_t *request,
-                                void *opaque) {
-        rd_kafka_cgrp_t *rkcg = opaque;
-        const int log_decode_errors = LOG_ERR;
-        int16_t ErrorCode = 0;
-        rd_kafkap_bytes_t MemberState = RD_ZERO_INIT;
-        int actions;
-
-	if (rkcg->rkcg_join_state != RD_KAFKA_CGRP_JOIN_STATE_WAIT_SYNC) {
-		rd_kafka_dbg(rkb->rkb_rk, CGRP, "SYNCGROUP",
-			     "SyncGroup response: discarding outdated request "
-			     "(now in join-state %s)",
-			     rd_kafka_cgrp_join_state_names[rkcg->
-							    rkcg_join_state]);
-		return;
-	}
-
-        if (err) {
-                ErrorCode = err;
-                goto err;
-        }
-
-        rd_kafka_buf_read_i16(rkbuf, &ErrorCode);
-        rd_kafka_buf_read_bytes(rkbuf, &MemberState);
-
-err:
-        actions = rd_kafka_err_action(rkb, ErrorCode, rkbuf, request,
-				      RD_KAFKA_ERR_ACTION_END);
-
-        if (actions & RD_KAFKA_ERR_ACTION_REFRESH) {
-                /* Re-query for coordinator */
-                rd_kafka_cgrp_op(rkcg, NULL, RD_KAFKA_NO_REPLYQ,
-				 RD_KAFKA_OP_COORD_QUERY,
-                                 ErrorCode);
-                /* FALLTHRU */
-        }
-
-        rd_kafka_dbg(rkb->rkb_rk, CGRP, "SYNCGROUP",
-                     "SyncGroup response: %s (%d bytes of MemberState data)",
-                     rd_kafka_err2str(ErrorCode),
-                     RD_KAFKAP_BYTES_LEN(&MemberState));
-
-        if (ErrorCode == RD_KAFKA_RESP_ERR__DESTROY)
-                return; /* Termination */
-
-        rd_kafka_cgrp_handle_SyncGroup(rkcg, rkb, ErrorCode, &MemberState);
-
-        return;
-
- err_parse:
-        ErrorCode = rkbuf->rkbuf_err;
-        goto err;
-}
-
-
-/**
- * Send JoinGroupRequest
- */
-void rd_kafka_JoinGroupRequest (rd_kafka_broker_t *rkb,
-                                const rd_kafkap_str_t *group_id,
-                                const rd_kafkap_str_t *member_id,
-                                const rd_kafkap_str_t *protocol_type,
-				const rd_list_t *topics,
-                                rd_kafka_replyq_t replyq,
-                                rd_kafka_resp_cb_t *resp_cb,
-                                void *opaque) {
-        rd_kafka_buf_t *rkbuf;
-        rd_kafka_t *rk = rkb->rkb_rk;
-        rd_kafka_assignor_t *rkas;
-        int i;
-
-        rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_JoinGroup,
-                                         1,
-                                         RD_KAFKAP_STR_SIZE(group_id) +
-                                         4 /* sessionTimeoutMs */ +
-                                         RD_KAFKAP_STR_SIZE(member_id) +
-                                         RD_KAFKAP_STR_SIZE(protocol_type) +
-                                         4 /* array count GroupProtocols */ +
-                                         (rd_list_cnt(topics) * 100));
-        rd_kafka_buf_write_kstr(rkbuf, group_id);
-        rd_kafka_buf_write_i32(rkbuf, rk->rk_conf.group_session_timeout_ms);
-        rd_kafka_buf_write_kstr(rkbuf, member_id);
-        rd_kafka_buf_write_kstr(rkbuf, protocol_type);
-        rd_kafka_buf_write_i32(rkbuf, rk->rk_conf.enabled_assignor_cnt);
-
-        RD_LIST_FOREACH(rkas, &rk->rk_conf.partition_assignors, i) {
-                rd_kafkap_bytes_t *member_metadata;
-		if (!rkas->rkas_enabled)
-			continue;
-                rd_kafka_buf_write_kstr(rkbuf, rkas->rkas_protocol_name);
-                member_metadata = rkas->rkas_get_metadata_cb(rkas, topics);
-                rd_kafka_buf_write_kbytes(rkbuf, member_metadata);
-                rd_kafkap_bytes_destroy(member_metadata);
-        }
-
-        /* This is a blocking request */
-        rkbuf->rkbuf_flags |= RD_KAFKA_OP_F_BLOCKING;
-        rkbuf->rkbuf_ts_timeout = rd_clock() +
-                (rk->rk_conf.group_session_timeout_ms * 1000) +
-                (3*1000*1000/* 3s grace period*/);
-
-        rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque);
-}
-
-
-
-
-
-
-/**
- * Send LeaveGroupRequest
- */
-void rd_kafka_LeaveGroupRequest (rd_kafka_broker_t *rkb,
-                                 const rd_kafkap_str_t *group_id,
-                                 const rd_kafkap_str_t *member_id,
-                                 rd_kafka_replyq_t replyq,
-                                 rd_kafka_resp_cb_t *resp_cb,
-                                 void *opaque) {
-        rd_kafka_buf_t *rkbuf;
-
-        rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_LeaveGroup,
-                                         1,
-                                         RD_KAFKAP_STR_SIZE(group_id) +
-                                         RD_KAFKAP_STR_SIZE(member_id));
-        rd_kafka_buf_write_kstr(rkbuf, group_id);
-        rd_kafka_buf_write_kstr(rkbuf, member_id);
-
-        rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque);
-}
-
-
-/**
- * Handler for LeaveGroup responses
- * opaque must be the cgrp handle.
- */
-void rd_kafka_handle_LeaveGroup (rd_kafka_t *rk,
-				 rd_kafka_broker_t *rkb,
-                                 rd_kafka_resp_err_t err,
-                                 rd_kafka_buf_t *rkbuf,
-                                 rd_kafka_buf_t *request,
-                                 void *opaque) {
-        rd_kafka_cgrp_t *rkcg = opaque;
-        const int log_decode_errors = LOG_ERR;
-        int16_t ErrorCode = 0;
-        int actions;
-
-        if (err) {
-                ErrorCode = err;
-                goto err;
-        }
-
-        rd_kafka_buf_read_i16(rkbuf, &ErrorCode);
-
-
-err:
-        actions = rd_kafka_err_action(rkb, ErrorCode, rkbuf, request,
-				      RD_KAFKA_ERR_ACTION_END);
-
-        if (actions & RD_KAFKA_ERR_ACTION_REFRESH) {
-                /* Re-query for coordinator */
-                rd_kafka_cgrp_op(rkcg, NULL, RD_KAFKA_NO_REPLYQ,
-				 RD_KAFKA_OP_COORD_QUERY, ErrorCode);
-                /* Schedule a retry */
-                rd_kafka_buf_keep(request);
-                rd_kafka_broker_buf_retry(request->rkbuf_rkb, request);
-                return;
-        }
-
-        if (ErrorCode)
-                rd_kafka_dbg(rkb->rkb_rk, CGRP, "LEAVEGROUP",
-                             "LeaveGroup response: %s",
-                             rd_kafka_err2str(ErrorCode));
-
- err_parse:
-        ErrorCode = rkbuf->rkbuf_err;
-        goto err;
-}
-
-
-
-
-
-
-/**
- * Send HeartbeatRequest
- */
-void rd_kafka_HeartbeatRequest (rd_kafka_broker_t *rkb,
-                                const rd_kafkap_str_t *group_id,
-                                int32_t generation_id,
-                                const rd_kafkap_str_t *member_id,
-                                rd_kafka_replyq_t replyq,
-                                rd_kafka_resp_cb_t *resp_cb,
-                                void *opaque) {
-        rd_kafka_buf_t *rkbuf;
-
-        rd_rkb_dbg(rkb, CGRP, "HEARTBEAT",
-                   "Heartbeat for group \"%s\" generation id %"PRId32,
-                   group_id->str, generation_id);
-
-        rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_Heartbeat,
-                                         1,
-                                         RD_KAFKAP_STR_SIZE(group_id) +
-                                         4 /* GenerationId */ +
-                                         RD_KAFKAP_STR_SIZE(member_id));
-
-        rd_kafka_buf_write_kstr(rkbuf, group_id);
-        rd_kafka_buf_write_i32(rkbuf, generation_id);
-        rd_kafka_buf_write_kstr(rkbuf, member_id);
-
-        rkbuf->rkbuf_ts_timeout = rd_clock() +
-                (rkb->rkb_rk->rk_conf.group_session_timeout_ms * 1000);
-
-        rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque);
-}
-
-
-
-
-/**
- * Send ListGroupsRequest
- */
-void rd_kafka_ListGroupsRequest (rd_kafka_broker_t *rkb,
-                                 rd_kafka_replyq_t replyq,
-                                 rd_kafka_resp_cb_t *resp_cb,
-                                 void *opaque) {
-        rd_kafka_buf_t *rkbuf;
-
-        rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_ListGroups, 0, 0);
-
-        rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque);
-}
-
-
-/**
- * Send DescribeGroupsRequest
- */
-void rd_kafka_DescribeGroupsRequest (rd_kafka_broker_t *rkb,
-                                     const char **groups, int group_cnt,
-                                     rd_kafka_replyq_t replyq,
-                                     rd_kafka_resp_cb_t *resp_cb,
-                                     void *opaque) {
-        rd_kafka_buf_t *rkbuf;
-
-        rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_DescribeGroups,
-                                         1, 32*group_cnt);
-
-        rd_kafka_buf_write_i32(rkbuf, group_cnt);
-        while (group_cnt-- > 0)
-                rd_kafka_buf_write_str(rkbuf, groups[group_cnt], -1);
-
-        rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque);
-}
-
-
-
-
-/**
- * @brief Generic handler for Metadata responses
- *
- * @locality rdkafka main thread
- */
-static void rd_kafka_handle_Metadata (rd_kafka_t *rk,
-                                      rd_kafka_broker_t *rkb,
-                                      rd_kafka_resp_err_t err,
-                                      rd_kafka_buf_t *rkbuf,
-                                      rd_kafka_buf_t *request,
-                                      void *opaque) {
-        rd_kafka_op_t *rko = opaque; /* Possibly NULL */
-        struct rd_kafka_metadata *md = NULL;
-        const rd_list_t *topics = request->rkbuf_u.Metadata.topics;
-
-        rd_kafka_assert(NULL, err == RD_KAFKA_RESP_ERR__DESTROY ||
-                        thrd_is_current(rk->rk_thread));
-
-	/* Avoid metadata updates when we're terminating. */
-	if (rd_kafka_terminating(rkb->rkb_rk))
-                err = RD_KAFKA_RESP_ERR__DESTROY;
-
-	if (unlikely(err)) {
-                if (err == RD_KAFKA_RESP_ERR__DESTROY) {
-                        /* Terminating */
-                        goto done;
-                }
-
-                /* FIXME: handle errors */
-                rd_rkb_log(rkb, LOG_WARNING, "METADATA",
-                           "Metadata request failed: %s (%dms)",
-                           rd_kafka_err2str(err),
-			   (int)(request->rkbuf_ts_sent/1000));
-	} else {
-
-                if (!topics)
-                        rd_rkb_dbg(rkb, METADATA, "METADATA",
-                                   "===== Received metadata: %s =====",
-                                   request->rkbuf_u.Metadata.reason);
-                else
-                        rd_rkb_dbg(rkb, METADATA, "METADATA",
-                                   "===== Received metadata "
-                                   "(for %d requested topics): %s =====",
-                                   rd_list_cnt(topics),
-                                   request->rkbuf_u.Metadata.reason);
-
-                md = rd_kafka_parse_Metadata(rkb, request, rkbuf);
-		if (!md) {
-			if (rd_kafka_buf_retry(rkb, request))
-				return;
-			err = RD_KAFKA_RESP_ERR__BAD_MSG;
-                }
-        }
-
-        if (rko && rko->rko_replyq.q) {
-                /* Reply to metadata requester, passing on the metadata.
-                 * Reuse requesting rko for the reply. */
-                rko->rko_err = err;
-                rko->rko_u.metadata.md = md;
-
-                rd_kafka_replyq_enq(&rko->rko_replyq, rko, 0);
-                rko = NULL;
-        } else {
-                if (md)
-                        rd_free(md);
-        }
-
- done:
-        if (rko)
-                rd_kafka_op_destroy(rko);
-}
-
-
-
-/**
- * @brief Construct MetadataRequest (does not send)
- *
- * \p topics is a list of topic names (char *) to request.
- *
- * !topics          - only request brokers (if supported by broker, else
- *                    all topics)
- *  topics.cnt==0   - all topics in cluster are requested
- *  topics.cnt >0   - only specified topics are requested
- *
- * @param reason    - metadata request reason
- * @param rko       - (optional) rko with replyq for handling response.
- *                    Specifying an rko forces a metadata request even if
- *                    there is already a matching one in-transit.
- *
- * If full metadata for all topics is requested (or all brokers, which
- * results in all-topics on older brokers) and there is already a full request
- * in transit then this function will return RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS
- * otherwise RD_KAFKA_RESP_ERR_NO_ERROR. If \p rko is non-NULL the request
- * is sent regardless.
- */
-rd_kafka_resp_err_t
-rd_kafka_MetadataRequest (rd_kafka_broker_t *rkb,
-                          const rd_list_t *topics, const char *reason,
-                          rd_kafka_op_t *rko) {
-        rd_kafka_buf_t *rkbuf;
-        int16_t ApiVersion = 0;
-        int features;
-        int topic_cnt = topics ? rd_list_cnt(topics) : 0;
-        int *full_incr = NULL;
-
-        ApiVersion = rd_kafka_broker_ApiVersion_supported(rkb,
-                                                          RD_KAFKAP_Metadata,
-                                                          0, 2,
-                                                          &features);
-
-        rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_Metadata, 1,
-                                         4 + (50 * topic_cnt));
-
-        if (!reason)
-                reason = "";
-
-        rkbuf->rkbuf_u.Metadata.reason = rd_strdup(reason);
-
-        if (!topics && ApiVersion >= 1) {
-                /* a null(0) array (in the protocol) represents no topics */
-                rd_kafka_buf_write_i32(rkbuf, 0);
-                rd_rkb_dbg(rkb, METADATA, "METADATA",
-                           "Request metadata for brokers only: %s", reason);
-                full_incr = &rkb->rkb_rk->rk_metadata_cache.
-                        rkmc_full_brokers_sent;
-
-        } else {
-                if (topic_cnt == 0 && !rko)
-                        full_incr = &rkb->rkb_rk->rk_metadata_cache.
-                                rkmc_full_topics_sent;
-
-                if (topic_cnt == 0 && ApiVersion >= 1)
-                        rd_kafka_buf_write_i32(rkbuf, -1); /* Null: all topics*/
-                else
-                        rd_kafka_buf_write_i32(rkbuf, topic_cnt);
-
-                if (topic_cnt == 0) {
-                        rkbuf->rkbuf_u.Metadata.all_topics = 1;
-                        rd_rkb_dbg(rkb, METADATA, "METADATA",
-                                   "Request metadata for all topics: "
-                                   "%s", reason);
-                } else
-                        rd_rkb_dbg(rkb, METADATA, "METADATA",
-                                   "Request metadata for %d topic(s): "
-                                   "%s", topic_cnt, reason);
-        }
-
-        if (full_incr) {
-                /* Avoid multiple outstanding full requests
-                 * (since they are redundant and side-effect-less).
-                 * Forced requests (app using metadata() API) are passed
-                 * through regardless. */
-
-                mtx_lock(&rkb->rkb_rk->rk_metadata_cache.
-                         rkmc_full_lock);
-                if (*full_incr > 0 && (!rko || !rko->rko_u.metadata.force)) {
-                        mtx_unlock(&rkb->rkb_rk->rk_metadata_cache.
-                                   rkmc_full_lock);
-                        rd_rkb_dbg(rkb, METADATA, "METADATA",
-                                   "Skipping metadata request: %s: "
-                                   "full request already in-transit",
-                                   reason);
-                        rd_kafka_buf_destroy(rkbuf);
-                        return RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS;
-                }
-
-                (*full_incr)++;
-                mtx_unlock(&rkb->rkb_rk->rk_metadata_cache.
-                           rkmc_full_lock);
-                rkbuf->rkbuf_u.Metadata.decr = full_incr;
-                rkbuf->rkbuf_u.Metadata.decr_lock = &rkb->rkb_rk->
-                        rk_metadata_cache.rkmc_full_lock;
-        }
-
-
-        if (topic_cnt > 0) {
-                char *topic;
-                int i;
-
-                /* Maintain a copy of the topics list so we can purge
-                 * hints from the metadata cache on error. */
-                rkbuf->rkbuf_u.Metadata.topics =
-                        rd_list_copy(topics, rd_list_string_copy, NULL);
-
-                RD_LIST_FOREACH(topic, topics, i)
-                        rd_kafka_buf_write_str(rkbuf, topic, -1);
-
-        }
-
-        rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0);
-
-        /* Metadata requests are part of the important control plane
-         * and should go before other requests (Produce, Fetch, etc). */
-        rkbuf->rkbuf_flags |= RD_KAFKA_OP_F_FLASH;
-
-        rd_kafka_broker_buf_enq_replyq(rkb, rkbuf,
-                                       /* Handle response thru rk_ops,
-                                        * but forward parsed result to
-                                        * rko's replyq when done. */
-                                       RD_KAFKA_REPLYQ(rkb->rkb_rk->
-                                                       rk_ops, 0),
-                                       rd_kafka_handle_Metadata, rko);
-
-        return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-
-
-
-
-
-
-
-
-/**
- * @brief Parses and handles ApiVersion reply.
- *
- * @param apis will be allocated, populated and sorted
- *             with broker's supported APIs.
- * @param api_cnt will be set to the number of elements in \p *apis
- *
- * @returns 0 on success, else an error.
- */
-rd_kafka_resp_err_t
-rd_kafka_handle_ApiVersion (rd_kafka_t *rk,
-			    rd_kafka_broker_t *rkb,
-			    rd_kafka_resp_err_t err,
-			    rd_kafka_buf_t *rkbuf,
-			    rd_kafka_buf_t *request,
-			    struct rd_kafka_ApiVersion **apis,
-			    size_t *api_cnt) {
-        const int log_decode_errors = LOG_ERR;
-        int actions;
-	int32_t ApiArrayCnt;
-	int16_t ErrorCode;
-	int i = 0;
-
-	*apis = NULL;
-
-        if (err)
-                goto err;
-
-	rd_kafka_buf_read_i16(rkbuf, &ErrorCode);
-	if ((err = ErrorCode))
-		goto err;
-
-        rd_kafka_buf_read_i32(rkbuf, &ApiArrayCnt);
-	if (ApiArrayCnt > 1000)
-		rd_kafka_buf_parse_fail(rkbuf,
-					"ApiArrayCnt %"PRId32" out of range",
-					ApiArrayCnt);
-
-	rd_rkb_dbg(rkb, FEATURE, "APIVERSION",
-		   "Broker API support:");
-
-	*apis = malloc(sizeof(**apis) * ApiArrayCnt);
-
-	for (i = 0 ; i < ApiArrayCnt ; i++) {
-		struct rd_kafka_ApiVersion *api = &(*apis)[i];
-
-		rd_kafka_buf_read_i16(rkbuf, &api->ApiKey);
-		rd_kafka_buf_read_i16(rkbuf, &api->MinVer);
-		rd_kafka_buf_read_i16(rkbuf, &api->MaxVer);
-
-		rd_rkb_dbg(rkb, FEATURE, "APIVERSION",
-			   "  ApiKey %s (%hd) Versions %hd..%hd",
-			   rd_kafka_ApiKey2str(api->ApiKey),
-			   api->ApiKey, api->MinVer, api->MaxVer);
-        }
-
-	*api_cnt = ApiArrayCnt;
-        qsort(*apis, *api_cnt, sizeof(**apis), rd_kafka_ApiVersion_key_cmp);
-
-	goto done;
-
- err_parse:
-        err = rkbuf->rkbuf_err;
- err:
-	if (*apis)
-		rd_free(*apis);
-
-        actions = rd_kafka_err_action(
-		rkb, err, rkbuf, request,
-		RD_KAFKA_ERR_ACTION_END);
-
-	if (actions & RD_KAFKA_ERR_ACTION_RETRY) {
-		if (rd_kafka_buf_retry(rkb, request))
-			return RD_KAFKA_RESP_ERR__IN_PROGRESS;
-		/* FALLTHRU */
-	}
-
-done:
-        return err;
-}
-
-
-
-/**
- * Send ApiVersionRequest (KIP-35)
- */
-void rd_kafka_ApiVersionRequest (rd_kafka_broker_t *rkb,
-				 rd_kafka_replyq_t replyq,
-				 rd_kafka_resp_cb_t *resp_cb,
-				 void *opaque, int flash_msg) {
-        rd_kafka_buf_t *rkbuf;
-
-        rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_ApiVersion, 1, 4);
-	rkbuf->rkbuf_flags |= (flash_msg ? RD_KAFKA_OP_F_FLASH : 0);
-	rd_kafka_buf_write_i32(rkbuf, 0); /* Empty array: request all APIs */
-
-	/* Non-supporting brokers will tear down the connection when they
-	 * receive an unknown API request, so don't retry the request on failure. */
-	rkbuf->rkbuf_retries = RD_KAFKA_BUF_NO_RETRIES;
-
-	/* 0.9.0.x brokers will not close the connection on unsupported
-	 * API requests, so we minimize the timeout for the request.
-	 * This is a regression on the broker part. */
-	rkbuf->rkbuf_ts_timeout = rd_clock() + (rkb->rkb_rk->rk_conf.api_version_request_timeout_ms * 1000);
-
-        if (replyq.q)
-                rd_kafka_broker_buf_enq_replyq(rkb,
-                                               rkbuf, replyq, resp_cb, opaque);
-	else /* in broker thread */
-		rd_kafka_broker_buf_enq1(rkb, rkbuf, resp_cb, opaque);
-}
-
-
-/**
- * Send SaslHandshakeRequest (KIP-43)
- */
-void rd_kafka_SaslHandshakeRequest (rd_kafka_broker_t *rkb,
-				    const char *mechanism,
-				    rd_kafka_replyq_t replyq,
-				    rd_kafka_resp_cb_t *resp_cb,
-				    void *opaque, int flash_msg) {
-        rd_kafka_buf_t *rkbuf;
-	int mechlen = (int)strlen(mechanism);
-
-        rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_SaslHandshake,
-                                         1, RD_KAFKAP_STR_SIZE0(mechlen));
-	rkbuf->rkbuf_flags |= (flash_msg ? RD_KAFKA_OP_F_FLASH : 0);
-	rd_kafka_buf_write_str(rkbuf, mechanism, mechlen);
-
-	/* Non-supporting brokers will tear down the connection when they
-	 * receive an unknown API request or when the SASL GSSAPI
-	 * token type is not recognized, so don't retry the request on failure. */
-	rkbuf->rkbuf_retries = RD_KAFKA_BUF_NO_RETRIES;
-
-	/* 0.9.0.x brokers will not close the connection on unsupported
-	 * API requests, so we minimize the timeout of the request.
-	 * This is a regression on the broker part. */
-	if (!rkb->rkb_rk->rk_conf.api_version_request &&
-            rkb->rkb_rk->rk_conf.socket_timeout_ms > 10*1000)
-		rkbuf->rkbuf_ts_timeout = rd_clock() + (10 * 1000 * 1000);
-
-	if (replyq.q)
-		rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq,
-                                               resp_cb, opaque);
-	else /* in broker thread */
-		rd_kafka_broker_buf_enq1(rkb, rkbuf, resp_cb, opaque);
-}
-
-
-
-
-/**
- * @brief Parses a Produce reply.
- * @returns 0 on success or an error code on failure.
- * @locality broker thread
- */
-static rd_kafka_resp_err_t
-rd_kafka_handle_Produce_parse (rd_kafka_broker_t *rkb,
-                               rd_kafka_toppar_t *rktp,
-                               rd_kafka_buf_t *rkbuf,
-                               rd_kafka_buf_t *request,
-                               int64_t *offsetp,
-                               int64_t *timestampp) {
-        int32_t TopicArrayCnt;
-        int32_t PartitionArrayCnt;
-        struct {
-                int32_t Partition;
-                int16_t ErrorCode;
-                int64_t Offset;
-        } hdr;
-        const int log_decode_errors = LOG_ERR;
-
-        rd_kafka_buf_read_i32(rkbuf, &TopicArrayCnt);
-        if (TopicArrayCnt != 1)
-                goto err;
-
-        /* Since we only produce to a single topic+partition in each
-         * request, we assume that the reply only contains one topic+partition
-         * and that it is the same one we requested.
-         * If not, the broker is buggy. */
-        rd_kafka_buf_skip_str(rkbuf);
-        rd_kafka_buf_read_i32(rkbuf, &PartitionArrayCnt);
-
-        if (PartitionArrayCnt != 1)
-                goto err;
-
-        rd_kafka_buf_read_i32(rkbuf, &hdr.Partition);
-        rd_kafka_buf_read_i16(rkbuf, &hdr.ErrorCode);
-        rd_kafka_buf_read_i64(rkbuf, &hdr.Offset);
-
-        *offsetp = hdr.Offset;
-
-        *timestampp = -1;
-        if (request->rkbuf_reqhdr.ApiVersion >= 2) {
-                rd_kafka_buf_read_i64(rkbuf, timestampp);
-        }
-
-        if (request->rkbuf_reqhdr.ApiVersion >= 1) {
-                int32_t Throttle_Time;
-                rd_kafka_buf_read_i32(rkbuf, &Throttle_Time);
-
-                rd_kafka_op_throttle_time(rkb, rkb->rkb_rk->rk_rep,
-                                          Throttle_Time);
-        }
-
-
-        return hdr.ErrorCode;
-
- err_parse:
-        return rkbuf->rkbuf_err;
- err:
-        return RD_KAFKA_RESP_ERR__BAD_MSG;
-}
-
-
-/**
- * @brief Handle ProduceResponse
- *
- * @locality broker thread
- */
-static void rd_kafka_handle_Produce (rd_kafka_t *rk,
-                                     rd_kafka_broker_t *rkb,
-                                     rd_kafka_resp_err_t err,
-                                     rd_kafka_buf_t *reply,
-                                     rd_kafka_buf_t *request,
-                                     void *opaque) {
-        shptr_rd_kafka_toppar_t *s_rktp = opaque; /* from ProduceRequest() */
-        rd_kafka_toppar_t *rktp = rd_kafka_toppar_s2i(s_rktp);
-        int64_t offset = RD_KAFKA_OFFSET_INVALID;
-        int64_t timestamp = -1;
-
-        /* Parse Produce reply (unless the request errored) */
-        if (!err && reply)
-                err = rd_kafka_handle_Produce_parse(rkb, rktp,
-                                                    reply, request,
-                                                    &offset, &timestamp);
-
-
-        if (likely(!err)) {
-                rd_rkb_dbg(rkb, MSG, "MSGSET",
-                           "%s [%"PRId32"]: MessageSet with %i message(s) "
-                           "delivered",
-                           rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
-                           rd_atomic32_get(&request->rkbuf_msgq.rkmq_msg_cnt));
-
-        } else {
-                /* Error */
-                int actions;
-
-                actions = rd_kafka_err_action(
-                        rkb, err, reply, request,
-
-                        RD_KAFKA_ERR_ACTION_REFRESH,
-                        RD_KAFKA_RESP_ERR__TRANSPORT,
-
-                        RD_KAFKA_ERR_ACTION_REFRESH,
-                        RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART,
-
-                        RD_KAFKA_ERR_ACTION_END);
-
-                rd_rkb_dbg(rkb, MSG, "MSGSET",
-                           "%s [%"PRId32"]: MessageSet with %i message(s) "
-                           "encountered error: %s (actions 0x%x)",
-                           rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
-                           rd_atomic32_get(&request->rkbuf_msgq.rkmq_msg_cnt),
-                           rd_kafka_err2str(err), actions);
-
-                /* NOTE: REFRESH implies a later retry, which does NOT affect
-                 *       the retry count since refresh-errors are considered
-                 *       to be stale metadata rather than temporary errors.
-                 *
-                 *       This is somewhat problematic since it may cause
-                 *       duplicate messages even with retries=0 if the
-                 *       ProduceRequest made it to the broker but only the
-                 *       response was lost due to network connectivity issues.
-                 *       That problem will be sorted when EoS is implemented.
-                 */
-                if (actions & RD_KAFKA_ERR_ACTION_REFRESH) {
-                        /* Request metadata information update */
-                        rd_kafka_toppar_leader_unavailable(rktp,
-                                                           "produce", err);
-
-                        /* Move messages (in the rkbuf) back to the partition's
-                         * queue head. They will be resent when a new leader
-                         * is delegated. */
-                        rd_kafka_toppar_insert_msgq(rktp, &request->rkbuf_msgq);
-
-                        /* No need for fallthru here since the request
-                         * no longer has any messages associated with it. */
-                        goto done;
-                }
-
-                if ((actions & RD_KAFKA_ERR_ACTION_RETRY) &&
-                    rd_kafka_buf_retry(rkb, request))
-                        return; /* Scheduled for retry */
-
-                /* Refresh implies a later retry through other means */
-                if (actions & RD_KAFKA_ERR_ACTION_REFRESH)
-                        goto done;
-
-                /* Translate request-level timeout error code
-                 * to message-level timeout error code. */
-                if (err == RD_KAFKA_RESP_ERR__TIMED_OUT)
-                        err = RD_KAFKA_RESP_ERR__MSG_TIMED_OUT;
-
-                /* Fatal errors: no message transmission retries */
-                /* FALLTHRU */
-        }
-
-        /* Propagate assigned offset and timestamp back to app. */
-        if (likely(offset != RD_KAFKA_OFFSET_INVALID)) {
-                rd_kafka_msg_t *rkm;
-                if (rktp->rktp_rkt->rkt_conf.produce_offset_report) {
-                        /* produce.offset.report: each message */
-                        TAILQ_FOREACH(rkm, &request->rkbuf_msgq.rkmq_msgs,
-                                      rkm_link) {
-                                rkm->rkm_offset = offset++;
-                                if (timestamp != -1) {
-                                        rkm->rkm_timestamp = timestamp;
-                                        rkm->rkm_tstype = RD_KAFKA_MSG_ATTR_LOG_APPEND_TIME;
-                                }
-                        }
-                } else {
-                        /* Last message in each batch */
-                        rkm = TAILQ_LAST(&request->rkbuf_msgq.rkmq_msgs,
-                                         rd_kafka_msg_head_s);
-                        rkm->rkm_offset = offset +
-                                rd_atomic32_get(&request->rkbuf_msgq.
-                                                rkmq_msg_cnt) - 1;
-                        if (timestamp != -1) {
-                                rkm->rkm_timestamp = timestamp;
-                                rkm->rkm_tstype = RD_KAFKA_MSG_ATTR_LOG_APPEND_TIME;
-                        }
-                }
-        }
-
-        /* Enqueue messages for delivery report */
-        rd_kafka_dr_msgq(rktp->rktp_rkt, &request->rkbuf_msgq, err);
-
- done:
-        rd_kafka_toppar_destroy(s_rktp); /* from ProduceRequest() */
-}
-
-
-/**
- * @brief Send ProduceRequest for messages in toppar queue.
- *
- * @returns the number of messages included, or 0 on error / no messages.
- *
- * @locality broker thread
- */
-int rd_kafka_ProduceRequest (rd_kafka_broker_t *rkb, rd_kafka_toppar_t *rktp) {
-        rd_kafka_buf_t *rkbuf;
-        rd_kafka_itopic_t *rkt = rktp->rktp_rkt;
-        size_t MessageSetSize = 0;
-        int cnt;
-
-        /**
-         * Create ProduceRequest with as many messages from the toppar
-         * transmit queue as possible.
-         */
-        rkbuf = rd_kafka_msgset_create_ProduceRequest(rkb, rktp,
-                                                      &MessageSetSize);
-        if (unlikely(!rkbuf))
-                return 0;
-
-        cnt = rd_atomic32_get(&rkbuf->rkbuf_msgq.rkmq_msg_cnt);
-        rd_dassert(cnt > 0);
-
-        rd_atomic64_add(&rktp->rktp_c.tx_msgs, cnt);
-        rd_atomic64_add(&rktp->rktp_c.tx_bytes, MessageSetSize);
-
-        if (!rkt->rkt_conf.required_acks)
-                rkbuf->rkbuf_flags |= RD_KAFKA_OP_F_NO_RESPONSE;
-
-        /* Use timeout from first message. */
-        rkbuf->rkbuf_ts_timeout =
-                TAILQ_FIRST(&rkbuf->rkbuf_msgq.rkmq_msgs)->rkm_ts_timeout;
-
-        rd_kafka_broker_buf_enq_replyq(rkb, rkbuf,
-                                       RD_KAFKA_NO_REPLYQ,
-                                       rd_kafka_handle_Produce,
-                                       /* toppar ref for handle_Produce() */
-                                       rd_kafka_toppar_keep(rktp));
-
-        return cnt;
-}

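Note on the ApiVersionRequest (KIP-35) logic removed above: applications do not call it directly; it is driven by the api.version.request and api.version.request.timeout.ms configuration properties that back the rk_conf fields used in rd_kafka_ApiVersionRequest(). A minimal sketch of that configuration using the public C API (illustrative only, not part of this commit):

    #include <librdkafka/rdkafka.h>

    static rd_kafka_conf_t *make_conf(void) {
            char errstr[512];
            rd_kafka_conf_t *conf = rd_kafka_conf_new();

            /* Enable the KIP-35 ApiVersionRequest probe on connect. */
            if (rd_kafka_conf_set(conf, "api.version.request", "true",
                                  errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK ||
                /* Bound the probe so old brokers that ignore the request
                 * do not stall connection setup. */
                rd_kafka_conf_set(conf, "api.version.request.timeout.ms", "10000",
                                  errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) {
                    rd_kafka_conf_destroy(conf);
                    return NULL;
            }
            return conf;
    }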
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdkafka_request.h
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdkafka_request.h b/thirdparty/librdkafka-0.11.1/src/rdkafka_request.h
deleted file mode 100644
index a6a11e5..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdkafka_request.h
+++ /dev/null
@@ -1,196 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2015, Magnus Edenhill
- * All rights reserved.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met: 
- * 
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer. 
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution. 
- * 
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#pragma once
-
-#include "rdkafka_cgrp.h"
-#include "rdkafka_feature.h"
-
-
-#define RD_KAFKA_ERR_ACTION_PERMANENT  0x1 /* Permanent error */
-#define RD_KAFKA_ERR_ACTION_IGNORE     0x2 /* Error can be ignored */
-#define RD_KAFKA_ERR_ACTION_REFRESH    0x4 /* Refresh state (e.g., metadata) */
-#define RD_KAFKA_ERR_ACTION_RETRY      0x8 /* Retry request after backoff */
-#define RD_KAFKA_ERR_ACTION_INFORM    0x10 /* Inform application about err */
-#define RD_KAFKA_ERR_ACTION_SPECIAL   0x20 /* Special-purpose, depends on context */
-#define RD_KAFKA_ERR_ACTION_END          0 /* var-arg sentinel */
-
-int rd_kafka_err_action (rd_kafka_broker_t *rkb,
-			 rd_kafka_resp_err_t err,
-			 rd_kafka_buf_t *rkbuf,
-			 rd_kafka_buf_t *request, ...);
-
-
-void rd_kafka_GroupCoordinatorRequest (rd_kafka_broker_t *rkb,
-                                       const rd_kafkap_str_t *cgrp,
-                                       rd_kafka_replyq_t replyq,
-                                       rd_kafka_resp_cb_t *resp_cb,
-                                       void *opaque);
-
-rd_kafka_resp_err_t rd_kafka_handle_Offset (rd_kafka_t *rk,
-					    rd_kafka_broker_t *rkb,
-					    rd_kafka_resp_err_t err,
-					    rd_kafka_buf_t *rkbuf,
-					    rd_kafka_buf_t *request,
-                                            rd_kafka_topic_partition_list_t
-                                            *offsets);
-
-void rd_kafka_OffsetRequest (rd_kafka_broker_t *rkb,
-                             rd_kafka_topic_partition_list_t *offsets,
-                             int16_t api_version,
-                             rd_kafka_replyq_t replyq,
-                             rd_kafka_resp_cb_t *resp_cb,
-                             void *opaque);
-
-rd_kafka_resp_err_t
-rd_kafka_handle_OffsetFetch (rd_kafka_t *rk,
-			     rd_kafka_broker_t *rkb,
-			     rd_kafka_resp_err_t err,
-			     rd_kafka_buf_t *rkbuf,
-			     rd_kafka_buf_t *request,
-			     rd_kafka_topic_partition_list_t *offsets,
-			     int update_toppar);
-
-void rd_kafka_op_handle_OffsetFetch (rd_kafka_t *rk,
-				     rd_kafka_broker_t *rkb,
-                                     rd_kafka_resp_err_t err,
-                                     rd_kafka_buf_t *rkbuf,
-                                     rd_kafka_buf_t *request,
-                                     void *opaque);
-
-void rd_kafka_OffsetFetchRequest (rd_kafka_broker_t *rkb,
-                                  int16_t api_version,
-                                  rd_kafka_topic_partition_list_t *parts,
-                                  rd_kafka_replyq_t replyq,
-                                  rd_kafka_resp_cb_t *resp_cb,
-                                  void *opaque);
-
-
-
-rd_kafka_resp_err_t
-rd_kafka_handle_OffsetCommit (rd_kafka_t *rk,
-			      rd_kafka_broker_t *rkb,
-			      rd_kafka_resp_err_t err,
-			      rd_kafka_buf_t *rkbuf,
-			      rd_kafka_buf_t *request,
-			      rd_kafka_topic_partition_list_t *offsets);
-int rd_kafka_OffsetCommitRequest (rd_kafka_broker_t *rkb,
-				  rd_kafka_cgrp_t *rkcg,
-				  int16_t api_version,
-				  rd_kafka_topic_partition_list_t *offsets,
-				  rd_kafka_replyq_t replyq,
-				  rd_kafka_resp_cb_t *resp_cb,
-				  void *opaque, const char *reason);
-
-
-
-void rd_kafka_JoinGroupRequest (rd_kafka_broker_t *rkb,
-                                const rd_kafkap_str_t *group_id,
-                                const rd_kafkap_str_t *member_id,
-                                const rd_kafkap_str_t *protocol_type,
-				const rd_list_t *topics,
-                                rd_kafka_replyq_t replyq,
-                                rd_kafka_resp_cb_t *resp_cb,
-                                void *opaque);
-
-
-void rd_kafka_LeaveGroupRequest (rd_kafka_broker_t *rkb,
-                                 const rd_kafkap_str_t *group_id,
-                                 const rd_kafkap_str_t *member_id,
-                                 rd_kafka_replyq_t replyq,
-                                 rd_kafka_resp_cb_t *resp_cb,
-                                 void *opaque);
-void rd_kafka_handle_LeaveGroup (rd_kafka_t *rk,
-				 rd_kafka_broker_t *rkb,
-                                 rd_kafka_resp_err_t err,
-                                 rd_kafka_buf_t *rkbuf,
-                                 rd_kafka_buf_t *request,
-                                 void *opaque);
-
-void rd_kafka_SyncGroupRequest (rd_kafka_broker_t *rkb,
-                                const rd_kafkap_str_t *group_id,
-                                int32_t generation_id,
-                                const rd_kafkap_str_t *member_id,
-                                const rd_kafka_group_member_t
-                                *assignments,
-                                int assignment_cnt,
-                                rd_kafka_replyq_t replyq,
-                                rd_kafka_resp_cb_t *resp_cb,
-                                void *opaque);
-void rd_kafka_handle_SyncGroup (rd_kafka_t *rk,
-				rd_kafka_broker_t *rkb,
-                                rd_kafka_resp_err_t err,
-                                rd_kafka_buf_t *rkbuf,
-                                rd_kafka_buf_t *request,
-                                void *opaque);
-
-void rd_kafka_ListGroupsRequest (rd_kafka_broker_t *rkb,
-                                 rd_kafka_replyq_t replyq,
-                                 rd_kafka_resp_cb_t *resp_cb,
-                                 void *opaque);
-
-void rd_kafka_DescribeGroupsRequest (rd_kafka_broker_t *rkb,
-                                     const char **groups, int group_cnt,
-                                     rd_kafka_replyq_t replyq,
-                                     rd_kafka_resp_cb_t *resp_cb,
-                                     void *opaque);
-
-
-void rd_kafka_HeartbeatRequest (rd_kafka_broker_t *rkb,
-                                const rd_kafkap_str_t *group_id,
-                                int32_t generation_id,
-                                const rd_kafkap_str_t *member_id,
-                                rd_kafka_replyq_t replyq,
-                                rd_kafka_resp_cb_t *resp_cb,
-                                void *opaque);
-
-rd_kafka_resp_err_t
-rd_kafka_MetadataRequest (rd_kafka_broker_t *rkb,
-                          const rd_list_t *topics, const char *reason,
-                          rd_kafka_op_t *rko);
-
-rd_kafka_resp_err_t
-rd_kafka_handle_ApiVersion (rd_kafka_t *rk,
-			    rd_kafka_broker_t *rkb,
-			    rd_kafka_resp_err_t err,
-			    rd_kafka_buf_t *rkbuf,
-			    rd_kafka_buf_t *request,
-			    struct rd_kafka_ApiVersion **apis,
-			    size_t *api_cnt);
-void rd_kafka_ApiVersionRequest (rd_kafka_broker_t *rkb,
-				 rd_kafka_replyq_t replyq,
-				 rd_kafka_resp_cb_t *resp_cb,
-				 void *opaque, int flash_msg);
-
-void rd_kafka_SaslHandshakeRequest (rd_kafka_broker_t *rkb,
-				    const char *mechanism,
-				    rd_kafka_replyq_t replyq,
-				    rd_kafka_resp_cb_t *resp_cb,
-				    void *opaque, int flash_msg);
-
-int rd_kafka_ProduceRequest (rd_kafka_broker_t *rkb, rd_kafka_toppar_t *rktp);

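Note on rd_kafka_MetadataRequest() declared above: it is the internal request behind the public rd_kafka_metadata() call (the "forced" requests mentioned in the full-metadata bookkeeping earlier in this commit). A minimal sketch of the public-API side, assuming an already created handle rk (illustrative only, not part of this commit):

    #include <stdio.h>
    #include <librdkafka/rdkafka.h>

    /* Request full cluster metadata (all topics) and print a summary. */
    static void print_cluster_summary(rd_kafka_t *rk) {
            const struct rd_kafka_metadata *md;
            rd_kafka_resp_err_t err;

            err = rd_kafka_metadata(rk, 1 /* all topics */, NULL, &md, 5000);
            if (err) {
                    fprintf(stderr, "metadata request failed: %s\n",
                            rd_kafka_err2str(err));
                    return;
            }

            fprintf(stderr, "%d broker(s), %d topic(s)\n",
                    md->broker_cnt, md->topic_cnt);
            rd_kafka_metadata_destroy(md);
    }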
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdkafka_roundrobin_assignor.c
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdkafka_roundrobin_assignor.c b/thirdparty/librdkafka-0.11.1/src/rdkafka_roundrobin_assignor.c
deleted file mode 100644
index 0482f88..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdkafka_roundrobin_assignor.c
+++ /dev/null
@@ -1,114 +0,0 @@
-/*
- * librdkafka - The Apache Kafka C/C++ library
- *
- * Copyright (c) 2015 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-#include "rdkafka_int.h"
-#include "rdkafka_assignor.h"
-
-
-/**
- * Source: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/clients/consumer/RoundRobinAssignor.java
- *
- * The roundrobin assignor lays out all the available partitions and all the
- * available consumers. It then proceeds to do a roundrobin assignment from
- * partition to consumer. If the subscriptions of all consumer instances are
- * identical, then the partitions will be uniformly distributed. (i.e., the 
- * partition ownership counts will be within a delta of exactly one across all
- * consumers.)
- *
- * For example, suppose there are two consumers C0 and C1, two topics t0 and
- * t1, and each topic has 3 partitions, resulting in partitions t0p0, t0p1,
- * t0p2, t1p0, t1p1, and t1p2.
- *
- * The assignment will be:
- * C0: [t0p0, t0p2, t1p1]
- * C1: [t0p1, t1p0, t1p2]
- */
-
-rd_kafka_resp_err_t
-rd_kafka_roundrobin_assignor_assign_cb (rd_kafka_t *rk,
-					const char *member_id,
-					const char *protocol_name,
-					const rd_kafka_metadata_t *metadata,
-					rd_kafka_group_member_t *members,
-					size_t member_cnt,
-					rd_kafka_assignor_topic_t
-					**eligible_topics,
-					size_t eligible_topic_cnt,
-					char *errstr, size_t errstr_size,
-					void *opaque) {
-        unsigned int ti;
-	int next = 0; /* Next member id */
-
-	/* Sort topics by name */
-	qsort(eligible_topics, eligible_topic_cnt, sizeof(*eligible_topics),
-	      rd_kafka_assignor_topic_cmp);
-
-	/* Sort members by name */
-	qsort(members, member_cnt, sizeof(*members),
-	      rd_kafka_group_member_cmp);
-
-        for (ti = 0 ; ti < eligible_topic_cnt ; ti++) {
-                rd_kafka_assignor_topic_t *eligible_topic = eligible_topics[ti];
-		int partition;
-
-		/* For each topic+partition, assign one member (in a cyclic
-		 * iteration) per partition until the partitions are exhausted. */
-		for (partition = 0 ;
-		     partition < eligible_topic->metadata->partition_cnt ;
-		     partition++) {
-			rd_kafka_group_member_t *rkgm;
-
-			/* Scan through members until we find one with a
-			 * subscription to this topic. */
-			while (!rd_kafka_group_member_find_subscription(
-				       rk, &members[next],
-				       eligible_topic->metadata->topic))
-				next++;
-
-			rkgm = &members[next];
-
-			rd_kafka_dbg(rk, CGRP, "ASSIGN",
-				     "roundrobin: Member \"%s\": "
-				     "assigned topic %s partition %d",
-				     rkgm->rkgm_member_id->str,
-				     eligible_topic->metadata->topic,
-				     partition);
-
-			rd_kafka_topic_partition_list_add(
-				rkgm->rkgm_assignment,
-				eligible_topic->metadata->topic, partition);
-
-			next = (next+1) % rd_list_cnt(&eligible_topic->members);
-		}
-	}
-
-
-        return 0;
-}
-
-
-

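Note on the round-robin assignor removed above: applications select it by name through the partition.assignment.strategy configuration property, and the group coordinator then picks a strategy supported by all group members. A minimal sketch (illustrative only, not part of this commit):

    #include <librdkafka/rdkafka.h>

    /* Build a consumer configuration offering both built-in assignors;
     * "roundrobin" is listed first, ahead of "range". */
    static rd_kafka_conf_t *make_consumer_conf(void) {
            char errstr[512];
            rd_kafka_conf_t *conf = rd_kafka_conf_new();

            if (rd_kafka_conf_set(conf, "partition.assignment.strategy",
                                  "roundrobin,range",
                                  errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) {
                    rd_kafka_conf_destroy(conf);
                    return NULL;
            }
            return conf;
    }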

[48/51] [partial] nifi-minifi-cpp git commit: MINIFICPP-512 - upgrade to librdkafka 0.11.4

Posted by ph...@apache.org.
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/examples/kafkatest_verifiable_client.cpp
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/examples/kafkatest_verifiable_client.cpp b/thirdparty/librdkafka-0.11.1/examples/kafkatest_verifiable_client.cpp
deleted file mode 100644
index 057db32..0000000
--- a/thirdparty/librdkafka-0.11.1/examples/kafkatest_verifiable_client.cpp
+++ /dev/null
@@ -1,934 +0,0 @@
-/*
- * Copyright (c) 2015, Confluent Inc
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/**
- * librdkafka version of the Java VerifiableProducer and VerifiableConsumer
- * for use with the official Kafka client tests.
- */
-
-
-#include <iostream>
-#include <fstream>
-#include <sstream>
-#include <map>
-#include <string>
-#include <algorithm>
-#include <cstdlib>
-#include <cstdio>
-#include <csignal>
-#include <cstring>
-#include <unistd.h>
-#include <sys/time.h>
-#include <assert.h>
-#include <ctype.h>
-#include <strings.h>
-
-#ifdef _MSC_VER
-#include "../win32/wingetopt.h"
-#elif _AIX
-#include <unistd.h>
-#else
-#include <getopt.h>
-#endif
-
-/*
- * The typical include path in a real application would be
- * #include <librdkafka/rdkafkacpp.h>
- */
-#include "rdkafkacpp.h"
-
-static bool run = true;
-static bool exit_eof = false;
-static int verbosity = 1;
-static std::string value_prefix;
-
-class Assignment {
-
- public:
-  static std::string name (const std::string &t, int partition) {
-    std::stringstream stm;
-    stm << t << "." << partition;
-    return stm.str();
-  }
-
-  Assignment(): topic(""), partition(-1), consumedMessages(0),
-                minOffset(-1), maxOffset(0) {
-    printf("Created assignment\n");
-  }
-  Assignment(const Assignment &a) {
-    topic = a.topic;
-    partition = a.partition;
-    consumedMessages = a.consumedMessages;
-    minOffset = a.minOffset;
-    maxOffset = a.maxOffset;
-  }
-
-  Assignment &operator=(const Assignment &a) {
-    this->topic = a.topic;
-    this->partition = a.partition;
-    this->consumedMessages = a.consumedMessages;
-    this->minOffset = a.minOffset;
-    this->maxOffset = a.maxOffset;
-    return *this;
-  }
-
-  int operator==(const Assignment &a) const {
-    return !(this->topic == a.topic &&
-             this->partition == a.partition);
-  }
-
-  int operator<(const Assignment &a) const {
-    if (this->topic < a.topic) return 1;
-    if (this->topic >= a.topic) return 0;
-    return (this->partition < a.partition);
-  }
-
-  void setup (std::string t, int32_t p) {
-    assert(!t.empty());
-    assert(topic.empty() || topic == t);
-    assert(partition == -1 || partition == p);
-    topic = t;
-    partition = p;
-  }
-
-  std::string topic;
-  int partition;
-  int consumedMessages;
-  int64_t minOffset;
-  int64_t maxOffset;
-};
-
-
-
-
-static struct {
-  int maxMessages;
-
-  struct {
-    int numAcked;
-    int numSent;
-    int numErr;
-  } producer;
-
-  struct {
-    int consumedMessages;
-    int consumedMessagesLastReported;
-    int consumedMessagesAtLastCommit;
-    bool useAutoCommit;
-    std::map<std::string, Assignment> assignments;
-  } consumer;
-} state = {
-  /* .maxMessages = */ -1
-};
-
-
-static RdKafka::KafkaConsumer *consumer;
-
-
-static std::string now () {
-  struct timeval tv;
-  gettimeofday(&tv, NULL);
-  time_t t = tv.tv_sec;
-  struct tm tm;
-  char buf[64];
-
-  localtime_r(&t, &tm);
-  strftime(buf, sizeof(buf), "%H:%M:%S", &tm);
-  snprintf(buf+strlen(buf), sizeof(buf)-strlen(buf), ".%03d",
-           (int)(tv.tv_usec / 1000));
-
-  return buf;
-}
-
-
-static time_t watchdog_last_kick;
-static const int watchdog_timeout = 20; /* Must be > socket.timeout.ms */
-static void sigwatchdog (int sig) {
-  time_t t = time(NULL);
-  if (watchdog_last_kick + watchdog_timeout <= t) {
-    std::cerr << now() << ": WATCHDOG TIMEOUT (" <<
-        (int)(t - watchdog_last_kick) << "s): TERMINATING" << std::endl;
-    int *i = NULL;
-    *i = 100;
-    abort();
-  }
-}
-
-static void watchdog_kick () {
-  watchdog_last_kick = time(NULL);
-
-  /* Safeguard against hangs on exit */
-  alarm(watchdog_timeout);
-}
-
-
-
-
-
-static void errorString (const std::string &name,
-                         const std::string &errmsg,
-                         const std::string &topic,
-                         const std::string *key,
-                         const std::string &value) {
-  std::cout << "{ "
-            << "\"name\": \"" << name << "\", "
-            << "\"_time\": \"" << now() << "\", "
-            << "\"message\": \"" << errmsg << "\", "
-            << "\"topic\": \"" << topic << "\", "
-            << "\"key\": \"" << (key ? *key : "NULL") << "\", "
-            << "\"value\": \"" << value << "\" "
-            << "}" << std::endl;
-}
-
-
-static void successString (const std::string &name,
-                           const std::string &topic,
-                           int partition,
-                           int64_t offset,
-                           const std::string *key,
-                           const std::string &value) {
-  std::cout << "{ "
-            << "\"name\": \"" << name << "\", "
-            << "\"_time\": \"" << now() << "\", "
-            << "\"topic\": \"" << topic << "\", "
-            << "\"partition\": " << partition << ", "
-            << "\"offset\": " << offset << ", "
-            << "\"key\": \"" << (key ? *key : "NULL") << "\", "
-            << "\"value\": \"" << value << "\" "
-            << "}" << std::endl;
-}
-
-
-#if FIXME
-static void offsetStatus (bool success,
-                          const std::string &topic,
-                          int partition,
-                          int64_t offset,
-                          const std::string &errstr) {
-  std::cout << "{ "
-      "\"name\": \"offsets_committed\", " <<
-      "\"success\": " << success << ", " <<
-      "\"offsets\": [ " <<
-      " { " <<
-      " \"topic\": \"" << topic << "\", " <<
-      " \"partition\": " << partition << ", " <<
-      " \"offset\": " << (int)offset << ", " <<
-      " \"error\": \"" << errstr << "\" " <<
-      " } " <<
-      "] }" << std::endl;
-
-}
-#endif
-
-
-static void sigterm (int sig) {
-
-  std::cerr << now() << ": Terminating because of signal " << sig << std::endl;
-
-  if (!run) {
-    std::cerr << now() << ": Forced termination" << std::endl;
-    exit(1);
-  }
-  run = false;
-}
-
-
-class ExampleDeliveryReportCb : public RdKafka::DeliveryReportCb {
- public:
-  void dr_cb (RdKafka::Message &message) {
-    if (message.err()) {
-      state.producer.numErr++;
-      errorString("producer_send_error", message.errstr(),
-                  message.topic_name(),
-                  message.key(),
-                  std::string(static_cast<const char*>(message.payload()),
-                              message.len()));
-    } else {
-      successString("producer_send_success",
-                    message.topic_name(),
-                    (int)message.partition(),
-                    message.offset(),
-                    message.key(),
-                    std::string(static_cast<const char*>(message.payload()),
-                                message.len()));
-      state.producer.numAcked++;
-    }
-  }
-};
-
-
-class ExampleEventCb : public RdKafka::EventCb {
- public:
-  void event_cb (RdKafka::Event &event) {
-    switch (event.type())
-    {
-      case RdKafka::Event::EVENT_ERROR:
-        std::cerr << now() << ": ERROR (" << RdKafka::err2str(event.err()) << "): " <<
-            event.str() << std::endl;
-        break;
-
-      case RdKafka::Event::EVENT_STATS:
-        std::cerr << now() << ": \"STATS\": " << event.str() << std::endl;
-        break;
-
-      case RdKafka::Event::EVENT_LOG:
-        std::cerr << now() << ": LOG-" << event.severity() << "-"
-                  << event.fac() << ": " << event.str() << std::endl;
-        break;
-
-      default:
-        std::cerr << now() << ": EVENT " << event.type() <<
-            " (" << RdKafka::err2str(event.err()) << "): " <<
-            event.str() << std::endl;
-        break;
-    }
-  }
-};
-
-
-/* Use of this partitioner is pretty pointless since no key is provided
- * in the produce() call. */
-class MyHashPartitionerCb : public RdKafka::PartitionerCb {
- public:
-  int32_t partitioner_cb (const RdKafka::Topic *topic, const std::string *key,
-                          int32_t partition_cnt, void *msg_opaque) {
-    return djb_hash(key->c_str(), key->size()) % partition_cnt;
-  }
- private:
-
-  static inline unsigned int djb_hash (const char *str, size_t len) {
-    unsigned int hash = 5381;
-    for (size_t i = 0 ; i < len ; i++)
-      hash = ((hash << 5) + hash) + str[i];
-    return hash;
-  }
-};
-
-
-
-
-
-/**
- * Print the number of records consumed, every 1000 messages or on timeout.
- */
-static void report_records_consumed (int immediate) {
-  std::map<std::string,Assignment> *assignments = &state.consumer.assignments;
-
-  if (state.consumer.consumedMessages <=
-      state.consumer.consumedMessagesLastReported + (immediate ? 0 : 999))
-    return;
-
-  std::cout << "{ "
-      "\"name\": \"records_consumed\", " <<
-      "\"_totcount\": " << state.consumer.consumedMessages << ", " <<
-      "\"count\": " << (state.consumer.consumedMessages -
-                        state.consumer.consumedMessagesLastReported) << ", " <<
-      "\"partitions\": [ ";
-
-  for (std::map<std::string,Assignment>::iterator ii = assignments->begin() ;
-       ii != assignments->end() ; ii++) {
-    Assignment *a = &(*ii).second;
-    assert(!a->topic.empty());
-    std::cout << (ii == assignments->begin() ? "": ", ") << " { " <<
-        " \"topic\": \"" << a->topic << "\", " <<
-        " \"partition\": " << a->partition << ", " <<
-        " \"minOffset\": " << a->minOffset << ", " <<
-        " \"maxOffset\": " << a->maxOffset << " " <<
-        " } ";
-    a->minOffset = -1;
-  }
-
-  std::cout << "] }" << std::endl;
-
-  state.consumer.consumedMessagesLastReported = state.consumer.consumedMessages;
-}
-
-
-class ExampleOffsetCommitCb : public RdKafka::OffsetCommitCb {
- public:
-  void offset_commit_cb (RdKafka::ErrorCode err,
-                         std::vector<RdKafka::TopicPartition*> &offsets) {
-    std::cerr << now() << ": Propagate offset for " << offsets.size() << " partitions, error: " << RdKafka::err2str(err) << std::endl;
-
-    /* No offsets to commit, don't report anything. */
-    if (err == RdKafka::ERR__NO_OFFSET)
-      return;
-
-    /* Send up-to-date records_consumed report to make sure consumed > committed */
-    report_records_consumed(1);
-
-    std::cout << "{ " <<
-        "\"name\": \"offsets_committed\", " <<
-        "\"success\": " << (err ? "false" : "true") << ", " <<
-        "\"error\": \"" << (err ? RdKafka::err2str(err) : "") << "\", " <<
-        "\"_autocommit\": " << (state.consumer.useAutoCommit ? "true":"false") << ", " <<
-        "\"offsets\": [ ";
-    assert(offsets.size() > 0);
-    for (unsigned int i = 0 ; i < offsets.size() ; i++) {
-      std::cout << (i == 0 ? "" : ", ") << "{ " <<
-          " \"topic\": \"" << offsets[i]->topic() << "\", " <<
-          " \"partition\": " << offsets[i]->partition() << ", " <<
-          " \"offset\": " << (int)offsets[i]->offset() << ", " <<
-          " \"error\": \"" <<
-          (offsets[i]->err() ? RdKafka::err2str(offsets[i]->err()) : "") <<
-          "\" " <<
-          " }";
-    }
-    std::cout << " ] }" << std::endl;
-
-  }
-};
-
-static ExampleOffsetCommitCb ex_offset_commit_cb;
-
-
-/**
- * Commit every 1000 messages or whenever there is a consume timeout.
- */
-static void do_commit (RdKafka::KafkaConsumer *consumer,
-                      int immediate) {
-  if (!immediate &&
-      (state.consumer.useAutoCommit ||
-       state.consumer.consumedMessagesAtLastCommit + 1000 >
-       state.consumer.consumedMessages))
-    return;
-
-  /* Make sure we report consumption before commit,
-   * otherwise tests may fail because of commit > consumed. */
-  if (state.consumer.consumedMessagesLastReported <
-      state.consumer.consumedMessages)
-    report_records_consumed(1);
-
-  std::cerr << now() << ": committing " <<
-    (state.consumer.consumedMessages -
-     state.consumer.consumedMessagesAtLastCommit) << " messages" << std::endl;
-
-  RdKafka::ErrorCode err;
-  err = consumer->commitSync(&ex_offset_commit_cb);
-
-  std::cerr << now() << ": " <<
-    "sync commit returned " << RdKafka::err2str(err) << std::endl;
-
-  state.consumer.consumedMessagesAtLastCommit =
-    state.consumer.consumedMessages;
-}
-
-
-void msg_consume(RdKafka::KafkaConsumer *consumer,
-                 RdKafka::Message* msg, void* opaque) {
-  switch (msg->err()) {
-    case RdKafka::ERR__TIMED_OUT:
-      /* Try reporting consumed messages */
-      report_records_consumed(1);
-      /* Commit once per consume() timeout instead of on every message.
-       * Also commit every 1000 messages, whichever comes first. */
-      do_commit(consumer, 1);
-      break;
-
-
-    case RdKafka::ERR_NO_ERROR:
-      {
-        /* Real message */
-        if (verbosity > 2)
-          std::cerr << now() << ": Read msg from " << msg->topic_name() <<
-              " [" << (int)msg->partition() << "]  at offset " <<
-              msg->offset() << std::endl;
-
-        if (state.maxMessages >= 0 &&
-            state.consumer.consumedMessages >= state.maxMessages)
-          return;
-
-
-        Assignment *a =
-            &state.consumer.assignments[Assignment::name(msg->topic_name(),
-                                                         msg->partition())];
-        a->setup(msg->topic_name(), msg->partition());
-
-        a->consumedMessages++;
-        if (a->minOffset == -1)
-          a->minOffset = msg->offset();
-        if (a->maxOffset < msg->offset())
-          a->maxOffset = msg->offset();
-
-        if (msg->key()) {
-          if (verbosity >= 3)
-            std::cerr << now() << ": Key: " << *msg->key() << std::endl;
-        }
-
-        if (verbosity >= 3)
-          fprintf(stderr, "%.*s\n",
-                  static_cast<int>(msg->len()),
-                  static_cast<const char *>(msg->payload()));
-
-        state.consumer.consumedMessages++;
-
-        report_records_consumed(0);
-
-        do_commit(consumer, 0);
-      }
-      break;
-
-    case RdKafka::ERR__PARTITION_EOF:
-      /* Last message */
-      if (exit_eof) {
-        std::cerr << now() << ": Terminate: exit on EOF" << std::endl;
-        run = false;
-      }
-      break;
-
-    case RdKafka::ERR__UNKNOWN_TOPIC:
-    case RdKafka::ERR__UNKNOWN_PARTITION:
-      std::cerr << now() << ": Consume failed: " << msg->errstr() << std::endl;
-      run = false;
-      break;
-
-    case RdKafka::ERR_GROUP_COORDINATOR_NOT_AVAILABLE:
-      std::cerr << now() << ": Warning: " << msg->errstr() << std::endl;
-      break;
-
-    default:
-      /* Errors */
-      std::cerr << now() << ": Consume failed: " << msg->errstr() << std::endl;
-      run = false;
-  }
-}
-
-
-
-
-class ExampleConsumeCb : public RdKafka::ConsumeCb {
- public:
-  void consume_cb (RdKafka::Message &msg, void *opaque) {
-    msg_consume(consumer_, &msg, opaque);
-  }
-  RdKafka::KafkaConsumer *consumer_;
-};
-
-class ExampleRebalanceCb : public RdKafka::RebalanceCb {
- private:
-  static std::string part_list_json (const std::vector<RdKafka::TopicPartition*> &partitions) {
-    std::ostringstream out;
-    for (unsigned int i = 0 ; i < partitions.size() ; i++)
-      out << (i==0?"":", ") << "{ " <<
-          " \"topic\": \"" << partitions[i]->topic() << "\", " <<
-          " \"partition\": " << partitions[i]->partition() <<
-          " }";
-    return out.str();
-  }
- public:
-  void rebalance_cb (RdKafka::KafkaConsumer *consumer,
-                     RdKafka::ErrorCode err,
-                     std::vector<RdKafka::TopicPartition*> &partitions) {
-
-    std::cerr << now() << ": rebalance_cb " << RdKafka::err2str(err) <<
-        " for " << partitions.size() << " partitions" << std::endl;
-    /* Send message report prior to rebalancing event to make sure they
-     * are accounted for on the "right side" of the rebalance. */
-    report_records_consumed(1);
-
-    if (err == RdKafka::ERR__ASSIGN_PARTITIONS)
-      consumer->assign(partitions);
-    else {
-      do_commit(consumer, 1);
-      consumer->unassign();
-    }
-
-    std::cout <<
-      "{ " <<
-      "\"name\": \"partitions_" << (err == RdKafka::ERR__ASSIGN_PARTITIONS ?
-                                    "assigned" : "revoked") << "\", " <<
-      "\"partitions\": [ " << part_list_json(partitions) << "] }" << std::endl;
-
-  }
-};
-
-
-
-
-static void read_conf_file (const std::string &conf_file) {
-  std::ifstream inf(conf_file.c_str());
-
-  std::string line;
-  while (std::getline(inf, line)) {
-    std::cerr << now() << ": conf_file: " << conf_file << ": " << line << std::endl;
-  }
-
-  inf.close();
-}
-
-
-
-
-int main (int argc, char **argv) {
-  std::string brokers = "localhost";
-  std::string errstr;
-  std::vector<std::string> topics;
-  std::string conf_file;
-  std::string mode = "P";
-  int throughput = 0;
-  int32_t partition = RdKafka::Topic::PARTITION_UA;
-  bool do_conf_dump = false;
-  MyHashPartitionerCb hash_partitioner;
-
-  std::cerr << now() << ": librdkafka version " << RdKafka::version_str() <<
-    " (" << RdKafka::version() << ")" << std::endl;
-
-  /*
-   * Create configuration objects
-   */
-  RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
-  RdKafka::Conf *tconf = RdKafka::Conf::create(RdKafka::Conf::CONF_TOPIC);
-
-  /* Avoid slow shutdown on error */
-  if (tconf->set("message.timeout.ms", "60000", errstr)) {
-    std::cerr << now() << ": " << errstr << std::endl;
-    exit(1);
-  }
-
-  {
-    char hostname[128];
-    gethostname(hostname, sizeof(hostname)-1);
-    conf->set("client.id", std::string("rdkafka@") + hostname, errstr);
-  }
-
-  conf->set("log.thread.name", "true", errstr);
-
-  /* correct producer offsets */
-  tconf->set("produce.offset.report", "true", errstr);
-
-  /* auto commit is explicitly enabled with --enable-autocommit */
-  conf->set("enable.auto.commit", "false", errstr);
-
-  /* Keep protocol request timeouts under the watchdog timeout
-   * to make sure things like commitSync() don't fall victim to the watchdog. */
-  conf->set("socket.timeout.ms", "10000", errstr);
-
-  conf->set("fetch.wait.max.ms", "500", errstr);
-  conf->set("fetch.min.bytes", "4096", errstr);
-
-  for (int i = 1 ; i < argc ; i++) {
-    const char *name = argv[i];
-    const char *val = i+1 < argc ? argv[i+1] : NULL;
-
-    if (val && !strncmp(val, "-", 1))
-      val = NULL;
-
-    std::cout << now() << ": argument: " << name << " " <<
-        (val?val:"") << std::endl;
-
-    if (val) {
-      if (!strcmp(name, "--topic"))
-        topics.push_back(val);
-      else if (!strcmp(name, "--broker-list"))
-        brokers = val;
-      else if (!strcmp(name, "--max-messages"))
-        state.maxMessages = atoi(val);
-      else if (!strcmp(name, "--throughput"))
-        throughput = atoi(val);
-      else if (!strcmp(name, "--producer.config") ||
-               !strcmp(name, "--consumer.config"))
-        read_conf_file(val);
-      else if (!strcmp(name, "--group-id"))
-        conf->set("group.id", val, errstr);
-      else if (!strcmp(name, "--session-timeout"))
-        conf->set("session.timeout.ms", val, errstr);
-      else if (!strcmp(name, "--reset-policy")) {
-        if (tconf->set("auto.offset.reset", val, errstr)) {
-          std::cerr << now() << ": " << errstr << std::endl;
-          exit(1);
-        }
-      } else if (!strcmp(name, "--assignment-strategy")) {
-        /* The system tests pass the Java class name(s) rather than
-         * the configuration value. Fix it.
-         * "org.apache.kafka.clients.consumer.RangeAssignor,.." -> "range,.."
-         */
-        std::string s = val;
-        size_t pos;
-
-        while ((pos = s.find("org.apache.kafka.clients.consumer.")) !=
-               std::string::npos)
-          s.erase(pos, strlen("org.apache.kafka.clients.consumer."));
-
-        while ((pos = s.find("Assignor")) != std::string::npos)
-          s.erase(pos, strlen("Assignor"));
-
-        std::transform(s.begin(), s.end(), s.begin(), tolower);
-
-        std::cerr << now() << ": converted " << name << " "
-                  << val << " to " << s << std::endl;
-
-        if  (conf->set("partition.assignment.strategy", s.c_str(), errstr)) {
-          std::cerr << now() << ": " << errstr << std::endl;
-          exit(1);
-        }
-      } else if (!strcmp(name, "--value-prefix")) {
-        value_prefix = std::string(val) + ".";
-      } else if (!strcmp(name, "--debug")) {
-        conf->set("debug", val, errstr);
-      } else if (!strcmp(name, "-X")) {
-        char *s = strdup(val);
-        char *t = strchr(s, '=');
-        if (!t)
-          t = (char *)"";
-        else {
-          *t = '\0';
-          t++;
-        }
-        if (conf->set(s, t, errstr)) {
-          std::cerr << now() << ": " << errstr << std::endl;
-          exit(1);
-        }
-        free(s);
-      } else {
-        std::cerr << now() << ": Unknown option " << name << std::endl;
-        exit(1);
-      }
-
-      i++;
-
-    } else {
-      if (!strcmp(name, "--consumer"))
-        mode = "C";
-      else if (!strcmp(name, "--producer"))
-        mode = "P";
-      else if (!strcmp(name, "--enable-autocommit")) {
-        state.consumer.useAutoCommit = true;
-        conf->set("enable.auto.commit", "true", errstr);
-      } else if (!strcmp(name, "-v"))
-        verbosity++;
-      else if (!strcmp(name, "-q"))
-        verbosity--;
-      else {
-        std::cerr << now() << ": Unknown option or missing argument to " << name << std::endl;
-        exit(1);
-      }
-    }
-  }
-
-  if (topics.empty() || brokers.empty()) {
-    std::cerr << now() << ": Missing --topic and --broker-list" << std::endl;
-    exit(1);
-  }
-
-
-  /*
-   * Set configuration properties
-   */
-  conf->set("metadata.broker.list", brokers, errstr);
-
-  ExampleEventCb ex_event_cb;
-  conf->set("event_cb", &ex_event_cb, errstr);
-
-  if (do_conf_dump) {
-    int pass;
-
-    for (pass = 0 ; pass < 2 ; pass++) {
-      std::list<std::string> *dump;
-      if (pass == 0) {
-        dump = conf->dump();
-        std::cerr << now() << ": # Global config" << std::endl;
-      } else {
-        dump = tconf->dump();
-        std::cerr << now() << ": # Topic config" << std::endl;
-      }
-
-      for (std::list<std::string>::iterator it = dump->begin();
-           it != dump->end(); ) {
-        std::cerr << *it << " = ";
-        it++;
-        std::cerr << *it << std::endl;
-        it++;
-      }
-      std::cerr << std::endl;
-    }
-    exit(0);
-  }
-
-  signal(SIGINT, sigterm);
-  signal(SIGTERM, sigterm);
-  signal(SIGALRM,  sigwatchdog);
-
-
-  if (mode == "P") {
-    /*
-     * Producer mode
-     */
-
-    ExampleDeliveryReportCb ex_dr_cb;
-
-    /* Set delivery report callback */
-    conf->set("dr_cb", &ex_dr_cb, errstr);
-
-    /*
-     * Create producer using accumulated global configuration.
-     */
-    RdKafka::Producer *producer = RdKafka::Producer::create(conf, errstr);
-    if (!producer) {
-      std::cerr << now() << ": Failed to create producer: " << errstr << std::endl;
-      exit(1);
-    }
-
-    std::cerr << now() << ": % Created producer " << producer->name() << std::endl;
-
-    /*
-     * Create topic handle.
-     */
-    RdKafka::Topic *topic = RdKafka::Topic::create(producer, topics[0],
-                                                   tconf, errstr);
-    if (!topic) {
-      std::cerr << now() << ": Failed to create topic: " << errstr << std::endl;
-      exit(1);
-    }
-
-    static const int delay_us = throughput ? 1000000/throughput : 10;
-
-    if (state.maxMessages == -1)
-      state.maxMessages = 1000000; /* Avoid infinite produce */
-
-    for (int i = 0 ; run && i < state.maxMessages ; i++) {
-      /*
-       * Produce message
-       */
-      std::ostringstream msg;
-      msg << value_prefix << i;
-      while (true) {
-        RdKafka::ErrorCode resp =
-            producer->produce(topic, partition,
-                              RdKafka::Producer::RK_MSG_COPY /* Copy payload */,
-                              const_cast<char *>(msg.str().c_str()),
-                              msg.str().size(), NULL, NULL);
-        if (resp == RdKafka::ERR__QUEUE_FULL) {
-          producer->poll(100);
-          continue;
-        } else if (resp != RdKafka::ERR_NO_ERROR) {
-          errorString("producer_send_error",
-                      RdKafka::err2str(resp), topic->name(), NULL, msg.str());
-          state.producer.numErr++;
-        } else {
-          state.producer.numSent++;
-        }
-        break;
-      }
-
-      producer->poll(delay_us / 1000);
-      usleep(1000);
-      watchdog_kick();
-    }
-    run = true;
-
-    while (run && producer->outq_len() > 0) {
-      std::cerr << now() << ": Waiting for " << producer->outq_len() << std::endl;
-      producer->poll(1000);
-      watchdog_kick();
-    }
-
-    std::cerr << now() << ": " << state.producer.numAcked << "/" <<
-        state.producer.numSent << "/" << state.maxMessages <<
-        " msgs acked/sent/max, " << state.producer.numErr <<
-        " errored" << std::endl;
-
-    delete topic;
-    delete producer;
-
-
-  } else if (mode == "C") {
-    /*
-     * Consumer mode
-     */
-
-    tconf->set("auto.offset.reset", "smallest", errstr);
-
-    /* Set default topic config */
-    conf->set("default_topic_conf", tconf, errstr);
-
-    ExampleRebalanceCb ex_rebalance_cb;
-    conf->set("rebalance_cb", &ex_rebalance_cb, errstr);
-
-    conf->set("offset_commit_cb", &ex_offset_commit_cb, errstr);
-
-
-    /*
-     * Create consumer using accumulated global configuration.
-     */
-    consumer = RdKafka::KafkaConsumer::create(conf, errstr);
-    if (!consumer) {
-      std::cerr << now() << ": Failed to create consumer: " <<
-          errstr << std::endl;
-      exit(1);
-    }
-
-    std::cerr << now() << ": % Created consumer " << consumer->name() <<
-        std::endl;
-
-    /*
-     * Subscribe to topic(s)
-     */
-    RdKafka::ErrorCode resp = consumer->subscribe(topics);
-    if (resp != RdKafka::ERR_NO_ERROR) {
-      std::cerr << now() << ": Failed to subscribe to " << topics.size() << " topics: "
-                << RdKafka::err2str(resp) << std::endl;
-      exit(1);
-    }
-
-    watchdog_kick();
-
-    /*
-     * Consume messages
-     */
-    while (run) {
-      RdKafka::Message *msg = consumer->consume(500);
-      msg_consume(consumer, msg, NULL);
-      delete msg;
-      watchdog_kick();
-    }
-
-    std::cerr << now() << ": Final commit on termination" << std::endl;
-
-    /* Final commit */
-    do_commit(consumer, 1);
-
-    /*
-     * Stop consumer
-     */
-    consumer->close();
-
-    delete consumer;
-  }
-
-  std::cout << "{ \"name\": \"shutdown_complete\" }" << std::endl;
-
-  /*
-   * Wait for RdKafka to decommission.
-   * This is not strictly needed (given the outq_len() check above), but
-   * allows RdKafka to clean up all its resources before the application
-   * exits so that memory profilers such as valgrind won't complain about
-   * memory leaks.
-   */
-  RdKafka::wait_destroyed(5000);
-
-  std::cerr << now() << ": EXITING WITH RETURN VALUE 0" << std::endl;
-  return 0;
-}
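
The producer-mode code above reduces to a small, regular pattern: create a global Conf, create a Producer and a Topic handle, produce() with RK_MSG_COPY, and keep calling poll() until outq_len() reaches zero so every delivery report is served. A minimal sketch of that flow (broker address and topic name are placeholders, error handling trimmed):

#include <iostream>
#include <string>
#include "rdkafkacpp.h"

int main() {
  std::string errstr;
  RdKafka::Conf *conf  = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
  RdKafka::Conf *tconf = RdKafka::Conf::create(RdKafka::Conf::CONF_TOPIC);
  conf->set("metadata.broker.list", "localhost:9092", errstr);  /* placeholder broker */

  RdKafka::Producer *producer = RdKafka::Producer::create(conf, errstr);
  if (!producer) { std::cerr << errstr << std::endl; return 1; }

  /* "test" is a placeholder topic name */
  RdKafka::Topic *topic = RdKafka::Topic::create(producer, "test", tconf, errstr);
  if (!topic) { std::cerr << errstr << std::endl; return 1; }

  std::string payload = "hello";
  RdKafka::ErrorCode resp =
      producer->produce(topic, RdKafka::Topic::PARTITION_UA,
                        RdKafka::Producer::RK_MSG_COPY,         /* copy payload */
                        const_cast<char *>(payload.c_str()), payload.size(),
                        NULL, NULL);
  if (resp != RdKafka::ERR_NO_ERROR)
    std::cerr << RdKafka::err2str(resp) << std::endl;

  /* Serve delivery reports until the output queue is drained. */
  while (producer->outq_len() > 0)
    producer->poll(100);

  delete topic;
  delete producer;
  RdKafka::wait_destroyed(5000);
  return 0;
}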

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/examples/rdkafka_consumer_example.c
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/examples/rdkafka_consumer_example.c b/thirdparty/librdkafka-0.11.1/examples/rdkafka_consumer_example.c
deleted file mode 100644
index 3896df8..0000000
--- a/thirdparty/librdkafka-0.11.1/examples/rdkafka_consumer_example.c
+++ /dev/null
@@ -1,624 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2015, Magnus Edenhill
- * All rights reserved.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met: 
- * 
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer. 
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution. 
- * 
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/**
- * Apache Kafka high level consumer example program
- * using the Kafka driver from librdkafka
- * (https://github.com/edenhill/librdkafka)
- */
-
-#include <ctype.h>
-#include <signal.h>
-#include <string.h>
-#include <unistd.h>
-#include <stdlib.h>
-#include <syslog.h>
-#include <sys/time.h>
-#include <errno.h>
-#include <getopt.h>
-
-/* Typical include path would be <librdkafka/rdkafka.h>, but this program
- * is built from within the librdkafka source tree and thus differs. */
-#include "rdkafka.h"  /* for Kafka driver */
-
-
-static int run = 1;
-static rd_kafka_t *rk;
-static int exit_eof = 0;
-static int wait_eof = 0;  /* number of partitions awaiting EOF */
-static int quiet = 0;
-static 	enum {
-	OUTPUT_HEXDUMP,
-	OUTPUT_RAW,
-} output = OUTPUT_HEXDUMP;
-
-static void stop (int sig) {
-        if (!run)
-                exit(1);
-	run = 0;
-	fclose(stdin); /* abort fgets() */
-}
-
-
-static void hexdump (FILE *fp, const char *name, const void *ptr, size_t len) {
-	const char *p = (const char *)ptr;
-	unsigned int of = 0;
-
-
-	if (name)
-		fprintf(fp, "%s hexdump (%zd bytes):\n", name, len);
-
-	for (of = 0 ; of < len ; of += 16) {
-		char hexen[16*3+1];
-		char charen[16+1];
-		int hof = 0;
-
-		int cof = 0;
-		int i;
-
-		for (i = of ; i < (int)of + 16 && i < (int)len ; i++) {
-			hof += sprintf(hexen+hof, "%02x ", p[i] & 0xff);
-			cof += sprintf(charen+cof, "%c",
-				       isprint((int)p[i]) ? p[i] : '.');
-		}
-		fprintf(fp, "%08x: %-48s %-16s\n",
-			of, hexen, charen);
-	}
-}
-
-/**
- * Kafka logger callback (optional)
- */
-static void logger (const rd_kafka_t *rk, int level,
-		    const char *fac, const char *buf) {
-	struct timeval tv;
-	gettimeofday(&tv, NULL);
-	fprintf(stdout, "%u.%03u RDKAFKA-%i-%s: %s: %s\n",
-		(int)tv.tv_sec, (int)(tv.tv_usec / 1000),
-		level, fac, rd_kafka_name(rk), buf);
-}
-
-
-
-/**
- * Handle and print a consumed message.
- * Internally crafted messages are also used to propagate state from
- * librdkafka to the application. The application needs to check
- * the `rkmessage->err` field for this purpose.
- */
-static void msg_consume (rd_kafka_message_t *rkmessage) {
-	if (rkmessage->err) {
-		if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) {
-			fprintf(stderr,
-				"%% Consumer reached end of %s [%"PRId32"] "
-			       "message queue at offset %"PRId64"\n",
-			       rd_kafka_topic_name(rkmessage->rkt),
-			       rkmessage->partition, rkmessage->offset);
-
-			if (exit_eof && --wait_eof == 0) {
-                                fprintf(stderr,
-                                        "%% All partition(s) reached EOF: "
-                                        "exiting\n");
-				run = 0;
-                        }
-
-			return;
-		}
-
-                if (rkmessage->rkt)
-                        fprintf(stderr, "%% Consume error for "
-                                "topic \"%s\" [%"PRId32"] "
-                                "offset %"PRId64": %s\n",
-                                rd_kafka_topic_name(rkmessage->rkt),
-                                rkmessage->partition,
-                                rkmessage->offset,
-                                rd_kafka_message_errstr(rkmessage));
-                else
-                        fprintf(stderr, "%% Consumer error: %s: %s\n",
-                                rd_kafka_err2str(rkmessage->err),
-                                rd_kafka_message_errstr(rkmessage));
-
-                if (rkmessage->err == RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION ||
-                    rkmessage->err == RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC)
-                        run = 0;
-		return;
-	}
-
-	if (!quiet)
-		fprintf(stdout, "%% Message (topic %s [%"PRId32"], "
-                        "offset %"PRId64", %zd bytes):\n",
-                        rd_kafka_topic_name(rkmessage->rkt),
-                        rkmessage->partition,
-			rkmessage->offset, rkmessage->len);
-
-	if (rkmessage->key_len) {
-		if (output == OUTPUT_HEXDUMP)
-			hexdump(stdout, "Message Key",
-				rkmessage->key, rkmessage->key_len);
-		else
-			printf("Key: %.*s\n",
-			       (int)rkmessage->key_len, (char *)rkmessage->key);
-	}
-
-	if (output == OUTPUT_HEXDUMP)
-		hexdump(stdout, "Message Payload",
-			rkmessage->payload, rkmessage->len);
-	else
-		printf("%.*s\n",
-		       (int)rkmessage->len, (char *)rkmessage->payload);
-}
-
-
-static void print_partition_list (FILE *fp,
-                                  const rd_kafka_topic_partition_list_t
-                                  *partitions) {
-        int i;
-        for (i = 0 ; i < partitions->cnt ; i++) {
-                fprintf(stderr, "%s %s [%"PRId32"] offset %"PRId64,
-                        i > 0 ? ",":"",
-                        partitions->elems[i].topic,
-                        partitions->elems[i].partition,
-			partitions->elems[i].offset);
-        }
-        fprintf(stderr, "\n");
-
-}
-static void rebalance_cb (rd_kafka_t *rk,
-                          rd_kafka_resp_err_t err,
-			  rd_kafka_topic_partition_list_t *partitions,
-                          void *opaque) {
-
-	fprintf(stderr, "%% Consumer group rebalanced: ");
-
-	switch (err)
-	{
-	case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS:
-		fprintf(stderr, "assigned:\n");
-		print_partition_list(stderr, partitions);
-		rd_kafka_assign(rk, partitions);
-		wait_eof += partitions->cnt;
-		break;
-
-	case RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS:
-		fprintf(stderr, "revoked:\n");
-		print_partition_list(stderr, partitions);
-		rd_kafka_assign(rk, NULL);
-		wait_eof = 0;
-		break;
-
-	default:
-		fprintf(stderr, "failed: %s\n",
-                        rd_kafka_err2str(err));
-                rd_kafka_assign(rk, NULL);
-		break;
-	}
-}
-
-
-static int describe_groups (rd_kafka_t *rk, const char *group) {
-        rd_kafka_resp_err_t err;
-        const struct rd_kafka_group_list *grplist;
-        int i;
-
-        err = rd_kafka_list_groups(rk, group, &grplist, 10000);
-
-        if (err) {
-                fprintf(stderr, "%% Failed to acquire group list: %s\n",
-                        rd_kafka_err2str(err));
-                return -1;
-        }
-
-        for (i = 0 ; i < grplist->group_cnt ; i++) {
-                const struct rd_kafka_group_info *gi = &grplist->groups[i];
-                int j;
-
-                printf("Group \"%s\" in state %s on broker %d (%s:%d)\n",
-                       gi->group, gi->state,
-                       gi->broker.id, gi->broker.host, gi->broker.port);
-                if (gi->err)
-                        printf(" Error: %s\n", rd_kafka_err2str(gi->err));
-                printf(" Protocol type \"%s\", protocol \"%s\", "
-                       "with %d member(s):\n",
-                       gi->protocol_type, gi->protocol, gi->member_cnt);
-
-                for (j = 0 ; j < gi->member_cnt ; j++) {
-                        const struct rd_kafka_group_member_info *mi;
-                        mi = &gi->members[j];
-
-                        printf("  \"%s\", client id \"%s\" on host %s\n",
-                               mi->member_id, mi->client_id, mi->client_host);
-                        printf("    metadata: %d bytes\n",
-                               mi->member_metadata_size);
-                        printf("    assignment: %d bytes\n",
-                               mi->member_assignment_size);
-                }
-                printf("\n");
-        }
-
-        if (group && !grplist->group_cnt)
-                fprintf(stderr, "%% No matching group (%s)\n", group);
-
-        rd_kafka_group_list_destroy(grplist);
-
-        return 0;
-}
-
-
-
-static void sig_usr1 (int sig) {
-	rd_kafka_dump(stdout, rk);
-}
-
-int main (int argc, char **argv) {
-        char mode = 'C';
-	char *brokers = "localhost:9092";
-	int opt;
-	rd_kafka_conf_t *conf;
-	rd_kafka_topic_conf_t *topic_conf;
-	char errstr[512];
-	const char *debug = NULL;
-	int do_conf_dump = 0;
-	char tmp[16];
-        rd_kafka_resp_err_t err;
-        char *group = NULL;
-        rd_kafka_topic_partition_list_t *topics;
-        int is_subscription;
-        int i;
-
-	quiet = !isatty(STDIN_FILENO);
-
-	/* Kafka configuration */
-	conf = rd_kafka_conf_new();
-
-        /* Set logger */
-        rd_kafka_conf_set_log_cb(conf, logger);
-
-	/* Quick termination */
-	snprintf(tmp, sizeof(tmp), "%i", SIGIO);
-	rd_kafka_conf_set(conf, "internal.termination.signal", tmp, NULL, 0);
-
-	/* Topic configuration */
-	topic_conf = rd_kafka_topic_conf_new();
-
-	while ((opt = getopt(argc, argv, "g:b:qd:eX:ADO")) != -1) {
-		switch (opt) {
-		case 'b':
-			brokers = optarg;
-			break;
-                case 'g':
-                        group = optarg;
-                        break;
-		case 'e':
-			exit_eof = 1;
-			break;
-		case 'd':
-			debug = optarg;
-			break;
-		case 'q':
-			quiet = 1;
-			break;
-		case 'A':
-			output = OUTPUT_RAW;
-			break;
-		case 'X':
-		{
-			char *name, *val;
-			rd_kafka_conf_res_t res;
-
-			if (!strcmp(optarg, "list") ||
-			    !strcmp(optarg, "help")) {
-				rd_kafka_conf_properties_show(stdout);
-				exit(0);
-			}
-
-			if (!strcmp(optarg, "dump")) {
-				do_conf_dump = 1;
-				continue;
-			}
-
-			name = optarg;
-			if (!(val = strchr(name, '='))) {
-				fprintf(stderr, "%% Expected "
-					"-X property=value, not %s\n", name);
-				exit(1);
-			}
-
-			*val = '\0';
-			val++;
-
-			res = RD_KAFKA_CONF_UNKNOWN;
-			/* Try "topic." prefixed properties on topic
-			 * conf first, and then fall through to global if
-			 * it didn't match a topic configuration property. */
-			if (!strncmp(name, "topic.", strlen("topic.")))
-				res = rd_kafka_topic_conf_set(topic_conf,
-							      name+
-							      strlen("topic."),
-							      val,
-							      errstr,
-							      sizeof(errstr));
-
-			if (res == RD_KAFKA_CONF_UNKNOWN)
-				res = rd_kafka_conf_set(conf, name, val,
-							errstr, sizeof(errstr));
-
-			if (res != RD_KAFKA_CONF_OK) {
-				fprintf(stderr, "%% %s\n", errstr);
-				exit(1);
-			}
-		}
-		break;
-
-                case 'D':
-                case 'O':
-                        mode = opt;
-                        break;
-
-		default:
-			goto usage;
-		}
-	}
-
-
-	if (do_conf_dump) {
-		const char **arr;
-		size_t cnt;
-		int pass;
-
-		for (pass = 0 ; pass < 2 ; pass++) {
-			if (pass == 0) {
-				arr = rd_kafka_conf_dump(conf, &cnt);
-				printf("# Global config\n");
-			} else {
-				printf("# Topic config\n");
-				arr = rd_kafka_topic_conf_dump(topic_conf,
-							       &cnt);
-			}
-
-			for (i = 0 ; i < (int)cnt ; i += 2)
-				printf("%s = %s\n",
-				       arr[i], arr[i+1]);
-
-			printf("\n");
-
-			rd_kafka_conf_dump_free(arr, cnt);
-		}
-
-		exit(0);
-	}
-
-
-	if (strchr("OC", mode) && optind == argc) {
-	usage:
-		fprintf(stderr,
-			"Usage: %s [options] <topic[:part]> <topic[:part]>..\n"
-			"\n"
-			"librdkafka version %s (0x%08x)\n"
-			"\n"
-			" Options:\n"
-                        "  -g <group>      Consumer group (%s)\n"
-			"  -b <brokers>    Broker address (%s)\n"
-			"  -e              Exit consumer when last message\n"
-			"                  in partition has been received.\n"
-                        "  -D              Describe group.\n"
-                        "  -O              Get committed offset(s)\n"
-			"  -d [facs..]     Enable debugging contexts:\n"
-			"                  %s\n"
-			"  -q              Be quiet\n"
-			"  -A              Raw payload output (consumer)\n"
-			"  -X <prop=name> Set arbitrary librdkafka "
-			"configuration property\n"
-			"               Properties prefixed with \"topic.\" "
-			"will be set on topic object.\n"
-			"               Use '-X list' to see the full list\n"
-			"               of supported properties.\n"
-			"\n"
-                        "For balanced consumer groups use the 'topic1 topic2..'"
-                        " format\n"
-                        "and for static assignment use "
-                        "'topic1:part1 topic1:part2 topic2:part1..'\n"
-			"\n",
-			argv[0],
-			rd_kafka_version_str(), rd_kafka_version(),
-                        group, brokers,
-			RD_KAFKA_DEBUG_CONTEXTS);
-		exit(1);
-	}
-
-
-	signal(SIGINT, stop);
-	signal(SIGUSR1, sig_usr1);
-
-	if (debug &&
-	    rd_kafka_conf_set(conf, "debug", debug, errstr, sizeof(errstr)) !=
-	    RD_KAFKA_CONF_OK) {
-		fprintf(stderr, "%% Debug configuration failed: %s: %s\n",
-			errstr, debug);
-		exit(1);
-	}
-
-        /*
-         * Client/Consumer group
-         */
-
-        if (strchr("CO", mode)) {
-                /* Consumer groups require a group id */
-                if (!group)
-                        group = "rdkafka_consumer_example";
-                if (rd_kafka_conf_set(conf, "group.id", group,
-                                      errstr, sizeof(errstr)) !=
-                    RD_KAFKA_CONF_OK) {
-                        fprintf(stderr, "%% %s\n", errstr);
-                        exit(1);
-                }
-
-                /* Consumer groups always use broker based offset storage */
-                if (rd_kafka_topic_conf_set(topic_conf, "offset.store.method",
-                                            "broker",
-                                            errstr, sizeof(errstr)) !=
-                    RD_KAFKA_CONF_OK) {
-                        fprintf(stderr, "%% %s\n", errstr);
-                        exit(1);
-                }
-
-                /* Set default topic config for pattern-matched topics. */
-                rd_kafka_conf_set_default_topic_conf(conf, topic_conf);
-
-                /* Callback called on partition assignment changes */
-                rd_kafka_conf_set_rebalance_cb(conf, rebalance_cb);
-        }
-
-        /* Create Kafka handle */
-        if (!(rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf,
-                                errstr, sizeof(errstr)))) {
-                fprintf(stderr,
-                        "%% Failed to create new consumer: %s\n",
-                        errstr);
-                exit(1);
-        }
-
-        /* Add brokers */
-        if (rd_kafka_brokers_add(rk, brokers) == 0) {
-                fprintf(stderr, "%% No valid brokers specified\n");
-                exit(1);
-        }
-
-
-        if (mode == 'D') {
-                int r;
-                /* Describe groups */
-                r = describe_groups(rk, group);
-
-                rd_kafka_destroy(rk);
-                exit(r == -1 ? 1 : 0);
-        }
-
-        /* Redirect rd_kafka_poll() to consumer_poll() */
-        rd_kafka_poll_set_consumer(rk);
-
-        topics = rd_kafka_topic_partition_list_new(argc - optind);
-        is_subscription = 1;
-        for (i = optind ; i < argc ; i++) {
-                /* Parse "topic[:part]" */
-                char *topic = argv[i];
-                char *t;
-                int32_t partition = -1;
-
-                if ((t = strstr(topic, ":"))) {
-                        *t = '\0';
-                        partition = atoi(t+1);
-                        is_subscription = 0; /* is assignment */
-                        wait_eof++;
-                }
-
-                rd_kafka_topic_partition_list_add(topics, topic, partition);
-        }
-
-        if (mode == 'O') {
-                /* Offset query */
-
-                err = rd_kafka_committed(rk, topics, 5000);
-                if (err) {
-                        fprintf(stderr, "%% Failed to fetch offsets: %s\n",
-                                rd_kafka_err2str(err));
-                        exit(1);
-                }
-
-                for (i = 0 ; i < topics->cnt ; i++) {
-                        rd_kafka_topic_partition_t *p = &topics->elems[i];
-                        printf("Topic \"%s\" partition %"PRId32,
-                               p->topic, p->partition);
-                        if (p->err)
-                                printf(" error %s",
-                                       rd_kafka_err2str(p->err));
-                        else {
-                                printf(" offset %"PRId64"",
-                                       p->offset);
-
-                                if (p->metadata_size)
-                                        printf(" (%d bytes of metadata)",
-                                               (int)p->metadata_size);
-                        }
-                        printf("\n");
-                }
-
-                goto done;
-        }
-
-
-        if (is_subscription) {
-                fprintf(stderr, "%% Subscribing to %d topics\n", topics->cnt);
-
-                if ((err = rd_kafka_subscribe(rk, topics))) {
-                        fprintf(stderr,
-                                "%% Failed to start consuming topics: %s\n",
-                                rd_kafka_err2str(err));
-                        exit(1);
-                }
-        } else {
-                fprintf(stderr, "%% Assigning %d partitions\n", topics->cnt);
-
-                if ((err = rd_kafka_assign(rk, topics))) {
-                        fprintf(stderr,
-                                "%% Failed to assign partitions: %s\n",
-                                rd_kafka_err2str(err));
-                }
-        }
-
-        while (run) {
-                rd_kafka_message_t *rkmessage;
-
-                rkmessage = rd_kafka_consumer_poll(rk, 1000);
-                if (rkmessage) {
-                        msg_consume(rkmessage);
-                        rd_kafka_message_destroy(rkmessage);
-                }
-        }
-
-done:
-        err = rd_kafka_consumer_close(rk);
-        if (err)
-                fprintf(stderr, "%% Failed to close consumer: %s\n",
-                        rd_kafka_err2str(err));
-        else
-                fprintf(stderr, "%% Consumer closed\n");
-
-        rd_kafka_topic_partition_list_destroy(topics);
-
-        /* Destroy handle */
-        rd_kafka_destroy(rk);
-
-	/* Let background threads clean up and terminate cleanly. */
-	run = 5;
-	while (run-- > 0 && rd_kafka_wait_destroyed(1000) == -1)
-		printf("Waiting for librdkafka to decommission\n");
-	if (run <= 0)
-		rd_kafka_dump(stdout, rk);
-
-	return 0;
-}

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/examples/rdkafka_consumer_example.cpp
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/examples/rdkafka_consumer_example.cpp b/thirdparty/librdkafka-0.11.1/examples/rdkafka_consumer_example.cpp
deleted file mode 100644
index 83da691..0000000
--- a/thirdparty/librdkafka-0.11.1/examples/rdkafka_consumer_example.cpp
+++ /dev/null
@@ -1,485 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2014, Magnus Edenhill
- * All rights reserved.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met: 
- * 
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer. 
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution. 
- * 
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/**
- * Apache Kafka consumer & producer example programs
- * using the Kafka driver from librdkafka
- * (https://github.com/edenhill/librdkafka)
- */
-
-#include <iostream>
-#include <string>
-#include <cstdlib>
-#include <cstdio>
-#include <csignal>
-#include <cstring>
-
-#ifndef _MSC_VER
-#include <sys/time.h>
-#endif
-
-#ifdef _MSC_VER
-#include "../win32/wingetopt.h"
-#include <atltime.h>
-#elif _AIX
-#include <unistd.h>
-#else
-#include <getopt.h>
-#include <unistd.h>
-#endif
-
-/*
- * Typically the include path in a real application would be
- * #include <librdkafka/rdkafkacpp.h>
- */
-#include "rdkafkacpp.h"
-
-
-
-static bool run = true;
-static bool exit_eof = false;
-static int eof_cnt = 0;
-static int partition_cnt = 0;
-static int verbosity = 1;
-static long msg_cnt = 0;
-static int64_t msg_bytes = 0;
-static void sigterm (int sig) {
-  run = false;
-}
-
-
-/**
- * @brief format a string timestamp from the current time
- */
-static void print_time () {
-#ifndef _MSC_VER
-        struct timeval tv;
-        char buf[64];
-        gettimeofday(&tv, NULL);
-        strftime(buf, sizeof(buf) - 1, "%Y-%m-%d %H:%M:%S", localtime(&tv.tv_sec));
-        fprintf(stderr, "%s.%03d: ", buf, (int)(tv.tv_usec / 1000));
-#else
-        std::wcerr << CTime::GetCurrentTime().Format(_T("%Y-%m-%d %H:%M:%S")).GetString()
-                << ": ";
-#endif
-}
-class ExampleEventCb : public RdKafka::EventCb {
- public:
-  void event_cb (RdKafka::Event &event) {
-
-    print_time();
-
-    switch (event.type())
-    {
-      case RdKafka::Event::EVENT_ERROR:
-        std::cerr << "ERROR (" << RdKafka::err2str(event.err()) << "): " <<
-            event.str() << std::endl;
-        if (event.err() == RdKafka::ERR__ALL_BROKERS_DOWN)
-          run = false;
-        break;
-
-      case RdKafka::Event::EVENT_STATS:
-        std::cerr << "\"STATS\": " << event.str() << std::endl;
-        break;
-
-      case RdKafka::Event::EVENT_LOG:
-        fprintf(stderr, "LOG-%i-%s: %s\n",
-                event.severity(), event.fac().c_str(), event.str().c_str());
-        break;
-
-      case RdKafka::Event::EVENT_THROTTLE:
-	std::cerr << "THROTTLED: " << event.throttle_time() << "ms by " <<
-	  event.broker_name() << " id " << (int)event.broker_id() << std::endl;
-	break;
-
-      default:
-        std::cerr << "EVENT " << event.type() <<
-            " (" << RdKafka::err2str(event.err()) << "): " <<
-            event.str() << std::endl;
-        break;
-    }
-  }
-};
-
-
-class ExampleRebalanceCb : public RdKafka::RebalanceCb {
-private:
-  static void part_list_print (const std::vector<RdKafka::TopicPartition*>&partitions){
-    for (unsigned int i = 0 ; i < partitions.size() ; i++)
-      std::cerr << partitions[i]->topic() <<
-	"[" << partitions[i]->partition() << "], ";
-    std::cerr << "\n";
-  }
-
-public:
-  void rebalance_cb (RdKafka::KafkaConsumer *consumer,
-		     RdKafka::ErrorCode err,
-                     std::vector<RdKafka::TopicPartition*> &partitions) {
-    std::cerr << "RebalanceCb: " << RdKafka::err2str(err) << ": ";
-
-    part_list_print(partitions);
-
-    if (err == RdKafka::ERR__ASSIGN_PARTITIONS) {
-      consumer->assign(partitions);
-      partition_cnt = (int)partitions.size();
-    } else {
-      consumer->unassign();
-      partition_cnt = 0;
-    }
-    eof_cnt = 0;
-  }
-};
-
-
-void msg_consume(RdKafka::Message* message, void* opaque) {
-  switch (message->err()) {
-    case RdKafka::ERR__TIMED_OUT:
-      break;
-
-    case RdKafka::ERR_NO_ERROR:
-      /* Real message */
-      msg_cnt++;
-      msg_bytes += message->len();
-      if (verbosity >= 3)
-        std::cerr << "Read msg at offset " << message->offset() << std::endl;
-      RdKafka::MessageTimestamp ts;
-      ts = message->timestamp();
-      if (verbosity >= 2 &&
-	  ts.type != RdKafka::MessageTimestamp::MSG_TIMESTAMP_NOT_AVAILABLE) {
-	std::string tsname = "?";
-	if (ts.type == RdKafka::MessageTimestamp::MSG_TIMESTAMP_CREATE_TIME)
-	  tsname = "create time";
-        else if (ts.type == RdKafka::MessageTimestamp::MSG_TIMESTAMP_LOG_APPEND_TIME)
-          tsname = "log append time";
-        std::cout << "Timestamp: " << tsname << " " << ts.timestamp << std::endl;
-      }
-      if (verbosity >= 2 && message->key()) {
-        std::cout << "Key: " << *message->key() << std::endl;
-      }
-      if (verbosity >= 1) {
-        printf("%.*s\n",
-               static_cast<int>(message->len()),
-               static_cast<const char *>(message->payload()));
-      }
-      break;
-
-    case RdKafka::ERR__PARTITION_EOF:
-      /* Last message */
-      if (exit_eof && ++eof_cnt == partition_cnt) {
-        std::cerr << "%% EOF reached for all " << partition_cnt <<
-            " partition(s)" << std::endl;
-        run = false;
-      }
-      break;
-
-    case RdKafka::ERR__UNKNOWN_TOPIC:
-    case RdKafka::ERR__UNKNOWN_PARTITION:
-      std::cerr << "Consume failed: " << message->errstr() << std::endl;
-      run = false;
-      break;
-
-    default:
-      /* Errors */
-      std::cerr << "Consume failed: " << message->errstr() << std::endl;
-      run = false;
-  }
-}
-
-
-class ExampleConsumeCb : public RdKafka::ConsumeCb {
- public:
-  void consume_cb (RdKafka::Message &msg, void *opaque) {
-    msg_consume(&msg, opaque);
-  }
-};
-
-
-
-int main (int argc, char **argv) {
-  std::string brokers = "localhost";
-  std::string errstr;
-  std::string topic_str;
-  std::string mode;
-  std::string debug;
-  std::vector<std::string> topics;
-  bool do_conf_dump = false;
-  int opt;
-  int use_ccb = 0;
-
-  /*
-   * Create configuration objects
-   */
-  RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
-  RdKafka::Conf *tconf = RdKafka::Conf::create(RdKafka::Conf::CONF_TOPIC);
-
-  ExampleRebalanceCb ex_rebalance_cb;
-  conf->set("rebalance_cb", &ex_rebalance_cb, errstr);
-
-  while ((opt = getopt(argc, argv, "g:b:z:qd:eX:AM:f:qv")) != -1) {
-    switch (opt) {
-    case 'g':
-      if (conf->set("group.id",  optarg, errstr) != RdKafka::Conf::CONF_OK) {
-        std::cerr << errstr << std::endl;
-        exit(1);
-      }
-      break;
-    case 'b':
-      brokers = optarg;
-      break;
-    case 'z':
-      if (conf->set("compression.codec", optarg, errstr) !=
-	  RdKafka::Conf::CONF_OK) {
-	std::cerr << errstr << std::endl;
-	exit(1);
-      }
-      break;
-    case 'e':
-      exit_eof = true;
-      break;
-    case 'd':
-      debug = optarg;
-      break;
-    case 'M':
-      if (conf->set("statistics.interval.ms", optarg, errstr) !=
-          RdKafka::Conf::CONF_OK) {
-        std::cerr << errstr << std::endl;
-        exit(1);
-      }
-      break;
-    case 'X':
-      {
-	char *name, *val;
-
-	if (!strcmp(optarg, "dump")) {
-	  do_conf_dump = true;
-	  continue;
-	}
-
-	name = optarg;
-	if (!(val = strchr(name, '='))) {
-          std::cerr << "%% Expected -X property=value, not " <<
-              name << std::endl;
-	  exit(1);
-	}
-
-	*val = '\0';
-	val++;
-
-	/* Try "topic." prefixed properties on topic
-	 * conf first, and then fall through to global if
-	 * it didn't match a topic configuration property. */
-        RdKafka::Conf::ConfResult res = RdKafka::Conf::CONF_UNKNOWN;
-	if (!strncmp(name, "topic.", strlen("topic.")))
-          res = tconf->set(name+strlen("topic."), val, errstr);
-        if (res == RdKafka::Conf::CONF_UNKNOWN)
-	  res = conf->set(name, val, errstr);
-
-	if (res != RdKafka::Conf::CONF_OK) {
-          std::cerr << errstr << std::endl;
-	  exit(1);
-	}
-      }
-      break;
-
-      case 'f':
-        if (!strcmp(optarg, "ccb"))
-          use_ccb = 1;
-        else {
-          std::cerr << "Unknown option: " << optarg << std::endl;
-          exit(1);
-        }
-        break;
-
-      case 'q':
-        verbosity--;
-        break;
-
-      case 'v':
-        verbosity++;
-        break;
-
-    default:
-      goto usage;
-    }
-  }
-
-  for (; optind < argc ; optind++)
-    topics.push_back(std::string(argv[optind]));
-
-  if (topics.empty() || optind != argc) {
-  usage:
-    fprintf(stderr,
-            "Usage: %s -g <group-id> [options] topic1 topic2..\n"
-            "\n"
-            "librdkafka version %s (0x%08x)\n"
-            "\n"
-            " Options:\n"
-            "  -g <group-id>   Consumer group id\n"
-            "  -b <brokers>    Broker address (localhost:9092)\n"
-            "  -z <codec>      Enable compression:\n"
-            "                  none|gzip|snappy\n"
-            "  -e              Exit consumer when last message\n"
-            "                  in partition has been received.\n"
-            "  -d [facs..]     Enable debugging contexts:\n"
-            "                  %s\n"
-            "  -M <intervalms> Enable statistics\n"
-            "  -X <prop=name>  Set arbitrary librdkafka "
-            "configuration property\n"
-            "                  Properties prefixed with \"topic.\" "
-            "will be set on topic object.\n"
-            "                  Use '-X list' to see the full list\n"
-            "                  of supported properties.\n"
-            "  -f <flag>       Set option:\n"
-            "                     ccb - use consume_callback\n"
-            "  -q              Quiet / Decrease verbosity\n"
-            "  -v              Increase verbosity\n"
-            "\n"
-            "\n",
-	    argv[0],
-	    RdKafka::version_str().c_str(), RdKafka::version(),
-	    RdKafka::get_debug_contexts().c_str());
-	exit(1);
-  }
-
-
-  /*
-   * Set configuration properties
-   */
-  conf->set("metadata.broker.list", brokers, errstr);
-
-  if (!debug.empty()) {
-    if (conf->set("debug", debug, errstr) != RdKafka::Conf::CONF_OK) {
-      std::cerr << errstr << std::endl;
-      exit(1);
-    }
-  }
-
-  ExampleConsumeCb ex_consume_cb;
-
-  if(use_ccb) {
-    conf->set("consume_cb", &ex_consume_cb, errstr);
-  }
-
-  ExampleEventCb ex_event_cb;
-  conf->set("event_cb", &ex_event_cb, errstr);
-
-  if (do_conf_dump) {
-    int pass;
-
-    for (pass = 0 ; pass < 2 ; pass++) {
-      std::list<std::string> *dump;
-      if (pass == 0) {
-        dump = conf->dump();
-        std::cout << "# Global config" << std::endl;
-      } else {
-        dump = tconf->dump();
-        std::cout << "# Topic config" << std::endl;
-      }
-
-      for (std::list<std::string>::iterator it = dump->begin();
-           it != dump->end(); ) {
-        std::cout << *it << " = ";
-        it++;
-        std::cout << *it << std::endl;
-        it++;
-      }
-      std::cout << std::endl;
-    }
-    exit(0);
-  }
-
-  conf->set("default_topic_conf", tconf, errstr);
-  delete tconf;
-
-  signal(SIGINT, sigterm);
-  signal(SIGTERM, sigterm);
-
-
-  /*
-   * Consumer mode
-   */
-
-  /*
-   * Create consumer using accumulated global configuration.
-   */
-  RdKafka::KafkaConsumer *consumer = RdKafka::KafkaConsumer::create(conf, errstr);
-  if (!consumer) {
-    std::cerr << "Failed to create consumer: " << errstr << std::endl;
-    exit(1);
-  }
-
-  delete conf;
-
-  std::cout << "% Created consumer " << consumer->name() << std::endl;
-
-
-  /*
-   * Subscribe to topics
-   */
-  RdKafka::ErrorCode err = consumer->subscribe(topics);
-  if (err) {
-    std::cerr << "Failed to subscribe to " << topics.size() << " topics: "
-              << RdKafka::err2str(err) << std::endl;
-    exit(1);
-  }
-
-  /*
-   * Consume messages
-   */
-  while (run) {
-    RdKafka::Message *msg = consumer->consume(1000);
-    if (!use_ccb) {
-      msg_consume(msg, NULL);
-    }
-    delete msg;
-  }
-
-#ifndef _MSC_VER
-  alarm(10);
-#endif
-
-  /*
-   * Stop consumer
-   */
-  consumer->close();
-  delete consumer;
-
-  std::cerr << "% Consumed " << msg_cnt << " messages ("
-            << msg_bytes << " bytes)" << std::endl;
-
-  /*
-   * Wait for RdKafka to decommission.
-   * This is not strictly needed (given the outq_len() check above), but
-   * allows RdKafka to clean up all its resources before the application
-   * exits so that memory profilers such as valgrind won't complain about
-   * memory leaks.
-   */
-  RdKafka::wait_destroyed(5000);
-
-  return 0;
-}
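
The high-level consumer example above follows the standard KafkaConsumer flow: set group.id and brokers on a global Conf, create the consumer, subscribe(), loop on consume(), then close() so the group is left cleanly before wait_destroyed(). A stripped-down sketch of that flow, with a placeholder broker, group id and topic, and a bounded loop in place of signal handling:

#include <iostream>
#include <string>
#include <vector>
#include "rdkafkacpp.h"

int main() {
  std::string errstr;
  RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
  conf->set("metadata.broker.list", "localhost:9092", errstr);  /* placeholder broker */
  conf->set("group.id", "example_group", errstr);               /* placeholder group id */

  RdKafka::KafkaConsumer *consumer = RdKafka::KafkaConsumer::create(conf, errstr);
  if (!consumer) { std::cerr << errstr << std::endl; return 1; }
  delete conf;

  std::vector<std::string> topics(1, "test");                   /* placeholder topic */
  RdKafka::ErrorCode err = consumer->subscribe(topics);
  if (err) { std::cerr << RdKafka::err2str(err) << std::endl; return 1; }

  for (int i = 0; i < 100; i++) {          /* bounded instead of run-until-signal */
    RdKafka::Message *msg = consumer->consume(1000);
    if (msg->err() == RdKafka::ERR_NO_ERROR)
      std::cout.write(static_cast<const char *>(msg->payload()),
                      static_cast<std::streamsize>(msg->len())) << std::endl;
    delete msg;
  }

  consumer->close();
  delete consumer;
  RdKafka::wait_destroyed(5000);
  return 0;
}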

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/examples/rdkafka_example.c
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/examples/rdkafka_example.c b/thirdparty/librdkafka-0.11.1/examples/rdkafka_example.c
deleted file mode 100644
index 61c7ac5..0000000
--- a/thirdparty/librdkafka-0.11.1/examples/rdkafka_example.c
+++ /dev/null
@@ -1,806 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012, Magnus Edenhill
- * All rights reserved.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met: 
- * 
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer. 
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution. 
- * 
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/**
- * Apache Kafka consumer & producer example programs
- * using the Kafka driver from librdkafka
- * (https://github.com/edenhill/librdkafka)
- */
-
-#include <ctype.h>
-#include <signal.h>
-#include <string.h>
-#include <unistd.h>
-#include <stdlib.h>
-#include <syslog.h>
-#include <time.h>
-#include <sys/time.h>
-#include <getopt.h>
-
-/* Typical include path would be <librdkafka/rdkafka.h>, but this program
- * is built from within the librdkafka source tree and thus differs. */
-#include "rdkafka.h"  /* for Kafka driver */
-
-
-static int run = 1;
-static rd_kafka_t *rk;
-static int exit_eof = 0;
-static int quiet = 0;
-static 	enum {
-	OUTPUT_HEXDUMP,
-	OUTPUT_RAW,
-} output = OUTPUT_HEXDUMP;
-
-static void stop (int sig) {
-	run = 0;
-	fclose(stdin); /* abort fgets() */
-}
-
-
-static void hexdump (FILE *fp, const char *name, const void *ptr, size_t len) {
-	const char *p = (const char *)ptr;
-	size_t of = 0;
-
-
-	if (name)
-		fprintf(fp, "%s hexdump (%zd bytes):\n", name, len);
-
-	for (of = 0 ; of < len ; of += 16) {
-		char hexen[16*3+1];
-		char charen[16+1];
-		int hof = 0;
-
-		int cof = 0;
-		int i;
-
-		for (i = of ; i < (int)of + 16 && i < (int)len ; i++) {
-			hof += sprintf(hexen+hof, "%02x ", p[i] & 0xff);
-			cof += sprintf(charen+cof, "%c",
-				       isprint((int)p[i]) ? p[i] : '.');
-		}
-		fprintf(fp, "%08zx: %-48s %-16s\n",
-			of, hexen, charen);
-	}
-}
-
-/**
- * Kafka logger callback (optional)
- */
-static void logger (const rd_kafka_t *rk, int level,
-		    const char *fac, const char *buf) {
-	struct timeval tv;
-	gettimeofday(&tv, NULL);
-	fprintf(stderr, "%u.%03u RDKAFKA-%i-%s: %s: %s\n",
-		(int)tv.tv_sec, (int)(tv.tv_usec / 1000),
-		level, fac, rk ? rd_kafka_name(rk) : NULL, buf);
-}
-
-/**
- * Message delivery report callback.
- * Called once for each message.
- * See rdkafka.h for more information.
- */
-static void msg_delivered (rd_kafka_t *rk,
-			   void *payload, size_t len,
-			   int error_code,
-			   void *opaque, void *msg_opaque) {
-
-	if (error_code)
-		fprintf(stderr, "%% Message delivery failed: %s\n",
-			rd_kafka_err2str(error_code));
-	else if (!quiet)
-		fprintf(stderr, "%% Message delivered (%zd bytes): %.*s\n", len,
-			(int)len, (const char *)payload);
-}
-
-/**
- * Message delivery report callback using the richer rd_kafka_message_t object.
- */
-static void msg_delivered2 (rd_kafka_t *rk,
-                            const rd_kafka_message_t *rkmessage, void *opaque) {
-	printf("del: %s: offset %"PRId64"\n",
-	       rd_kafka_err2str(rkmessage->err), rkmessage->offset);
-        if (rkmessage->err)
-		fprintf(stderr, "%% Message delivery failed: %s\n",
-                        rd_kafka_err2str(rkmessage->err));
-	else if (!quiet)
-		fprintf(stderr,
-                        "%% Message delivered (%zd bytes, offset %"PRId64", "
-                        "partition %"PRId32"): %.*s\n",
-                        rkmessage->len, rkmessage->offset,
-			rkmessage->partition,
-			(int)rkmessage->len, (const char *)rkmessage->payload);
-}
-
-
-static void msg_consume (rd_kafka_message_t *rkmessage,
-			 void *opaque) {
-	if (rkmessage->err) {
-		if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) {
-			fprintf(stderr,
-				"%% Consumer reached end of %s [%"PRId32"] "
-			       "message queue at offset %"PRId64"\n",
-			       rd_kafka_topic_name(rkmessage->rkt),
-			       rkmessage->partition, rkmessage->offset);
-
-			if (exit_eof)
-				run = 0;
-
-			return;
-		}
-
-		fprintf(stderr, "%% Consume error for topic \"%s\" [%"PRId32"] "
-		       "offset %"PRId64": %s\n",
-		       rd_kafka_topic_name(rkmessage->rkt),
-		       rkmessage->partition,
-		       rkmessage->offset,
-		       rd_kafka_message_errstr(rkmessage));
-
-                if (rkmessage->err == RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION ||
-                    rkmessage->err == RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC)
-                        run = 0;
-		return;
-	}
-
-	if (!quiet) {
-		rd_kafka_timestamp_type_t tstype;
-		int64_t timestamp;
-		fprintf(stdout, "%% Message (offset %"PRId64", %zd bytes):\n",
-			rkmessage->offset, rkmessage->len);
-
-		timestamp = rd_kafka_message_timestamp(rkmessage, &tstype);
-		if (tstype != RD_KAFKA_TIMESTAMP_NOT_AVAILABLE) {
-			const char *tsname = "?";
-			if (tstype == RD_KAFKA_TIMESTAMP_CREATE_TIME)
-				tsname = "create time";
-			else if (tstype == RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME)
-				tsname = "log append time";
-
-			fprintf(stdout, "%% Message timestamp: %s %"PRId64
-				" (%ds ago)\n",
-				tsname, timestamp,
-				!timestamp ? 0 :
-				(int)time(NULL) - (int)(timestamp/1000));
-		}
-	}
-
-	if (rkmessage->key_len) {
-		if (output == OUTPUT_HEXDUMP)
-			hexdump(stdout, "Message Key",
-				rkmessage->key, rkmessage->key_len);
-		else
-			printf("Key: %.*s\n",
-			       (int)rkmessage->key_len, (char *)rkmessage->key);
-	}
-
-	if (output == OUTPUT_HEXDUMP)
-		hexdump(stdout, "Message Payload",
-			rkmessage->payload, rkmessage->len);
-	else
-		printf("%.*s\n",
-		       (int)rkmessage->len, (char *)rkmessage->payload);
-}
-
-
-static void metadata_print (const char *topic,
-                            const struct rd_kafka_metadata *metadata) {
-        int i, j, k;
-
-        printf("Metadata for %s (from broker %"PRId32": %s):\n",
-               topic ? : "all topics",
-               metadata->orig_broker_id,
-               metadata->orig_broker_name);
-
-
-        /* Iterate brokers */
-        printf(" %i brokers:\n", metadata->broker_cnt);
-        for (i = 0 ; i < metadata->broker_cnt ; i++)
-                printf("  broker %"PRId32" at %s:%i\n",
-                       metadata->brokers[i].id,
-                       metadata->brokers[i].host,
-                       metadata->brokers[i].port);
-
-        /* Iterate topics */
-        printf(" %i topics:\n", metadata->topic_cnt);
-        for (i = 0 ; i < metadata->topic_cnt ; i++) {
-                const struct rd_kafka_metadata_topic *t = &metadata->topics[i];
-                printf("  topic \"%s\" with %i partitions:",
-                       t->topic,
-                       t->partition_cnt);
-                if (t->err) {
-                        printf(" %s", rd_kafka_err2str(t->err));
-                        if (t->err == RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE)
-                                printf(" (try again)");
-                }
-                printf("\n");
-
-                /* Iterate topic's partitions */
-                for (j = 0 ; j < t->partition_cnt ; j++) {
-                        const struct rd_kafka_metadata_partition *p;
-                        p = &t->partitions[j];
-                        printf("    partition %"PRId32", "
-                               "leader %"PRId32", replicas: ",
-                               p->id, p->leader);
-
-                        /* Iterate partition's replicas */
-                        for (k = 0 ; k < p->replica_cnt ; k++)
-                                printf("%s%"PRId32,
-                                       k > 0 ? ",":"", p->replicas[k]);
-
-                        /* Iterate partition's ISRs */
-                        printf(", isrs: ");
-                        for (k = 0 ; k < p->isr_cnt ; k++)
-                                printf("%s%"PRId32,
-                                       k > 0 ? ",":"", p->isrs[k]);
-                        if (p->err)
-                                printf(", %s\n", rd_kafka_err2str(p->err));
-                        else
-                                printf("\n");
-                }
-        }
-}
-
-
-static void sig_usr1 (int sig) {
-	rd_kafka_dump(stdout, rk);
-}
-
-int main (int argc, char **argv) {
-	rd_kafka_topic_t *rkt;
-	char *brokers = "localhost:9092";
-	char mode = 'C';
-	char *topic = NULL;
-	int partition = RD_KAFKA_PARTITION_UA;
-	int opt;
-	rd_kafka_conf_t *conf;
-	rd_kafka_topic_conf_t *topic_conf;
-	char errstr[512];
-	int64_t start_offset = 0;
-        int report_offsets = 0;
-	int do_conf_dump = 0;
-	char tmp[16];
-        int64_t seek_offset = 0;
-        int64_t tmp_offset = 0;
-	int get_wmarks = 0;
-
-	/* Kafka configuration */
-	conf = rd_kafka_conf_new();
-
-        /* Set logger */
-        rd_kafka_conf_set_log_cb(conf, logger);
-
-	/* Quick termination */
-	snprintf(tmp, sizeof(tmp), "%i", SIGIO);
-	rd_kafka_conf_set(conf, "internal.termination.signal", tmp, NULL, 0);
-
-	/* Topic configuration */
-	topic_conf = rd_kafka_topic_conf_new();
-
-	while ((opt = getopt(argc, argv, "PCLt:p:b:z:qd:o:eX:As:")) != -1) {
-		switch (opt) {
-		case 'P':
-		case 'C':
-                case 'L':
-			mode = opt;
-			break;
-		case 't':
-			topic = optarg;
-			break;
-		case 'p':
-			partition = atoi(optarg);
-			break;
-		case 'b':
-			brokers = optarg;
-			break;
-		case 'z':
-			if (rd_kafka_conf_set(conf, "compression.codec",
-					      optarg,
-					      errstr, sizeof(errstr)) !=
-			    RD_KAFKA_CONF_OK) {
-				fprintf(stderr, "%% %s\n", errstr);
-				exit(1);
-			}
-			break;
-		case 'o':
-                case 's':
-			if (!strcmp(optarg, "end"))
-				tmp_offset = RD_KAFKA_OFFSET_END;
-			else if (!strcmp(optarg, "beginning"))
-				tmp_offset = RD_KAFKA_OFFSET_BEGINNING;
-			else if (!strcmp(optarg, "stored"))
-				tmp_offset = RD_KAFKA_OFFSET_STORED;
-                        else if (!strcmp(optarg, "report"))
-                                report_offsets = 1;
-			else if (!strcmp(optarg, "wmark"))
-				get_wmarks = 1;
-			else {
-				tmp_offset = strtoll(optarg, NULL, 10);
-
-				if (tmp_offset < 0)
-					tmp_offset = RD_KAFKA_OFFSET_TAIL(-tmp_offset);
-			}
-
-                        if (opt == 'o')
-                                start_offset = tmp_offset;
-                        else if (opt == 's')
-                                seek_offset = tmp_offset;
-			break;
-		case 'e':
-			exit_eof = 1;
-			break;
-		case 'd':
-			if (rd_kafka_conf_set(conf, "debug", optarg,
-					      errstr, sizeof(errstr)) !=
-			    RD_KAFKA_CONF_OK) {
-				fprintf(stderr,
-					"%% Debug configuration failed: "
-					"%s: %s\n",
-					errstr, optarg);
-				exit(1);
-			}
-			break;
-		case 'q':
-			quiet = 1;
-			break;
-		case 'A':
-			output = OUTPUT_RAW;
-			break;
-		case 'X':
-		{
-			char *name, *val;
-			rd_kafka_conf_res_t res;
-
-			if (!strcmp(optarg, "list") ||
-			    !strcmp(optarg, "help")) {
-				rd_kafka_conf_properties_show(stdout);
-				exit(0);
-			}
-
-			if (!strcmp(optarg, "dump")) {
-				do_conf_dump = 1;
-				continue;
-			}
-
-			name = optarg;
-			if (!(val = strchr(name, '='))) {
-				char dest[512];
-				size_t dest_size = sizeof(dest);
-				/* Return current value for property. */
-
-				res = RD_KAFKA_CONF_UNKNOWN;
-				if (!strncmp(name, "topic.", strlen("topic.")))
-					res = rd_kafka_topic_conf_get(
-						topic_conf,
-						name+strlen("topic."),
-						dest, &dest_size);
-				if (res == RD_KAFKA_CONF_UNKNOWN)
-					res = rd_kafka_conf_get(
-						conf, name, dest, &dest_size);
-
-				if (res == RD_KAFKA_CONF_OK) {
-					printf("%s = %s\n", name, dest);
-					exit(0);
-				} else {
-					fprintf(stderr,
-						"%% %s property\n",
-						res == RD_KAFKA_CONF_UNKNOWN ?
-						"Unknown" : "Invalid");
-					exit(1);
-				}
-			}
-
-			*val = '\0';
-			val++;
-
-			res = RD_KAFKA_CONF_UNKNOWN;
-			/* Try "topic." prefixed properties on topic
-			 * conf first, and then fall through to global if
-			 * it didn't match a topic configuration property. */
-			if (!strncmp(name, "topic.", strlen("topic.")))
-				res = rd_kafka_topic_conf_set(topic_conf,
-							      name+
-							      strlen("topic."),
-							      val,
-							      errstr,
-							      sizeof(errstr));
-
-			if (res == RD_KAFKA_CONF_UNKNOWN)
-				res = rd_kafka_conf_set(conf, name, val,
-							errstr, sizeof(errstr));
-
-			if (res != RD_KAFKA_CONF_OK) {
-				fprintf(stderr, "%% %s\n", errstr);
-				exit(1);
-			}
-		}
-		break;
-
-		default:
-			goto usage;
-		}
-	}
-
-
-	if (do_conf_dump) {
-		const char **arr;
-		size_t cnt;
-		int pass;
-
-		for (pass = 0 ; pass < 2 ; pass++) {
-			int i;
-
-			if (pass == 0) {
-				arr = rd_kafka_conf_dump(conf, &cnt);
-				printf("# Global config\n");
-			} else {
-				printf("# Topic config\n");
-				arr = rd_kafka_topic_conf_dump(topic_conf,
-							       &cnt);
-			}
-
-			for (i = 0 ; i < (int)cnt ; i += 2)
-				printf("%s = %s\n",
-				       arr[i], arr[i+1]);
-
-			printf("\n");
-
-			rd_kafka_conf_dump_free(arr, cnt);
-		}
-
-		exit(0);
-	}
-
-
-	if (optind != argc || (mode != 'L' && !topic)) {
-	usage:
-		fprintf(stderr,
-			"Usage: %s -C|-P|-L -t <topic> "
-			"[-p <partition>] [-b <host1:port1,host2:port2,..>]\n"
-			"\n"
-			"librdkafka version %s (0x%08x)\n"
-			"\n"
-			" Options:\n"
-			"  -C | -P         Consumer or Producer mode\n"
-                        "  -L              Metadata list mode\n"
-			"  -t <topic>      Topic to fetch / produce\n"
-			"  -p <num>        Partition (random partitioner)\n"
-			"  -b <brokers>    Broker address (localhost:9092)\n"
-			"  -z <codec>      Enable compression:\n"
-			"                  none|gzip|snappy\n"
-			"  -o <offset>     Start offset (consumer):\n"
-			"                  beginning, end, NNNNN or -NNNNN\n"
-			"                  wmark returns the current hi&lo "
-			"watermarks.\n"
-                        "  -o report       Report message offsets (producer)\n"
-			"  -e              Exit consumer when last message\n"
-			"                  in partition has been received.\n"
-			"  -d [facs..]     Enable debugging contexts:\n"
-			"                  %s\n"
-			"  -q              Be quiet\n"
-			"  -A              Raw payload output (consumer)\n"
-			"  -X <prop=name>  Set arbitrary librdkafka "
-			"configuration property\n"
-			"                  Properties prefixed with \"topic.\" "
-			"will be set on topic object.\n"
-			"  -X list         Show full list of supported "
-			"properties.\n"
-			"  -X <prop>       Get single property value\n"
-			"\n"
-			" In Consumer mode:\n"
-			"  writes fetched messages to stdout\n"
-			" In Producer mode:\n"
-			"  reads messages from stdin and sends to broker\n"
-                        " In List mode:\n"
-                        "  queries broker for metadata information, "
-                        "topic is optional.\n"
-			"\n"
-			"\n"
-			"\n",
-			argv[0],
-			rd_kafka_version_str(), rd_kafka_version(),
-			RD_KAFKA_DEBUG_CONTEXTS);
-		exit(1);
-	}
-
-	if ((mode == 'C' && !isatty(STDIN_FILENO)) ||
-	    (mode != 'C' && !isatty(STDOUT_FILENO)))
-		quiet = 1;
-
-
-	signal(SIGINT, stop);
-	signal(SIGUSR1, sig_usr1);
-
-	if (mode == 'P') {
-		/*
-		 * Producer
-		 */
-		char buf[2048];
-		int sendcnt = 0;
-
-		/* Set up a message delivery report callback.
-		 * It will be called once for each message, either on successful
-		 * delivery to broker, or upon failure to deliver to broker. */
-
-                /* If offset reporting (-o report) is enabled, use the
-                 * richer dr_msg_cb instead. */
-                if (report_offsets) {
-                        rd_kafka_topic_conf_set(topic_conf,
-                                                "produce.offset.report",
-                                                "true", errstr, sizeof(errstr));
-                        rd_kafka_conf_set_dr_msg_cb(conf, msg_delivered2);
-                } else
-                        rd_kafka_conf_set_dr_cb(conf, msg_delivered);
-
-		/* Create Kafka handle */
-		if (!(rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf,
-					errstr, sizeof(errstr)))) {
-			fprintf(stderr,
-				"%% Failed to create new producer: %s\n",
-				errstr);
-			exit(1);
-		}
-
-		/* Add brokers */
-		if (rd_kafka_brokers_add(rk, brokers) == 0) {
-			fprintf(stderr, "%% No valid brokers specified\n");
-			exit(1);
-		}
-
-		/* Create topic */
-		rkt = rd_kafka_topic_new(rk, topic, topic_conf);
-                topic_conf = NULL; /* Now owned by topic */
-
-		if (!quiet)
-			fprintf(stderr,
-				"%% Type stuff and hit enter to send\n");
-
-		while (run && fgets(buf, sizeof(buf), stdin)) {
-			size_t len = strlen(buf);
-			if (buf[len-1] == '\n')
-				buf[--len] = '\0';
-
-			/* Send/Produce message. */
-			if (rd_kafka_produce(rkt, partition,
-					     RD_KAFKA_MSG_F_COPY,
-					     /* Payload and length */
-					     buf, len,
-					     /* Optional key and its length */
-					     NULL, 0,
-					     /* Message opaque, provided in
-					      * delivery report callback as
-					      * msg_opaque. */
-					     NULL) == -1) {
-				fprintf(stderr,
-					"%% Failed to produce to topic %s "
-					"partition %i: %s\n",
-					rd_kafka_topic_name(rkt), partition,
-					rd_kafka_err2str(rd_kafka_last_error()));
-				/* Poll to handle delivery reports */
-				rd_kafka_poll(rk, 0);
-				continue;
-			}
-
-			if (!quiet)
-				fprintf(stderr, "%% Sent %zd bytes to topic "
-					"%s partition %i\n",
-				len, rd_kafka_topic_name(rkt), partition);
-			sendcnt++;
-			/* Poll to handle delivery reports */
-			rd_kafka_poll(rk, 0);
-		}
-
-		/* Poll to handle delivery reports */
-		rd_kafka_poll(rk, 0);
-
-		/* Wait for messages to be delivered */
-		while (run && rd_kafka_outq_len(rk) > 0)
-			rd_kafka_poll(rk, 100);
-
-		/* Destroy topic */
-		rd_kafka_topic_destroy(rkt);
-
-		/* Destroy the handle */
-		rd_kafka_destroy(rk);
-
-	} else if (mode == 'C') {
-		/*
-		 * Consumer
-		 */
-
-		/* Create Kafka handle */
-		if (!(rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf,
-					errstr, sizeof(errstr)))) {
-			fprintf(stderr,
-				"%% Failed to create new consumer: %s\n",
-				errstr);
-			exit(1);
-		}
-
-		/* Add brokers */
-		if (rd_kafka_brokers_add(rk, brokers) == 0) {
-			fprintf(stderr, "%% No valid brokers specified\n");
-			exit(1);
-		}
-
-		if (get_wmarks) {
-			int64_t lo, hi;
-                        rd_kafka_resp_err_t err;
-
-			/* Only query for hi&lo partition watermarks */
-
-			if ((err = rd_kafka_query_watermark_offsets(
-				     rk, topic, partition, &lo, &hi, 5000))) {
-				fprintf(stderr, "%% query_watermark_offsets() "
-					"failed: %s\n",
-					rd_kafka_err2str(err));
-				exit(1);
-			}
-
-			printf("%s [%d]: low - high offsets: "
-			       "%"PRId64" - %"PRId64"\n",
-			       topic, partition, lo, hi);
-
-			rd_kafka_destroy(rk);
-			exit(0);
-		}
-
-
-		/* Create topic */
-		rkt = rd_kafka_topic_new(rk, topic, topic_conf);
-                topic_conf = NULL; /* Now owned by topic */
-
-		/* Start consuming */
-		if (rd_kafka_consume_start(rkt, partition, start_offset) == -1){
-			rd_kafka_resp_err_t err = rd_kafka_last_error();
-			fprintf(stderr, "%% Failed to start consuming: %s\n",
-				rd_kafka_err2str(err));
-                        if (err == RD_KAFKA_RESP_ERR__INVALID_ARG)
-                                fprintf(stderr,
-                                        "%% Broker based offset storage "
-                                        "requires a group.id, "
-                                        "add: -X group.id=yourGroup\n");
-			exit(1);
-		}
-
-		while (run) {
-			rd_kafka_message_t *rkmessage;
-                        rd_kafka_resp_err_t err;
-
-                        /* Poll for errors, etc. */
-                        rd_kafka_poll(rk, 0);
-
-			/* Consume single message.
-			 * See rdkafka_performance.c for high speed
-			 * consuming of messages. */
-			rkmessage = rd_kafka_consume(rkt, partition, 1000);
-			if (!rkmessage) /* timeout */
-				continue;
-
-			msg_consume(rkmessage, NULL);
-
-			/* Return message to rdkafka */
-			rd_kafka_message_destroy(rkmessage);
-
-                        if (seek_offset) {
-                                err = rd_kafka_seek(rkt, partition, seek_offset,
-                                                    2000);
-                                if (err)
-                                        printf("Seek failed: %s\n",
-                                               rd_kafka_err2str(err));
-                                else
-                                        printf("Seeked to %"PRId64"\n",
-                                               seek_offset);
-                                seek_offset = 0;
-                        }
-		}
-
-		/* Stop consuming */
-		rd_kafka_consume_stop(rkt, partition);
-
-                while (rd_kafka_outq_len(rk) > 0)
-                        rd_kafka_poll(rk, 10);
-
-		/* Destroy topic */
-		rd_kafka_topic_destroy(rkt);
-
-		/* Destroy handle */
-		rd_kafka_destroy(rk);
-
-        } else if (mode == 'L') {
-                rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;
-
-		/* Create Kafka handle */
-		if (!(rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf,
-					errstr, sizeof(errstr)))) {
-			fprintf(stderr,
-				"%% Failed to create new producer: %s\n",
-				errstr);
-			exit(1);
-		}
-
-		/* Add brokers */
-		if (rd_kafka_brokers_add(rk, brokers) == 0) {
-			fprintf(stderr, "%% No valid brokers specified\n");
-			exit(1);
-		}
-
-                /* Create topic */
-                if (topic) {
-                        rkt = rd_kafka_topic_new(rk, topic, topic_conf);
-                        topic_conf = NULL; /* Now owned by topic */
-                } else
-                        rkt = NULL;
-
-                while (run) {
-                        const struct rd_kafka_metadata *metadata;
-
-                        /* Fetch metadata */
-                        err = rd_kafka_metadata(rk, rkt ? 0 : 1, rkt,
-                                                &metadata, 5000);
-                        if (err != RD_KAFKA_RESP_ERR_NO_ERROR) {
-                                fprintf(stderr,
-                                        "%% Failed to acquire metadata: %s\n",
-                                        rd_kafka_err2str(err));
-                                run = 0;
-                                break;
-                        }
-
-                        metadata_print(topic, metadata);
-
-                        rd_kafka_metadata_destroy(metadata);
-                        run = 0;
-                }
-
-		/* Destroy topic */
-		if (rkt)
-			rd_kafka_topic_destroy(rkt);
-
-		/* Destroy the handle */
-		rd_kafka_destroy(rk);
-
-                if (topic_conf)
-                        rd_kafka_topic_conf_destroy(topic_conf);
-
-
-                /* Exit right away, dont wait for background cleanup, we haven't
-                 * done anything important anyway. */
-                exit(err ? 2 : 0);
-        }
-
-        if (topic_conf)
-                rd_kafka_topic_conf_destroy(topic_conf);
-
-	/* Let background threads clean up and terminate cleanly. */
-	run = 5;
-	while (run-- > 0 && rd_kafka_wait_destroyed(1000) == -1)
-		printf("Waiting for librdkafka to decommission\n");
-	if (run <= 0)
-		rd_kafka_dump(stdout, rk);
-
-	return 0;
-}


[03/51] [partial] nifi-minifi-cpp git commit: MINIFICPP-512 - upgrade to librdkafka 0.11.4

Posted by ph...@apache.org.
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/src-cpp/CMakeLists.txt
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/src-cpp/CMakeLists.txt b/thirdparty/librdkafka-0.11.4/src-cpp/CMakeLists.txt
new file mode 100644
index 0000000..db71516
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/src-cpp/CMakeLists.txt
@@ -0,0 +1,35 @@
+add_library(
+    rdkafka++
+    ConfImpl.cpp
+    ConsumerImpl.cpp
+    HandleImpl.cpp
+    KafkaConsumerImpl.cpp
+    MessageImpl.cpp
+    MetadataImpl.cpp
+    ProducerImpl.cpp
+    QueueImpl.cpp
+    RdKafka.cpp
+    TopicImpl.cpp
+    TopicPartitionImpl.cpp
+)
+
+target_link_libraries(rdkafka++ PUBLIC rdkafka)
+
+# Support '#include <rdkafkacpp.h>'
+target_include_directories(rdkafka++ PUBLIC "$<BUILD_INTERFACE:${CMAKE_CURRENT_LIST_DIR}>")
+if(NOT RDKAFKA_BUILD_STATIC)
+    target_compile_definitions(rdkafka++ PRIVATE LIBRDKAFKACPP_EXPORTS)
+endif()
+install(
+    TARGETS rdkafka++
+    EXPORT "${targets_export_name}"
+    LIBRARY DESTINATION "${CMAKE_INSTALL_LIBDIR}"
+    ARCHIVE DESTINATION "${CMAKE_INSTALL_LIBDIR}"
+    RUNTIME DESTINATION "${CMAKE_INSTALL_BINDIR}"
+    INCLUDES DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}"
+)
+
+install(
+    FILES "rdkafkacpp.h"
+    DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}/librdkafka"
+)
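
Editor's note (not part of the commit): the build-interface include path above allows in-tree code to write #include <rdkafkacpp.h>, while the install rule places the header under ${CMAKE_INSTALL_INCLUDEDIR}/librdkafka, so an installed application would include <librdkafka/rdkafkacpp.h>. A minimal, hypothetical consumer of the installed library, just to show the include path and a version check:

    // version_check.cpp -- illustrative only; assumes the library and header
    // were installed by the rules above.
    #include <iostream>
    #include <librdkafka/rdkafkacpp.h>

    int main() {
      // Print the runtime version string and the hex-encoded version number.
      std::cout << "librdkafka " << RdKafka::version_str()
                << " (0x" << std::hex << RdKafka::version() << ")" << std::endl;
      return 0;
    }

Linking would typically be -lrdkafka++ -lrdkafka, or target_link_libraries(app PRIVATE rdkafka++) when consuming the CMake target directly.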

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/src-cpp/ConfImpl.cpp
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/src-cpp/ConfImpl.cpp b/thirdparty/librdkafka-0.11.4/src-cpp/ConfImpl.cpp
new file mode 100644
index 0000000..709c728
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/src-cpp/ConfImpl.cpp
@@ -0,0 +1,89 @@
+/*
+ * librdkafka - Apache Kafka C/C++ library
+ *
+ * Copyright (c) 2014 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <iostream>
+#include <string>
+#include <list>
+
+#include "rdkafkacpp_int.h"
+
+
+
+RdKafka::ConfImpl::ConfResult RdKafka::ConfImpl::set(const std::string &name,
+						     const std::string &value,
+						     std::string &errstr) {
+  rd_kafka_conf_res_t res;
+  char errbuf[512];
+
+  if (this->conf_type_ == CONF_GLOBAL)
+    res = rd_kafka_conf_set(this->rk_conf_,
+                            name.c_str(), value.c_str(),
+                            errbuf, sizeof(errbuf));
+  else
+    res = rd_kafka_topic_conf_set(this->rkt_conf_,
+                                  name.c_str(), value.c_str(),
+                                  errbuf, sizeof(errbuf));
+
+  if (res != RD_KAFKA_CONF_OK)
+    errstr = errbuf;
+
+  return static_cast<Conf::ConfResult>(res);
+}
+
+
+std::list<std::string> *RdKafka::ConfImpl::dump () {
+
+  const char **arrc;
+  size_t cnt;
+  std::list<std::string> *arr;
+
+  if (rk_conf_)
+    arrc = rd_kafka_conf_dump(rk_conf_, &cnt);
+  else
+    arrc = rd_kafka_topic_conf_dump(rkt_conf_, &cnt);
+
+  arr = new std::list<std::string>();
+  for (int i = 0 ; i < static_cast<int>(cnt) ; i++)
+    arr->push_back(std::string(arrc[i]));
+
+  rd_kafka_conf_dump_free(arrc, cnt);
+  return arr;
+}
+
+RdKafka::Conf *RdKafka::Conf::create (ConfType type) {
+  ConfImpl *conf = new ConfImpl();
+
+  conf->conf_type_ = type;
+
+  if (type == CONF_GLOBAL)
+    conf->rk_conf_ = rd_kafka_conf_new();
+  else
+    conf->rkt_conf_ = rd_kafka_topic_conf_new();
+
+  return conf;
+}
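
Editor's note (not part of the commit): ConfImpl::set() routes a property either to the global rd_kafka_conf_t or to the topic configuration depending on how the Conf object was created, and dump() returns alternating name/value entries. A minimal sketch of the calling side, assuming the public RdKafka::Conf API from rdkafkacpp.h; the broker address is a placeholder:

    #include <iostream>
    #include <list>
    #include <string>
    #include <librdkafka/rdkafkacpp.h>

    int main() {
      std::string errstr;

      // Global configuration (wraps rd_kafka_conf_t).
      RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
      if (conf->set("metadata.broker.list", "localhost:9092", errstr) !=
          RdKafka::Conf::CONF_OK)
        std::cerr << "set failed: " << errstr << std::endl;

      // dump() returns a list of alternating property names and values.
      std::list<std::string> *d = conf->dump();
      for (std::list<std::string>::iterator it = d->begin(); it != d->end(); ) {
        std::string name = *it++;
        std::string value = *it++;
        std::cout << name << " = " << value << std::endl;
      }
      delete d;
      delete conf;
      return 0;
    }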

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/src-cpp/ConsumerImpl.cpp
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/src-cpp/ConsumerImpl.cpp b/thirdparty/librdkafka-0.11.4/src-cpp/ConsumerImpl.cpp
new file mode 100644
index 0000000..bb46877
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/src-cpp/ConsumerImpl.cpp
@@ -0,0 +1,233 @@
+/*
+ * librdkafka - Apache Kafka C/C++ library
+ *
+ * Copyright (c) 2014 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <iostream>
+#include <string>
+#include <list>
+#include <cerrno>
+
+#include "rdkafkacpp_int.h"
+
+RdKafka::Consumer::~Consumer () {}
+
+RdKafka::Consumer *RdKafka::Consumer::create (RdKafka::Conf *conf,
+                                              std::string &errstr) {
+  char errbuf[512];
+  RdKafka::ConfImpl *confimpl = dynamic_cast<RdKafka::ConfImpl *>(conf);
+  RdKafka::ConsumerImpl *rkc = new RdKafka::ConsumerImpl();
+  rd_kafka_conf_t *rk_conf = NULL;
+
+  if (confimpl) {
+    if (!confimpl->rk_conf_) {
+      errstr = "Requires RdKafka::Conf::CONF_GLOBAL object";
+      delete rkc;
+      return NULL;
+    }
+
+    rkc->set_common_config(confimpl);
+
+    rk_conf = rd_kafka_conf_dup(confimpl->rk_conf_);
+  }
+
+  rd_kafka_t *rk;
+  if (!(rk = rd_kafka_new(RD_KAFKA_CONSUMER, rk_conf,
+                          errbuf, sizeof(errbuf)))) {
+    errstr = errbuf;
+    delete rkc;
+    return NULL;
+  }
+
+  rkc->rk_ = rk;
+
+
+  return rkc;
+}
+
+int64_t RdKafka::Consumer::OffsetTail (int64_t offset) {
+  return RD_KAFKA_OFFSET_TAIL(offset);
+}
+
+RdKafka::ErrorCode RdKafka::ConsumerImpl::start (Topic *topic,
+                                                 int32_t partition,
+                                                 int64_t offset) {
+  RdKafka::TopicImpl *topicimpl = dynamic_cast<RdKafka::TopicImpl *>(topic);
+
+  if (rd_kafka_consume_start(topicimpl->rkt_, partition, offset) == -1)
+    return static_cast<RdKafka::ErrorCode>(rd_kafka_last_error());
+
+  return RdKafka::ERR_NO_ERROR;
+}
+
+
+RdKafka::ErrorCode RdKafka::ConsumerImpl::start (Topic *topic,
+                                                 int32_t partition,
+                                                 int64_t offset,
+                                                 Queue *queue) {
+  RdKafka::TopicImpl *topicimpl = dynamic_cast<RdKafka::TopicImpl *>(topic);
+  RdKafka::QueueImpl *queueimpl = dynamic_cast<RdKafka::QueueImpl *>(queue);
+
+  if (rd_kafka_consume_start_queue(topicimpl->rkt_, partition, offset,
+                                   queueimpl->queue_) == -1)
+    return static_cast<RdKafka::ErrorCode>(rd_kafka_last_error());
+
+  return RdKafka::ERR_NO_ERROR;
+}
+
+
+RdKafka::ErrorCode RdKafka::ConsumerImpl::stop (Topic *topic,
+                                                int32_t partition) {
+  RdKafka::TopicImpl *topicimpl = dynamic_cast<RdKafka::TopicImpl *>(topic);
+
+  if (rd_kafka_consume_stop(topicimpl->rkt_, partition) == -1)
+    return static_cast<RdKafka::ErrorCode>(rd_kafka_last_error());
+
+  return RdKafka::ERR_NO_ERROR;
+}
+
+RdKafka::ErrorCode RdKafka::ConsumerImpl::seek (Topic *topic,
+						int32_t partition,
+						int64_t offset,
+						int timeout_ms) {
+  RdKafka::TopicImpl *topicimpl = dynamic_cast<RdKafka::TopicImpl *>(topic);
+
+  if (rd_kafka_seek(topicimpl->rkt_, partition, offset, timeout_ms) == -1)
+    return static_cast<RdKafka::ErrorCode>(rd_kafka_last_error());
+
+  return RdKafka::ERR_NO_ERROR;
+}
+
+RdKafka::Message *RdKafka::ConsumerImpl::consume (Topic *topic,
+                                                  int32_t partition,
+                                                  int timeout_ms) {
+  RdKafka::TopicImpl *topicimpl = dynamic_cast<RdKafka::TopicImpl *>(topic);
+  rd_kafka_message_t *rkmessage;
+
+  rkmessage = rd_kafka_consume(topicimpl->rkt_, partition, timeout_ms);
+  if (!rkmessage)
+    return new RdKafka::MessageImpl(topic,
+                                    static_cast<RdKafka::ErrorCode>
+                                    (rd_kafka_last_error()));
+
+  return new RdKafka::MessageImpl(topic, rkmessage);
+}
+
+namespace {
+  /* Helper struct for `consume_callback'.
+   * Encapsulates the values we need in order to call `rd_kafka_consume_callback'
+   * and keep track of the C++ callback function and `opaque' value.
+   */
+  struct ConsumerImplCallback {
+    ConsumerImplCallback(RdKafka::Topic* topic, RdKafka::ConsumeCb* cb, void* data)
+      : topic(topic), cb_cls(cb), cb_data(data) {
+    }
+    /* This function is the one we give to `rd_kafka_consume_callback', with
+     * the `opaque' pointer pointing to an instance of this struct, in which
+     * we can find the C++ callback and `cb_data'.
+     */
+    static void consume_cb_trampoline(rd_kafka_message_t *msg, void *opaque) {
+      ConsumerImplCallback *instance = static_cast<ConsumerImplCallback*>(opaque);
+      RdKafka::MessageImpl message(instance->topic, msg, false /*don't free*/);
+      instance->cb_cls->consume_cb(message, instance->cb_data);
+    }
+    RdKafka::Topic *topic;
+    RdKafka::ConsumeCb *cb_cls;
+    void *cb_data;
+  };
+}
+
+int RdKafka::ConsumerImpl::consume_callback (RdKafka::Topic* topic,
+                                             int32_t partition,
+                                             int timeout_ms,
+                                             RdKafka::ConsumeCb *consume_cb,
+                                             void *opaque) {
+  RdKafka::TopicImpl *topicimpl = static_cast<RdKafka::TopicImpl *>(topic);
+  ConsumerImplCallback context(topic, consume_cb, opaque);
+  return rd_kafka_consume_callback(topicimpl->rkt_, partition, timeout_ms,
+                                   &ConsumerImplCallback::consume_cb_trampoline, &context);
+}
+
+
+RdKafka::Message *RdKafka::ConsumerImpl::consume (Queue *queue,
+                                                  int timeout_ms) {
+  RdKafka::QueueImpl *queueimpl = dynamic_cast<RdKafka::QueueImpl *>(queue);
+  rd_kafka_message_t *rkmessage;
+
+  rkmessage = rd_kafka_consume_queue(queueimpl->queue_, timeout_ms);
+  if (!rkmessage)
+    return new RdKafka::MessageImpl(NULL,
+                                    static_cast<RdKafka::ErrorCode>
+                                    (rd_kafka_last_error()));
+  /*
+   * Recover our Topic * from the topic conf's opaque field, which we
+   * set in RdKafka::Topic::create() for just this kind of situation.
+   */
+  void *opaque = rd_kafka_topic_opaque(rkmessage->rkt);
+  Topic *topic = static_cast<Topic *>(opaque);
+
+  return new RdKafka::MessageImpl(topic, rkmessage);
+}
+
+namespace {
+  /* Helper struct for `consume_callback' with a Queue.
+   * Encapsulates the values we need in order to call `rd_kafka_consume_callback'
+   * and keep track of the C++ callback function and `opaque' value.
+   */
+  struct ConsumerImplQueueCallback {
+    ConsumerImplQueueCallback(RdKafka::ConsumeCb *cb, void *data)
+      : cb_cls(cb), cb_data(data) {
+    }
+    /* This function is the one we give to `rd_kafka_consume_callback', with
+     * the `opaque' pointer pointing to an instance of this struct, in which
+     * we can find the C++ callback and `cb_data'.
+     */
+    static void consume_cb_trampoline(rd_kafka_message_t *msg, void *opaque) {
+      ConsumerImplQueueCallback *instance = static_cast<ConsumerImplQueueCallback *>(opaque);
+      /*
+       * Recover our Topic * from the topic conf's opaque field, which we
+       * set in RdKafka::Topic::create() for just this kind of situation.
+       */
+      void *topic_opaque = rd_kafka_topic_opaque(msg->rkt);
+      RdKafka::Topic *topic = static_cast<RdKafka::Topic *>(topic_opaque);
+      RdKafka::MessageImpl message(topic, msg, false /*don't free*/);
+      instance->cb_cls->consume_cb(message, instance->cb_data);
+    }
+    RdKafka::ConsumeCb *cb_cls;
+    void *cb_data;
+  };
+}
+
+int RdKafka::ConsumerImpl::consume_callback (Queue *queue,
+                                             int timeout_ms,
+                                             RdKafka::ConsumeCb *consume_cb,
+                                             void *opaque) {
+  RdKafka::QueueImpl *queueimpl = dynamic_cast<RdKafka::QueueImpl *>(queue);
+  ConsumerImplQueueCallback context(consume_cb, opaque);
+  return rd_kafka_consume_callback_queue(queueimpl->queue_, timeout_ms,
+                                         &ConsumerImplQueueCallback::consume_cb_trampoline,
+                                         &context);
+}
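
Editor's note (not part of the commit): the simple (non-group) consumer implemented above is driven per topic and partition. A hedged sketch of that flow, using only calls visible in this file plus Topic::create(); broker, topic and partition values are placeholders:

    #include <iostream>
    #include <string>
    #include <librdkafka/rdkafkacpp.h>

    int main() {
      std::string errstr;
      RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
      conf->set("metadata.broker.list", "localhost:9092", errstr);

      RdKafka::Consumer *consumer = RdKafka::Consumer::create(conf, errstr);
      if (!consumer) {
        std::cerr << "Consumer::create failed: " << errstr << std::endl;
        return 1;
      }

      RdKafka::Topic *topic =
          RdKafka::Topic::create(consumer, "test", NULL, errstr);

      // Start consuming partition 0 from the beginning of the log.
      consumer->start(topic, 0, RdKafka::Topic::OFFSET_BEGINNING);

      for (int i = 0; i < 100; i++) {
        RdKafka::Message *msg = consumer->consume(topic, 0, 1000 /*ms*/);
        if (msg->err() == RdKafka::ERR_NO_ERROR)
          std::cout << "read " << msg->len() << " bytes at offset "
                    << msg->offset() << std::endl;
        delete msg;   // returns the underlying message to librdkafka
      }

      consumer->stop(topic, 0);
      delete topic;
      delete consumer;
      delete conf;
      return 0;
    }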

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/src-cpp/HandleImpl.cpp
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/src-cpp/HandleImpl.cpp b/thirdparty/librdkafka-0.11.4/src-cpp/HandleImpl.cpp
new file mode 100644
index 0000000..3bdccbf
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/src-cpp/HandleImpl.cpp
@@ -0,0 +1,365 @@
+/*
+ * librdkafka - Apache Kafka C/C++ library
+ *
+ * Copyright (c) 2014 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <iostream>
+#include <string>
+#include <list>
+
+#include "rdkafkacpp_int.h"
+
+void RdKafka::consume_cb_trampoline(rd_kafka_message_t *msg, void *opaque) {
+  RdKafka::HandleImpl *handle = static_cast<RdKafka::HandleImpl *>(opaque);
+  RdKafka::Topic* topic = static_cast<Topic *>(rd_kafka_topic_opaque(msg->rkt));
+
+  RdKafka::MessageImpl message(topic, msg, false /*don't free*/);
+
+  handle->consume_cb_->consume_cb(message, opaque);
+}
+
+void RdKafka::log_cb_trampoline (const rd_kafka_t *rk, int level,
+                                 const char *fac, const char *buf) {
+  if (!rk) {
+    rd_kafka_log_print(rk, level, fac, buf);
+    return;
+  }
+
+  void *opaque = rd_kafka_opaque(rk);
+  RdKafka::HandleImpl *handle = static_cast<RdKafka::HandleImpl *>(opaque);
+
+  if (!handle->event_cb_) {
+    rd_kafka_log_print(rk, level, fac, buf);
+    return;
+  }
+
+  RdKafka::EventImpl event(RdKafka::Event::EVENT_LOG,
+                           RdKafka::ERR_NO_ERROR,
+                           static_cast<RdKafka::Event::Severity>(level),
+                           fac, buf);
+
+  handle->event_cb_->event_cb(event);
+}
+
+
+void RdKafka::error_cb_trampoline (rd_kafka_t *rk, int err,
+                                   const char *reason, void *opaque) {
+  RdKafka::HandleImpl *handle = static_cast<RdKafka::HandleImpl *>(opaque);
+
+  RdKafka::EventImpl event(RdKafka::Event::EVENT_ERROR,
+                           static_cast<RdKafka::ErrorCode>(err),
+                           RdKafka::Event::EVENT_SEVERITY_ERROR,
+                           NULL,
+                           reason);
+
+  handle->event_cb_->event_cb(event);
+}
+
+
+void RdKafka::throttle_cb_trampoline (rd_kafka_t *rk, const char *broker_name,
+				      int32_t broker_id,
+				      int throttle_time_ms,
+				      void *opaque) {
+  RdKafka::HandleImpl *handle = static_cast<RdKafka::HandleImpl *>(opaque);
+
+  RdKafka::EventImpl event(RdKafka::Event::EVENT_THROTTLE);
+  event.str_ = broker_name;
+  event.id_ = broker_id;
+  event.throttle_time_ = throttle_time_ms;
+
+  handle->event_cb_->event_cb(event);
+}
+
+
+int RdKafka::stats_cb_trampoline (rd_kafka_t *rk, char *json, size_t json_len,
+                                  void *opaque) {
+  RdKafka::HandleImpl *handle = static_cast<RdKafka::HandleImpl *>(opaque);
+
+  RdKafka::EventImpl event(RdKafka::Event::EVENT_STATS,
+                           RdKafka::ERR_NO_ERROR,
+                           RdKafka::Event::EVENT_SEVERITY_INFO,
+                           NULL, json);
+
+  handle->event_cb_->event_cb(event);
+
+  return 0;
+}
+
+
+int RdKafka::socket_cb_trampoline (int domain, int type, int protocol,
+                                   void *opaque) {
+  RdKafka::HandleImpl *handle = static_cast<RdKafka::HandleImpl *>(opaque);
+
+  return handle->socket_cb_->socket_cb(domain, type, protocol);
+}
+
+int RdKafka::open_cb_trampoline (const char *pathname, int flags, mode_t mode,
+                                 void *opaque) {
+  RdKafka::HandleImpl *handle = static_cast<RdKafka::HandleImpl *>(opaque);
+
+  return handle->open_cb_->open_cb(pathname, flags, static_cast<int>(mode));
+}
+
+RdKafka::ErrorCode RdKafka::HandleImpl::metadata (bool all_topics,
+                                                  const Topic *only_rkt,
+                                                  Metadata **metadatap, 
+                                                  int timeout_ms) {
+
+  const rd_kafka_metadata_t *cmetadatap=NULL;
+
+  rd_kafka_topic_t *topic = only_rkt ? 
+    static_cast<const TopicImpl *>(only_rkt)->rkt_ : NULL;
+
+  const rd_kafka_resp_err_t rc = rd_kafka_metadata(rk_, all_topics, topic,
+                                                   &cmetadatap,timeout_ms);
+
+  *metadatap = (rc == RD_KAFKA_RESP_ERR_NO_ERROR) ? 
+    new RdKafka::MetadataImpl(cmetadatap) : NULL;
+
+  return static_cast<RdKafka::ErrorCode>(rc);
+}
+
+/**
+ * Convert a list of C partitions to C++ partitions
+ */
+static void c_parts_to_partitions (const rd_kafka_topic_partition_list_t
+                                   *c_parts,
+                                   std::vector<RdKafka::TopicPartition*>
+                                   &partitions) {
+  partitions.resize(c_parts->cnt);
+  for (int i = 0 ; i < c_parts->cnt ; i++)
+    partitions[i] = new RdKafka::TopicPartitionImpl(&c_parts->elems[i]);
+}
+
+static void free_partition_vector (std::vector<RdKafka::TopicPartition*> &v) {
+  for (unsigned int i = 0 ; i < v.size() ; i++)
+    delete v[i];
+  v.clear();
+}
+
+void
+RdKafka::rebalance_cb_trampoline (rd_kafka_t *rk,
+                                  rd_kafka_resp_err_t err,
+                                  rd_kafka_topic_partition_list_t *c_partitions,
+                                  void *opaque) {
+  RdKafka::HandleImpl *handle = static_cast<RdKafka::HandleImpl *>(opaque);
+  std::vector<RdKafka::TopicPartition*> partitions;
+
+  c_parts_to_partitions(c_partitions, partitions);
+
+  handle->rebalance_cb_->rebalance_cb(
+				      dynamic_cast<RdKafka::KafkaConsumer*>(handle),
+				      static_cast<RdKafka::ErrorCode>(err),
+				      partitions);
+
+  free_partition_vector(partitions);
+}
+
+
+void
+RdKafka::offset_commit_cb_trampoline0 (
+    rd_kafka_t *rk,
+    rd_kafka_resp_err_t err,
+    rd_kafka_topic_partition_list_t *c_offsets, void *opaque) {
+  OffsetCommitCb *cb = static_cast<RdKafka::OffsetCommitCb *>(opaque);
+  std::vector<RdKafka::TopicPartition*> offsets;
+
+  if (c_offsets)
+    c_parts_to_partitions(c_offsets, offsets);
+
+  cb->offset_commit_cb(static_cast<RdKafka::ErrorCode>(err), offsets);
+
+  free_partition_vector(offsets);
+}
+
+static void
+offset_commit_cb_trampoline (
+    rd_kafka_t *rk,
+    rd_kafka_resp_err_t err,
+    rd_kafka_topic_partition_list_t *c_offsets, void *opaque) {
+  RdKafka::HandleImpl *handle = static_cast<RdKafka::HandleImpl *>(opaque);
+  RdKafka::offset_commit_cb_trampoline0(rk, err, c_offsets,
+                                        handle->offset_commit_cb_);
+}
+
+
+void RdKafka::HandleImpl::set_common_config (RdKafka::ConfImpl *confimpl) {
+
+  rd_kafka_conf_set_opaque(confimpl->rk_conf_, this);
+
+  if (confimpl->event_cb_) {
+    rd_kafka_conf_set_log_cb(confimpl->rk_conf_,
+                             RdKafka::log_cb_trampoline);
+    rd_kafka_conf_set_error_cb(confimpl->rk_conf_,
+                               RdKafka::error_cb_trampoline);
+    rd_kafka_conf_set_throttle_cb(confimpl->rk_conf_,
+				  RdKafka::throttle_cb_trampoline);
+    rd_kafka_conf_set_stats_cb(confimpl->rk_conf_,
+                               RdKafka::stats_cb_trampoline);
+    event_cb_ = confimpl->event_cb_;
+  }
+
+  if (confimpl->socket_cb_) {
+    rd_kafka_conf_set_socket_cb(confimpl->rk_conf_,
+                                RdKafka::socket_cb_trampoline);
+    socket_cb_ = confimpl->socket_cb_;
+  }
+
+  if (confimpl->open_cb_) {
+#ifndef _MSC_VER
+    rd_kafka_conf_set_open_cb(confimpl->rk_conf_, RdKafka::open_cb_trampoline);
+    open_cb_ = confimpl->open_cb_;
+#endif
+  }
+
+  if (confimpl->rebalance_cb_) {
+    rd_kafka_conf_set_rebalance_cb(confimpl->rk_conf_,
+                                   RdKafka::rebalance_cb_trampoline);
+    rebalance_cb_ = confimpl->rebalance_cb_;
+  }
+
+  if (confimpl->offset_commit_cb_) {
+    rd_kafka_conf_set_offset_commit_cb(confimpl->rk_conf_,
+                                       offset_commit_cb_trampoline);
+    offset_commit_cb_ = confimpl->offset_commit_cb_;
+  }
+
+  if (confimpl->consume_cb_) {
+    rd_kafka_conf_set_consume_cb(confimpl->rk_conf_,
+                                 RdKafka::consume_cb_trampoline);
+    consume_cb_ = confimpl->consume_cb_;
+  }
+
+}
+
+
+RdKafka::ErrorCode
+RdKafka::HandleImpl::pause (std::vector<RdKafka::TopicPartition*> &partitions) {
+  rd_kafka_topic_partition_list_t *c_parts;
+  rd_kafka_resp_err_t err;
+
+  c_parts = partitions_to_c_parts(partitions);
+
+  err = rd_kafka_pause_partitions(rk_, c_parts);
+
+  if (!err)
+    update_partitions_from_c_parts(partitions, c_parts);
+
+  rd_kafka_topic_partition_list_destroy(c_parts);
+
+  return static_cast<RdKafka::ErrorCode>(err);
+}
+
+
+RdKafka::ErrorCode
+RdKafka::HandleImpl::resume (std::vector<RdKafka::TopicPartition*> &partitions) {
+  rd_kafka_topic_partition_list_t *c_parts;
+  rd_kafka_resp_err_t err;
+
+  c_parts = partitions_to_c_parts(partitions);
+
+  err = rd_kafka_resume_partitions(rk_, c_parts);
+
+  if (!err)
+    update_partitions_from_c_parts(partitions, c_parts);
+
+  rd_kafka_topic_partition_list_destroy(c_parts);
+
+  return static_cast<RdKafka::ErrorCode>(err);
+}
+
+RdKafka::Queue *
+RdKafka::HandleImpl::get_partition_queue (const TopicPartition *part) {
+  rd_kafka_queue_t *rkqu;
+  rkqu = rd_kafka_queue_get_partition(rk_,
+                                      part->topic().c_str(),
+                                      part->partition());
+
+  if (rkqu == NULL)
+    return NULL;
+
+  RdKafka::QueueImpl *queueimpl = new RdKafka::QueueImpl;
+  queueimpl->queue_ = rkqu;
+
+  return queueimpl;
+}
+
+RdKafka::ErrorCode
+RdKafka::HandleImpl::set_log_queue (RdKafka::Queue *queue) {
+        rd_kafka_queue_t *rkqu = NULL;
+        if (queue) {
+                QueueImpl *queueimpl = dynamic_cast<QueueImpl *>(queue);
+                rkqu = queueimpl->queue_;
+        }
+        return static_cast<RdKafka::ErrorCode>(
+                rd_kafka_set_log_queue(rk_, rkqu));
+}
+
+namespace RdKafka {
+
+rd_kafka_topic_partition_list_t *
+partitions_to_c_parts (const std::vector<RdKafka::TopicPartition*> &partitions){
+  rd_kafka_topic_partition_list_t *c_parts;
+
+  c_parts = rd_kafka_topic_partition_list_new((int)partitions.size());
+
+  for (unsigned int i = 0 ; i < partitions.size() ; i++) {
+    const RdKafka::TopicPartitionImpl *tpi =
+        dynamic_cast<const RdKafka::TopicPartitionImpl*>(partitions[i]);
+    rd_kafka_topic_partition_t *rktpar =
+      rd_kafka_topic_partition_list_add(c_parts,
+					tpi->topic_.c_str(), tpi->partition_);
+    rktpar->offset = tpi->offset_;
+  }
+
+  return c_parts;
+}
+
+
+/**
+ * @brief Update the application provided 'partitions' with info from 'c_parts'
+ */
+void
+update_partitions_from_c_parts (std::vector<RdKafka::TopicPartition*> &partitions,
+				const rd_kafka_topic_partition_list_t *c_parts) {
+  for (int i = 0 ; i < c_parts->cnt ; i++) {
+    rd_kafka_topic_partition_t *p = &c_parts->elems[i];
+
+    /* Find corresponding C++ entry */
+    for (unsigned int j = 0 ; j < partitions.size() ; j++) {
+      RdKafka::TopicPartitionImpl *pp =
+	dynamic_cast<RdKafka::TopicPartitionImpl*>(partitions[j]);
+      if (!strcmp(p->topic, pp->topic_.c_str()) &&
+	  p->partition == pp->partition_) {
+	pp->offset_ = p->offset;
+	pp->err_ = static_cast<RdKafka::ErrorCode>(p->err);
+      }
+    }
+  }
+}
+
+};
+
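
Editor's note (not part of the commit): the trampolines above only fire if the corresponding C++ callback object was attached to the Conf before the handle was created (see set_common_config()). A minimal sketch of wiring an event callback, assuming the EventCb interface from rdkafkacpp.h; the broker address is a placeholder:

    #include <iostream>
    #include <string>
    #include <librdkafka/rdkafkacpp.h>

    // Receives log, error, throttle and stats events via the trampolines above.
    class PrintingEventCb : public RdKafka::EventCb {
     public:
      void event_cb(RdKafka::Event &event) {
        if (event.type() == RdKafka::Event::EVENT_ERROR)
          std::cerr << "ERROR: " << RdKafka::err2str(event.err()) << std::endl;
        else
          std::cerr << "EVENT: " << event.str() << std::endl;
      }
    };

    int main() {
      std::string errstr;
      PrintingEventCb event_cb;

      RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
      conf->set("metadata.broker.list", "localhost:9092", errstr);
      conf->set("event_cb", &event_cb, errstr);  // stored in ConfImpl::event_cb_

      // set_common_config() registers the C trampolines when the handle is made.
      RdKafka::Producer *producer = RdKafka::Producer::create(conf, errstr);
      if (!producer) {
        std::cerr << "create failed: " << errstr << std::endl;
        return 1;
      }
      producer->poll(1000);   // serve queued callbacks
      delete producer;
      delete conf;
      return 0;
    }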

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/src-cpp/KafkaConsumerImpl.cpp
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/src-cpp/KafkaConsumerImpl.cpp b/thirdparty/librdkafka-0.11.4/src-cpp/KafkaConsumerImpl.cpp
new file mode 100644
index 0000000..f4e79d3
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/src-cpp/KafkaConsumerImpl.cpp
@@ -0,0 +1,257 @@
+/*
+ * librdkafka - Apache Kafka C/C++ library
+ *
+ * Copyright (c) 2015 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string>
+#include <vector>
+
+#include "rdkafkacpp_int.h"
+
+RdKafka::KafkaConsumer::~KafkaConsumer () {}
+
+RdKafka::KafkaConsumer *RdKafka::KafkaConsumer::create (RdKafka::Conf *conf,
+                                                        std::string &errstr) {
+  char errbuf[512];
+  RdKafka::ConfImpl *confimpl = dynamic_cast<RdKafka::ConfImpl *>(conf);
+  RdKafka::KafkaConsumerImpl *rkc = new RdKafka::KafkaConsumerImpl();
+  rd_kafka_conf_t *rk_conf = NULL;
+  size_t grlen;
+
+  if (!confimpl->rk_conf_) {
+    errstr = "Requires RdKafka::Conf::CONF_GLOBAL object";
+    delete rkc;
+    return NULL;
+  }
+
+  if (rd_kafka_conf_get(confimpl->rk_conf_, "group.id",
+                        NULL, &grlen) != RD_KAFKA_CONF_OK ||
+      grlen <= 1 /* terminating null only */) {
+    errstr = "\"group.id\" must be configured";
+    delete rkc;
+    return NULL;
+  }
+
+  rkc->set_common_config(confimpl);
+
+  rk_conf = rd_kafka_conf_dup(confimpl->rk_conf_);
+
+  rd_kafka_t *rk;
+  if (!(rk = rd_kafka_new(RD_KAFKA_CONSUMER, rk_conf,
+                          errbuf, sizeof(errbuf)))) {
+    errstr = errbuf;
+    delete rkc;
+    return NULL;
+  }
+
+  rkc->rk_ = rk;
+
+  /* Redirect handle queue to cgrp's queue to provide a single queue point */
+  rd_kafka_poll_set_consumer(rk);
+
+  return rkc;
+}
+
+
+
+
+
+
+
+RdKafka::ErrorCode
+RdKafka::KafkaConsumerImpl::subscribe (const std::vector<std::string> &topics) {
+  rd_kafka_topic_partition_list_t *c_topics;
+  rd_kafka_resp_err_t err;
+
+  c_topics = rd_kafka_topic_partition_list_new((int)topics.size());
+
+  for (unsigned int i = 0 ; i < topics.size() ; i++)
+    rd_kafka_topic_partition_list_add(c_topics, topics[i].c_str(),
+                                      RD_KAFKA_PARTITION_UA);
+
+  err = rd_kafka_subscribe(rk_, c_topics);
+
+  rd_kafka_topic_partition_list_destroy(c_topics);
+
+  return static_cast<RdKafka::ErrorCode>(err);
+}
+
+
+
+RdKafka::ErrorCode
+RdKafka::KafkaConsumerImpl::unsubscribe () {
+  return static_cast<RdKafka::ErrorCode>(rd_kafka_unsubscribe(this->rk_));
+}
+
+RdKafka::Message *RdKafka::KafkaConsumerImpl::consume (int timeout_ms) {
+  rd_kafka_message_t *rkmessage;
+
+  rkmessage = rd_kafka_consumer_poll(this->rk_, timeout_ms);
+
+  if (!rkmessage)
+    return new RdKafka::MessageImpl(NULL, RdKafka::ERR__TIMED_OUT);
+
+  return new RdKafka::MessageImpl(rkmessage);
+
+}
+
+
+
+RdKafka::ErrorCode
+RdKafka::KafkaConsumerImpl::assignment (std::vector<RdKafka::TopicPartition*> &partitions) {
+  rd_kafka_topic_partition_list_t *c_parts;
+  rd_kafka_resp_err_t err;
+
+  if ((err = rd_kafka_assignment(rk_, &c_parts)))
+    return static_cast<RdKafka::ErrorCode>(err);
+
+  partitions.resize(c_parts->cnt);
+
+  for (int i = 0 ; i < c_parts->cnt ; i++)
+    partitions[i] = new RdKafka::TopicPartitionImpl(&c_parts->elems[i]);
+
+  rd_kafka_topic_partition_list_destroy(c_parts);
+
+  return RdKafka::ERR_NO_ERROR;
+}
+
+
+RdKafka::ErrorCode
+RdKafka::KafkaConsumerImpl::subscription (std::vector<std::string> &topics) {
+  rd_kafka_topic_partition_list_t *c_topics;
+  rd_kafka_resp_err_t err;
+
+  if ((err = rd_kafka_subscription(rk_, &c_topics)))
+    return static_cast<RdKafka::ErrorCode>(err);
+
+  topics.resize(c_topics->cnt);
+  for (int i = 0 ; i < c_topics->cnt ; i++)
+    topics[i] = std::string(c_topics->elems[i].topic);
+
+  rd_kafka_topic_partition_list_destroy(c_topics);
+
+  return RdKafka::ERR_NO_ERROR;
+}
+
+
+RdKafka::ErrorCode
+RdKafka::KafkaConsumerImpl::assign (const std::vector<TopicPartition*> &partitions) {
+  rd_kafka_topic_partition_list_t *c_parts;
+  rd_kafka_resp_err_t err;
+
+  c_parts = partitions_to_c_parts(partitions);
+
+  err = rd_kafka_assign(rk_, c_parts);
+
+  rd_kafka_topic_partition_list_destroy(c_parts);
+  return static_cast<RdKafka::ErrorCode>(err);
+}
+
+
+RdKafka::ErrorCode
+RdKafka::KafkaConsumerImpl::unassign () {
+  return static_cast<RdKafka::ErrorCode>(rd_kafka_assign(rk_, NULL));
+}
+
+
+RdKafka::ErrorCode
+RdKafka::KafkaConsumerImpl::committed (std::vector<RdKafka::TopicPartition*> &partitions, int timeout_ms) {
+  rd_kafka_topic_partition_list_t *c_parts;
+  rd_kafka_resp_err_t err;
+
+  c_parts = partitions_to_c_parts(partitions);
+
+  err = rd_kafka_committed(rk_, c_parts, timeout_ms);
+
+  if (!err) {
+    update_partitions_from_c_parts(partitions, c_parts);
+  }
+
+  rd_kafka_topic_partition_list_destroy(c_parts);
+
+  return static_cast<RdKafka::ErrorCode>(err);
+}
+
+
+RdKafka::ErrorCode
+RdKafka::KafkaConsumerImpl::position (std::vector<RdKafka::TopicPartition*> &partitions) {
+  rd_kafka_topic_partition_list_t *c_parts;
+  rd_kafka_resp_err_t err;
+
+  c_parts = partitions_to_c_parts(partitions);
+
+  err = rd_kafka_position(rk_, c_parts);
+
+  if (!err) {
+    update_partitions_from_c_parts(partitions, c_parts);
+  }
+
+  rd_kafka_topic_partition_list_destroy(c_parts);
+
+  return static_cast<RdKafka::ErrorCode>(err);
+}
+
+
+RdKafka::ErrorCode
+RdKafka::KafkaConsumerImpl::seek (const RdKafka::TopicPartition &partition,
+                                  int timeout_ms) {
+  const RdKafka::TopicPartitionImpl *p =
+    dynamic_cast<const RdKafka::TopicPartitionImpl*>(&partition);
+  rd_kafka_topic_t *rkt;
+
+  if (!(rkt = rd_kafka_topic_new(rk_, p->topic_.c_str(), NULL)))
+    return static_cast<RdKafka::ErrorCode>(rd_kafka_last_error());
+
+  /* FIXME: Use a C API that takes a topic_partition_list_t instead */
+  RdKafka::ErrorCode err =
+    static_cast<RdKafka::ErrorCode>
+    (rd_kafka_seek(rkt, p->partition_, p->offset_, timeout_ms));
+
+  rd_kafka_topic_destroy(rkt);
+
+  return err;
+}
+
+
+
+
+
+RdKafka::ErrorCode
+RdKafka::KafkaConsumerImpl::close () {
+  rd_kafka_resp_err_t err;
+  err = rd_kafka_consumer_close(rk_);
+  if (err)
+    return static_cast<RdKafka::ErrorCode>(err);
+
+  while (rd_kafka_outq_len(rk_) > 0)
+    rd_kafka_poll(rk_, 10);
+  rd_kafka_destroy(rk_);
+
+  return static_cast<RdKafka::ErrorCode>(err);
+}
+
+
+
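
Editor's note (not part of the commit): KafkaConsumer::create() above enforces a configured group.id and redirects the handle queue so a single consume() call serves rebalances, errors and messages alike. A hedged sketch of the high-level consumer loop; broker, topic and group names are placeholders:

    #include <iostream>
    #include <string>
    #include <vector>
    #include <librdkafka/rdkafkacpp.h>

    int main() {
      std::string errstr;
      RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
      conf->set("metadata.broker.list", "localhost:9092", errstr);
      conf->set("group.id", "example-group", errstr);   // required by create()

      RdKafka::KafkaConsumer *consumer =
          RdKafka::KafkaConsumer::create(conf, errstr);
      if (!consumer) {
        std::cerr << "create failed: " << errstr << std::endl;
        return 1;
      }

      std::vector<std::string> topics;
      topics.push_back("test");
      consumer->subscribe(topics);

      for (int i = 0; i < 100; i++) {
        RdKafka::Message *msg = consumer->consume(1000 /*ms*/);
        switch (msg->err()) {
          case RdKafka::ERR_NO_ERROR:
            std::cout << "offset " << msg->offset() << ": "
                      << std::string(static_cast<const char *>(msg->payload()),
                                     msg->len()) << std::endl;
            break;
          case RdKafka::ERR__TIMED_OUT:
            break;                            // no message within 1000 ms
          default:
            std::cerr << msg->errstr() << std::endl;
        }
        delete msg;
      }

      // Leaves the group (and commits offsets if auto-commit is enabled).
      consumer->close();
      delete consumer;
      delete conf;
      return 0;
    }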

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/src-cpp/Makefile
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/src-cpp/Makefile b/thirdparty/librdkafka-0.11.4/src-cpp/Makefile
new file mode 100644
index 0000000..16f20b0
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/src-cpp/Makefile
@@ -0,0 +1,49 @@
+PKGNAME=	librdkafka
+LIBNAME=	librdkafka++
+LIBVER=		1
+
+CXXSRCS=	RdKafka.cpp ConfImpl.cpp HandleImpl.cpp \
+		ConsumerImpl.cpp ProducerImpl.cpp KafkaConsumerImpl.cpp \
+		TopicImpl.cpp TopicPartitionImpl.cpp MessageImpl.cpp \
+		QueueImpl.cpp MetadataImpl.cpp
+
+HDRS=		rdkafkacpp.h
+
+OBJS=		$(CXXSRCS:%.cpp=%.o)
+
+
+
+all: lib check
+
+
+include ../mklove/Makefile.base
+
+# No linker script/symbol hiding for C++ library
+WITH_LDS=n
+
+# OSX and Cygwin requires linking required libraries
+ifeq ($(_UNAME_S),Darwin)
+	FWD_LINKING_REQ=y
+endif
+ifeq ($(_UNAME_S),AIX)
+	FWD_LINKING_REQ=y
+endif
+ifeq ($(shell uname -o 2>/dev/null),Cygwin)
+	FWD_LINKING_REQ=y
+endif
+
+# Ignore previously defined library dependencies for the C library,
+# we'll get those dependencies through the C library linkage.
+LIBS := -L../src -lrdkafka -lstdc++
+
+CHECK_FILES+= $(LIBFILENAME) $(LIBNAME).a
+
+
+file-check: lib
+check: file-check
+
+install: lib-install
+
+clean: lib-clean
+
+-include $(DEPS)

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/src-cpp/MessageImpl.cpp
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/src-cpp/MessageImpl.cpp b/thirdparty/librdkafka-0.11.4/src-cpp/MessageImpl.cpp
new file mode 100644
index 0000000..9562402
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/src-cpp/MessageImpl.cpp
@@ -0,0 +1,38 @@
+/*
+ * librdkafka - Apache Kafka C/C++ library
+ *
+ * Copyright (c) 2014 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <iostream>
+#include <string>
+#include <list>
+#include <cerrno>
+
+#include "rdkafkacpp_int.h"
+
+
+RdKafka::Message::~Message() {}
+

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/src-cpp/MetadataImpl.cpp
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/src-cpp/MetadataImpl.cpp b/thirdparty/librdkafka-0.11.4/src-cpp/MetadataImpl.cpp
new file mode 100644
index 0000000..c2869f5
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/src-cpp/MetadataImpl.cpp
@@ -0,0 +1,151 @@
+/*
+ * librdkafka - Apache Kafka C/C++ library
+ *
+ * Copyright (c) 2014 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "rdkafkacpp_int.h"
+
+using namespace RdKafka;
+
+BrokerMetadata::~BrokerMetadata() {};
+PartitionMetadata::~PartitionMetadata() {};
+TopicMetadata::~TopicMetadata() {};
+Metadata::~Metadata() {};
+
+
+/**
+ * Metadata: Broker information handler implementation
+ */
+class BrokerMetadataImpl : public BrokerMetadata {
+ public:
+  BrokerMetadataImpl(const rd_kafka_metadata_broker_t *broker_metadata)
+  :broker_metadata_(broker_metadata),host_(broker_metadata->host) {}
+
+  int32_t      id() const{return broker_metadata_->id;}
+
+  const std::string host() const {return host_;}
+  int port() const {return broker_metadata_->port;}
+
+  virtual ~BrokerMetadataImpl() {}
+
+ private:
+  const rd_kafka_metadata_broker_t *broker_metadata_;
+  const std::string host_;
+};
+
+/**
+ * Metadata: Partition information handler
+ */
+class PartitionMetadataImpl : public PartitionMetadata {
+ public:
+  // @TODO too much memory copy? maybe we should create a new vector class that read directly from C arrays?
+  // @TODO use auto_ptr?
+  PartitionMetadataImpl(const rd_kafka_metadata_partition_t *partition_metadata)
+  :partition_metadata_(partition_metadata) {
+    replicas_.reserve(partition_metadata->replica_cnt);
+    for(int i=0;i<partition_metadata->replica_cnt;++i)
+      replicas_.push_back(partition_metadata->replicas[i]);
+
+    isrs_.reserve(partition_metadata->isr_cnt);
+    for(int i=0;i<partition_metadata->isr_cnt;++i)
+      isrs_.push_back(partition_metadata->isrs[i]);
+  }
+
+  int32_t                    id() const {
+    return partition_metadata_->id;
+  }
+  int32_t                    leader() const {
+    return partition_metadata_->leader;
+  }
+  ErrorCode                  err() const {
+    return static_cast<ErrorCode>(partition_metadata_->err);
+  }
+
+  const std::vector<int32_t> *replicas() const {return &replicas_;}
+  const std::vector<int32_t> *isrs() const {return &isrs_;}
+
+  ~PartitionMetadataImpl() {};
+
+ private:
+  const rd_kafka_metadata_partition_t *partition_metadata_;
+  std::vector<int32_t> replicas_,isrs_;
+};
+
+/**
+ * Metadata: Topic information handler
+ */
+class TopicMetadataImpl : public TopicMetadata{
+ public:
+  TopicMetadataImpl(const rd_kafka_metadata_topic_t *topic_metadata)
+  :topic_metadata_(topic_metadata),topic_(topic_metadata->topic) {
+    partitions_.reserve(topic_metadata->partition_cnt);
+    for(int i=0;i<topic_metadata->partition_cnt;++i)
+      partitions_.push_back(
+        new PartitionMetadataImpl(&topic_metadata->partitions[i])
+      );
+  }
+
+  ~TopicMetadataImpl(){
+    for(size_t i=0;i<partitions_.size();++i)
+      delete partitions_[i];
+  }
+
+  const std::string topic() const {return topic_;}
+  const std::vector<const PartitionMetadata *> *partitions() const {
+    return &partitions_;
+  }
+  ErrorCode err() const {return static_cast<ErrorCode>(topic_metadata_->err);}
+
+ private:
+  const rd_kafka_metadata_topic_t *topic_metadata_;
+  const std::string topic_;
+  std::vector<const PartitionMetadata *> partitions_;
+
+};
+
+MetadataImpl::MetadataImpl(const rd_kafka_metadata_t *metadata)
+:metadata_(metadata)
+{
+  brokers_.reserve(metadata->broker_cnt);
+  for(int i=0;i<metadata->broker_cnt;++i)
+    brokers_.push_back(new BrokerMetadataImpl(&metadata->brokers[i]));
+
+  topics_.reserve(metadata->topic_cnt);
+  for(int i=0;i<metadata->topic_cnt;++i)
+    topics_.push_back(new TopicMetadataImpl(&metadata->topics[i]));
+
+}
+
+MetadataImpl::~MetadataImpl() {
+  for(size_t i=0;i<brokers_.size();++i)
+    delete brokers_[i];
+  for(size_t i=0;i<topics_.size();++i)
+    delete topics_[i];
+
+
+  if(metadata_)
+    rd_kafka_metadata_destroy(metadata_);
+}
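
Editor's note (not part of the commit): these wrapper classes are what Handle::metadata() returns; the application owns the Metadata object and deletes it when done (the destructor above frees the underlying C struct). A minimal helper sketch, assuming 'handle' is any connected producer or consumer created as in the other examples:

    #include <iostream>
    #include <string>
    #include <librdkafka/rdkafkacpp.h>

    // Lists the brokers and topics known to the cluster.
    static void print_metadata(RdKafka::Handle *handle) {
      RdKafka::Metadata *metadata = NULL;
      RdKafka::ErrorCode err =
          handle->metadata(true /*all topics*/, NULL, &metadata, 5000);
      if (err != RdKafka::ERR_NO_ERROR) {
        std::cerr << "metadata failed: " << RdKafka::err2str(err) << std::endl;
        return;
      }

      const RdKafka::Metadata::BrokerMetadataVector *brokers =
          metadata->brokers();
      for (size_t i = 0; i < brokers->size(); i++)
        std::cout << "broker " << (*brokers)[i]->id() << " at "
                  << (*brokers)[i]->host() << ":" << (*brokers)[i]->port()
                  << std::endl;

      const RdKafka::Metadata::TopicMetadataVector *topics = metadata->topics();
      for (size_t i = 0; i < topics->size(); i++)
        std::cout << "topic " << (*topics)[i]->topic() << " with "
                  << (*topics)[i]->partitions()->size() << " partition(s)"
                  << std::endl;

      delete metadata;
    }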

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/src-cpp/ProducerImpl.cpp
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/src-cpp/ProducerImpl.cpp b/thirdparty/librdkafka-0.11.4/src-cpp/ProducerImpl.cpp
new file mode 100644
index 0000000..456bc33
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/src-cpp/ProducerImpl.cpp
@@ -0,0 +1,167 @@
+/*
+ * librdkafka - Apache Kafka C/C++ library
+ *
+ * Copyright (c) 2014 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <iostream>
+#include <string>
+#include <list>
+#include <cerrno>
+
+#include "rdkafkacpp_int.h"
+
+
+RdKafka::Producer::~Producer () {
+
+}
+
+static void dr_msg_cb_trampoline (rd_kafka_t *rk,
+                                  const rd_kafka_message_t *
+                                  rkmessage,
+                                  void *opaque) {
+  RdKafka::HandleImpl *handle = static_cast<RdKafka::HandleImpl *>(opaque);
+  RdKafka::MessageImpl message(NULL, (rd_kafka_message_t *)rkmessage, false);
+  handle->dr_cb_->dr_cb(message);
+}
+
+
+
+RdKafka::Producer *RdKafka::Producer::create (RdKafka::Conf *conf,
+                                              std::string &errstr) {
+  char errbuf[512];
+  RdKafka::ConfImpl *confimpl = dynamic_cast<RdKafka::ConfImpl *>(conf);
+  RdKafka::ProducerImpl *rkp = new RdKafka::ProducerImpl();
+  rd_kafka_conf_t *rk_conf = NULL;
+
+  if (confimpl) {
+    if (!confimpl->rk_conf_) {
+      errstr = "Requires RdKafka::Conf::CONF_GLOBAL object";
+      delete rkp;
+      return NULL;
+    }
+
+    rkp->set_common_config(confimpl);
+
+    rk_conf = rd_kafka_conf_dup(confimpl->rk_conf_);
+
+    if (confimpl->dr_cb_) {
+      rd_kafka_conf_set_dr_msg_cb(rk_conf, dr_msg_cb_trampoline);
+      rkp->dr_cb_ = confimpl->dr_cb_;
+    }
+  }
+
+
+  rd_kafka_t *rk;
+  if (!(rk = rd_kafka_new(RD_KAFKA_PRODUCER, rk_conf,
+                          errbuf, sizeof(errbuf)))) {
+    errstr = errbuf;
+    delete rkp;
+    return NULL;
+  }
+
+  rkp->rk_ = rk;
+
+  return rkp;
+}
+
+
+RdKafka::ErrorCode RdKafka::ProducerImpl::produce (RdKafka::Topic *topic,
+                                                   int32_t partition,
+                                                   int msgflags,
+                                                   void *payload, size_t len,
+                                                   const std::string *key,
+                                                   void *msg_opaque) {
+  RdKafka::TopicImpl *topicimpl = dynamic_cast<RdKafka::TopicImpl *>(topic);
+
+  if (rd_kafka_produce(topicimpl->rkt_, partition, msgflags,
+                       payload, len,
+                       key ? key->c_str() : NULL, key ? key->size() : 0,
+                       msg_opaque) == -1)
+    return static_cast<RdKafka::ErrorCode>(rd_kafka_last_error());
+
+  return RdKafka::ERR_NO_ERROR;
+}
+
+
+RdKafka::ErrorCode RdKafka::ProducerImpl::produce (RdKafka::Topic *topic,
+                                                   int32_t partition,
+                                                   int msgflags,
+                                                   void *payload, size_t len,
+                                                   const void *key,
+                                                   size_t key_len,
+                                                   void *msg_opaque) {
+  RdKafka::TopicImpl *topicimpl = dynamic_cast<RdKafka::TopicImpl *>(topic);
+
+  if (rd_kafka_produce(topicimpl->rkt_, partition, msgflags,
+                       payload, len, key, key_len,
+                       msg_opaque) == -1)
+    return static_cast<RdKafka::ErrorCode>(rd_kafka_last_error());
+
+  return RdKafka::ERR_NO_ERROR;
+}
+
+
+RdKafka::ErrorCode
+RdKafka::ProducerImpl::produce (RdKafka::Topic *topic,
+                                int32_t partition,
+                                const std::vector<char> *payload,
+                                const std::vector<char> *key,
+                                void *msg_opaque) {
+  RdKafka::TopicImpl *topicimpl = dynamic_cast<RdKafka::TopicImpl *>(topic);
+
+  if (rd_kafka_produce(topicimpl->rkt_, partition, RD_KAFKA_MSG_F_COPY,
+                       payload ? (void *)&(*payload)[0] : NULL,
+                       payload ? payload->size() : 0,
+                       key ? &(*key)[0] : NULL, key ? key->size() : 0,
+                       msg_opaque) == -1)
+    return static_cast<RdKafka::ErrorCode>(rd_kafka_last_error());
+
+  return RdKafka::ERR_NO_ERROR;
+
+}
+
+
+RdKafka::ErrorCode
+RdKafka::ProducerImpl::produce (const std::string topic_name,
+                                int32_t partition, int msgflags,
+                                void *payload, size_t len,
+                                const void *key, size_t key_len,
+                                int64_t timestamp,
+                                void *msg_opaque) {
+  return
+    static_cast<RdKafka::ErrorCode>
+    (
+     rd_kafka_producev(rk_,
+                       RD_KAFKA_V_TOPIC(topic_name.c_str()),
+                       RD_KAFKA_V_PARTITION(partition),
+                       RD_KAFKA_V_MSGFLAGS(msgflags),
+                       RD_KAFKA_V_VALUE(payload, len),
+                       RD_KAFKA_V_KEY(key, key_len),
+                       RD_KAFKA_V_TIMESTAMP(timestamp),
+                       RD_KAFKA_V_OPAQUE(msg_opaque),
+                       RD_KAFKA_V_END)
+     );
+}
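
Editor's note (not part of the commit): the delivery-report trampoline above is only installed when a DeliveryReportCb is set on the Conf, and reports are served from poll(). A hedged sketch of asynchronous production using the string-topic produce() overload defined at the end of this file; broker and topic names are placeholders:

    #include <iostream>
    #include <string>
    #include <librdkafka/rdkafkacpp.h>

    // Called from poll() once the broker has acked (or rejected) each message.
    class PrintingDrCb : public RdKafka::DeliveryReportCb {
     public:
      void dr_cb(RdKafka::Message &message) {
        if (message.err())
          std::cerr << "delivery failed: " << message.errstr() << std::endl;
        else
          std::cout << "delivered to partition " << message.partition()
                    << " at offset " << message.offset() << std::endl;
      }
    };

    int main() {
      std::string errstr;
      PrintingDrCb dr_cb;

      RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
      conf->set("metadata.broker.list", "localhost:9092", errstr);
      conf->set("dr_cb", &dr_cb, errstr);

      RdKafka::Producer *producer = RdKafka::Producer::create(conf, errstr);
      if (!producer) {
        std::cerr << "create failed: " << errstr << std::endl;
        return 1;
      }

      std::string payload = "hello";
      producer->produce("test", RdKafka::Topic::PARTITION_UA,
                        RdKafka::Producer::RK_MSG_COPY,
                        const_cast<char *>(payload.c_str()), payload.size(),
                        NULL, 0, 0 /* 0 lets librdkafka assign the timestamp */,
                        NULL);

      // Serve delivery reports until the internal queue drains.
      while (producer->outq_len() > 0)
        producer->poll(100);

      delete producer;
      delete conf;
      return 0;
    }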

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/src-cpp/QueueImpl.cpp
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/src-cpp/QueueImpl.cpp b/thirdparty/librdkafka-0.11.4/src-cpp/QueueImpl.cpp
new file mode 100644
index 0000000..1d8ce93
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/src-cpp/QueueImpl.cpp
@@ -0,0 +1,71 @@
+/*
+ * librdkafka - Apache Kafka C/C++ library
+ *
+ * Copyright (c) 2014 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <cerrno>
+
+#include "rdkafkacpp_int.h"
+
+RdKafka::Queue::~Queue () {
+
+}
+
+RdKafka::Queue *RdKafka::Queue::create (Handle *base) {
+  RdKafka::QueueImpl *queueimpl = new RdKafka::QueueImpl;
+  queueimpl->queue_ = rd_kafka_queue_new(dynamic_cast<HandleImpl*>(base)->rk_);
+  return queueimpl;
+}
+
+RdKafka::ErrorCode
+RdKafka::QueueImpl::forward (Queue *queue) {
+  if (!queue) {
+    rd_kafka_queue_forward(queue_, NULL);
+  } else {
+    QueueImpl *queueimpl = dynamic_cast<QueueImpl *>(queue);
+    rd_kafka_queue_forward(queue_, queueimpl->queue_);
+  }
+  return RdKafka::ERR_NO_ERROR;
+}
+
+RdKafka::Message *RdKafka::QueueImpl::consume (int timeout_ms) {
+  rd_kafka_message_t *rkmessage;
+  rkmessage = rd_kafka_consume_queue(queue_, timeout_ms);
+
+  if (!rkmessage)
+    return new RdKafka::MessageImpl(NULL, RdKafka::ERR__TIMED_OUT);
+
+  return new RdKafka::MessageImpl(rkmessage);
+}
+
+int RdKafka::QueueImpl::poll (int timeout_ms) {
+        return rd_kafka_queue_poll_callback(queue_, timeout_ms);
+}
+
+void RdKafka::QueueImpl::io_event_enable (int fd, const void *payload,
+                                          size_t size) {
+        rd_kafka_queue_io_event_enable(queue_, fd, payload, size);
+}
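
For reference, a minimal, hypothetical sketch of consuming from a dedicated queue through the public C++ API implemented above ('handle' stands for an already-created consumer handle and is illustrative):

    // Hypothetical usage sketch of RdKafka::Queue (not part of the commit).
    #include "rdkafkacpp.h"

    static void drain_one(RdKafka::Handle *handle) {
      RdKafka::Queue *queue = RdKafka::Queue::create(handle);
      RdKafka::Message *msg = queue->consume(1000 /* timeout_ms */);
      if (msg->err() == RdKafka::ERR__TIMED_OUT) {
        // nothing arrived within the timeout; msg only carries the error
      }
      delete msg;
      delete queue;
    }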

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/src-cpp/README.md
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/src-cpp/README.md b/thirdparty/librdkafka-0.11.4/src-cpp/README.md
new file mode 100644
index 0000000..a484589
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/src-cpp/README.md
@@ -0,0 +1,16 @@
+librdkafka C++ interface
+========================
+
+**See rdkafkacpp.h for the public C++ API**
+
+
+
+Maintainer notes for the C++ interface:
+
+ * The public C++ interface (rdkafkacpp.h) does not include the
+   public C interface (rdkafka.h) in any way; this means that all
+   constants, flags, etc., must be kept in sync manually between the two
+   header files.
+   A regression test should be implemented that checks this is true
+   (a sketch of such a check follows these notes).
+
+ * The public C++ interface is provided using pure virtual abstract classes.
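
The sketch below illustrates, hypothetically, what a small compile-time piece of the sync check mentioned in the first note could look like; it only samples a couple of error-code constants and is not part of this commit:

    // Hypothetical sync-check sketch: assert that sampled C++ constants still
    // match their C counterparts (both headers can be included in one TU, as
    // rdkafkacpp_int.h itself does).
    #include "rdkafkacpp.h"
    extern "C" {
    #include "rdkafka.h"
    }

    static_assert(static_cast<int>(RdKafka::ERR_NO_ERROR) ==
                  static_cast<int>(RD_KAFKA_RESP_ERR_NO_ERROR),
                  "ERR_NO_ERROR out of sync with rdkafka.h");
    static_assert(static_cast<int>(RdKafka::ERR__TIMED_OUT) ==
                  static_cast<int>(RD_KAFKA_RESP_ERR__TIMED_OUT),
                  "ERR__TIMED_OUT out of sync with rdkafka.h");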

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/src-cpp/RdKafka.cpp
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/src-cpp/RdKafka.cpp b/thirdparty/librdkafka-0.11.4/src-cpp/RdKafka.cpp
new file mode 100644
index 0000000..7b67a7b
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/src-cpp/RdKafka.cpp
@@ -0,0 +1,52 @@
+/*
+ * librdkafka - Apache Kafka C/C++ library
+ *
+ * Copyright (c) 2014 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string>
+
+#include "rdkafkacpp_int.h"
+
+int RdKafka::version () {
+  return rd_kafka_version();
+}
+
+std::string RdKafka::version_str () {
+  return std::string(rd_kafka_version_str());
+}
+
+std::string RdKafka::get_debug_contexts() {
+	return std::string(RD_KAFKA_DEBUG_CONTEXTS);
+}
+
+std::string RdKafka::err2str (RdKafka::ErrorCode err) {
+  return std::string(rd_kafka_err2str(static_cast<rd_kafka_resp_err_t>(err)));
+}
+
+int RdKafka::wait_destroyed (int timeout_ms) {
+  return rd_kafka_wait_destroyed(timeout_ms);
+}
+
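
For reference, a small, hypothetical sketch of decoding the hex-encoded version integer returned above, assuming the 0xMMmmrrxx layout librdkafka documents for its version macro:

    // Hypothetical usage sketch of RdKafka::version() / version_str().
    #include <cstdio>
    #include "rdkafkacpp.h"

    static void print_version() {
      int v = RdKafka::version();  // assumed hex-encoded as 0xMMmmrrxx
      std::printf("librdkafka %s (major %d, minor %d, revision %d)\n",
                  RdKafka::version_str().c_str(),
                  (v >> 24) & 0xff, (v >> 16) & 0xff, (v >> 8) & 0xff);
    }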

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/src-cpp/TopicImpl.cpp
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/src-cpp/TopicImpl.cpp b/thirdparty/librdkafka-0.11.4/src-cpp/TopicImpl.cpp
new file mode 100644
index 0000000..cd80a4b
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/src-cpp/TopicImpl.cpp
@@ -0,0 +1,128 @@
+/*
+ * librdkafka - Apache Kafka C/C++ library
+ *
+ * Copyright (c) 2014 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <iostream>
+#include <string>
+#include <list>
+#include <cerrno>
+
+#include "rdkafkacpp_int.h"
+
+const int32_t RdKafka::Topic::PARTITION_UA = RD_KAFKA_PARTITION_UA;
+
+const int64_t RdKafka::Topic::OFFSET_BEGINNING = RD_KAFKA_OFFSET_BEGINNING;
+
+const int64_t RdKafka::Topic::OFFSET_END = RD_KAFKA_OFFSET_END;
+
+const int64_t RdKafka::Topic::OFFSET_STORED = RD_KAFKA_OFFSET_STORED;
+
+const int64_t RdKafka::Topic::OFFSET_INVALID = RD_KAFKA_OFFSET_INVALID;
+
+RdKafka::Topic::~Topic () {
+
+}
+
+static int32_t partitioner_cb_trampoline (const rd_kafka_topic_t *rkt,
+                                          const void *keydata,
+                                          size_t keylen,
+                                          int32_t partition_cnt,
+                                          void *rkt_opaque,
+                                          void *msg_opaque) {
+  RdKafka::TopicImpl *topicimpl = static_cast<RdKafka::TopicImpl *>(rkt_opaque);
+  std::string key(static_cast<const char *>(keydata), keylen);
+  return topicimpl->partitioner_cb_->partitioner_cb(topicimpl, &key,
+                                                    partition_cnt, msg_opaque);
+}
+
+static int32_t partitioner_kp_cb_trampoline (const rd_kafka_topic_t *rkt,
+                                             const void *keydata,
+                                             size_t keylen,
+                                             int32_t partition_cnt,
+                                             void *rkt_opaque,
+                                             void *msg_opaque) {
+  RdKafka::TopicImpl *topicimpl = static_cast<RdKafka::TopicImpl *>(rkt_opaque);
+  return topicimpl->partitioner_kp_cb_->partitioner_cb(topicimpl,
+                                                       keydata, keylen,
+                                                       partition_cnt,
+                                                       msg_opaque);
+}
+
+
+
+RdKafka::Topic *RdKafka::Topic::create (Handle *base,
+					const std::string &topic_str,
+					Conf *conf,
+					std::string &errstr) {
+  RdKafka::ConfImpl *confimpl = static_cast<RdKafka::ConfImpl *>(conf);
+  rd_kafka_topic_t *rkt;
+  rd_kafka_topic_conf_t *rkt_conf;
+  rd_kafka_t *rk = dynamic_cast<HandleImpl*>(base)->rk_;
+
+  RdKafka::TopicImpl *topic = new RdKafka::TopicImpl();
+
+  if (!confimpl) {
+    /* Reuse default topic config, but we need our own copy to
+     * set the topic opaque. */
+    rkt_conf = rd_kafka_default_topic_conf_dup(rk);
+  } else {
+    /* Make a copy of conf struct to allow Conf reuse. */
+    rkt_conf = rd_kafka_topic_conf_dup(confimpl->rkt_conf_);
+  }
+
+  /* Set topic opaque to the topic so that we can reach our topic object
+   * from whatever callbacks get registered.
+   * The application itself will not need these opaques since their
+   * callbacks are class based. */
+  rd_kafka_topic_conf_set_opaque(rkt_conf, static_cast<void *>(topic));
+
+  if (confimpl) {
+    if (confimpl->partitioner_cb_) {
+      rd_kafka_topic_conf_set_partitioner_cb(rkt_conf,
+                                             partitioner_cb_trampoline);
+      topic->partitioner_cb_ = confimpl->partitioner_cb_;
+    } else if (confimpl->partitioner_kp_cb_) {
+      rd_kafka_topic_conf_set_partitioner_cb(rkt_conf,
+                                             partitioner_kp_cb_trampoline);
+      topic->partitioner_kp_cb_ = confimpl->partitioner_kp_cb_;
+    }
+  }
+
+
+  if (!(rkt = rd_kafka_topic_new(rk, topic_str.c_str(), rkt_conf))) {
+    errstr = rd_kafka_err2str(rd_kafka_last_error());
+    delete topic;
+    rd_kafka_topic_conf_destroy(rkt_conf);
+    return NULL;
+  }
+
+  topic->rkt_ = rkt;
+
+  return topic;
+
+}
+
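
For reference, a minimal, hypothetical sketch of calling Topic::create() with a per-topic configuration; the handle, topic name and property below are illustrative and not part of this commit:

    // Hypothetical usage sketch of RdKafka::Topic::create().
    #include <iostream>
    #include <string>
    #include "rdkafkacpp.h"

    static RdKafka::Topic *make_topic(RdKafka::Handle *handle) {
      std::string errstr;
      RdKafka::Conf *tconf = RdKafka::Conf::create(RdKafka::Conf::CONF_TOPIC);
      tconf->set("request.required.acks", "-1", errstr);  // illustrative property
      RdKafka::Topic *topic =
          RdKafka::Topic::create(handle, "my_topic", tconf, errstr);
      delete tconf;  // Topic::create() duplicates the conf, so it can be freed
      if (!topic)
        std::cerr << "Topic::create failed: " << errstr << std::endl;
      return topic;
    }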

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/src-cpp/TopicPartitionImpl.cpp
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/src-cpp/TopicPartitionImpl.cpp b/thirdparty/librdkafka-0.11.4/src-cpp/TopicPartitionImpl.cpp
new file mode 100644
index 0000000..71a688c
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/src-cpp/TopicPartitionImpl.cpp
@@ -0,0 +1,55 @@
+/*
+ * librdkafka - Apache Kafka C/C++ library
+ *
+ * Copyright (c) 2015 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <iostream>
+#include <string>
+#include <vector>
+
+#include "rdkafkacpp_int.h"
+
+RdKafka::TopicPartition::~TopicPartition () {
+}
+
+RdKafka::TopicPartition *
+RdKafka::TopicPartition::create (const std::string &topic, int partition) {
+  return new TopicPartitionImpl(topic, partition);
+}
+
+RdKafka::TopicPartition *
+RdKafka::TopicPartition::create (const std::string &topic, int partition,
+                                 int64_t offset) {
+  return new TopicPartitionImpl(topic, partition, offset);
+}
+
+void
+RdKafka::TopicPartition::destroy (std::vector<TopicPartition*> &partitions) {
+  for (std::vector<TopicPartition*>::iterator it = partitions.begin() ;
+       it != partitions.end(); ++it)
+    delete(*it);
+  partitions.clear();
+}
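
For reference, a minimal, hypothetical sketch of building and releasing a partition list with the helpers above ('my_topic' and the partition numbers are illustrative):

    // Hypothetical usage sketch of RdKafka::TopicPartition.
    #include <vector>
    #include "rdkafkacpp.h"

    static void partition_list_example() {
      std::vector<RdKafka::TopicPartition *> partitions;
      partitions.push_back(RdKafka::TopicPartition::create(
          "my_topic", 0, RdKafka::Topic::OFFSET_BEGINNING));
      partitions.push_back(RdKafka::TopicPartition::create("my_topic", 1));
      // ... e.g. hand 'partitions' to KafkaConsumer::assign() ...
      RdKafka::TopicPartition::destroy(partitions);  // deletes elements, clears vector
    }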


http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdkafka_lz4.c
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdkafka_lz4.c b/thirdparty/librdkafka-0.11.1/src/rdkafka_lz4.c
deleted file mode 100644
index 9ee50de..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdkafka_lz4.c
+++ /dev/null
@@ -1,429 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2017 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution.
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "rdkafka_int.h"
-#include "rdkafka_lz4.h"
-
-#if WITH_LZ4_EXT
-#include <lz4frame.h>
-#else
-#include "lz4frame.h"
-#endif
-#include "xxhash.h"
-
-#include "rdbuf.h"
-
-/**
- * Fix-up bad LZ4 framing caused by buggy Kafka client / broker.
- * The LZ4F framing format is described in detail here:
- * https://github.com/Cyan4973/lz4/blob/master/lz4_Frame_format.md
- *
- * NOTE: This modifies 'inbuf'.
- *
- * Returns an error on failure to fix (nothing modified), else NO_ERROR.
- */
-static rd_kafka_resp_err_t
-rd_kafka_lz4_decompress_fixup_bad_framing (rd_kafka_broker_t *rkb,
-                                           char *inbuf, size_t inlen) {
-        static const char magic[4] = { 0x04, 0x22, 0x4d, 0x18 };
-        uint8_t FLG, HC, correct_HC;
-        size_t of = 4;
-
-        /* Format is:
-         *    int32_t magic;
-         *    int8_t_ FLG;
-         *    int8_t  BD;
-         *  [ int64_t contentSize; ]
-         *    int8_t  HC;
-         */
-        if (inlen < 4+3 || memcmp(inbuf, magic, 4)) {
-                rd_rkb_dbg(rkb, BROKER,  "LZ4FIXUP",
-                           "Unable to fix-up legacy LZ4 framing "
-                           "(%"PRIusz" bytes): invalid length or magic value",
-                           inlen);
-                return RD_KAFKA_RESP_ERR__BAD_COMPRESSION;
-        }
-
-        of = 4; /* past magic */
-        FLG = inbuf[of++];
-        of++; /* BD */
-
-        if ((FLG >> 3) & 1) /* contentSize */
-                of += 8;
-
-        if (of >= inlen) {
-                rd_rkb_dbg(rkb, BROKER,  "LZ4FIXUP",
-                           "Unable to fix-up legacy LZ4 framing "
-                           "(%"PRIusz" bytes): requires %"PRIusz" bytes",
-                           inlen, of);
-                return RD_KAFKA_RESP_ERR__BAD_COMPRESSION;
-        }
-
-        /* Header hash code */
-        HC = inbuf[of];
-
-        /* Calculate correct header hash code */
-        correct_HC = (XXH32(inbuf+4, of-4, 0) >> 8) & 0xff;
-
-        if (HC != correct_HC)
-                inbuf[of] = correct_HC;
-
-        return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-
-/**
- * Reverse of fix-up: break the LZ4 framing so that it is compatible with the
- * buggy Kafka client / broker.
- *
- * NOTE: This modifies 'outbuf'.
- *
- * Returns an error on failure to recognize format (nothing modified),
- * else NO_ERROR.
- */
-static rd_kafka_resp_err_t
-rd_kafka_lz4_compress_break_framing (rd_kafka_broker_t *rkb,
-                                     char *outbuf, size_t outlen) {
-        static const char magic[4] = { 0x04, 0x22, 0x4d, 0x18 };
-        uint8_t FLG, HC, bad_HC;
-        size_t of = 4;
-
-        /* Format is:
-         *    int32_t magic;
-         *    int8_t_ FLG;
-         *    int8_t  BD;
-         *  [ int64_t contentSize; ]
-         *    int8_t  HC;
-         */
-        if (outlen < 4+3 || memcmp(outbuf, magic, 4)) {
-                rd_rkb_dbg(rkb, BROKER,  "LZ4FIXDOWN",
-                           "Unable to break legacy LZ4 framing "
-                           "(%"PRIusz" bytes): invalid length or magic value",
-                           outlen);
-                return RD_KAFKA_RESP_ERR__BAD_COMPRESSION;
-        }
-
-        of = 4; /* past magic */
-        FLG = outbuf[of++];
-        of++; /* BD */
-
-        if ((FLG >> 3) & 1) /* contentSize */
-                of += 8;
-
-        if (of >= outlen) {
-                rd_rkb_dbg(rkb, BROKER,  "LZ4FIXUP",
-                           "Unable to break legacy LZ4 framing "
-                           "(%"PRIusz" bytes): requires %"PRIusz" bytes",
-                           outlen, of);
-                return RD_KAFKA_RESP_ERR__BAD_COMPRESSION;
-        }
-
-        /* Header hash code */
-        HC = outbuf[of];
-
-        /* Calculate bad header hash code (include magic) */
-        bad_HC = (XXH32(outbuf, of, 0) >> 8) & 0xff;
-
-        if (HC != bad_HC)
-                outbuf[of] = bad_HC;
-
-        return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
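
To make the difference between the two helpers above explicit: the LZ4F spec computes the header checksum (HC) over the frame descriptor only (everything after the 4 magic bytes), while the legacy Kafka framing also hashed the magic. A minimal sketch, assuming the bundled xxhash XXH32() API, of both calculations:

    // Sketch of the two HC variants handled above (not part of the commit).
    #include <stddef.h>
    #include <stdint.h>
    #include "xxhash.h"

    /* 'frame' points at the LZ4F frame start (magic included); 'hc_offset' is
     * the byte offset of the HC field, i.e. after FLG, BD and the optional
     * contentSize field. */
    static void lz4f_header_checksums(const char *frame, size_t hc_offset,
                                      uint8_t *spec_hc, uint8_t *legacy_hc) {
      /* Spec-conformant: hash the frame descriptor only. */
      *spec_hc = (uint8_t)((XXH32(frame + 4, hc_offset - 4, 0) >> 8) & 0xff);
      /* Legacy Kafka bug: the magic bytes were included in the hash. */
      *legacy_hc = (uint8_t)((XXH32(frame, hc_offset, 0) >> 8) & 0xff);
    }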
-
-
-
-/**
- * @brief Decompress LZ4F (framed) data.
- *        Kafka broker versions <0.10.0.0 (MsgVersion 0) break the LZ4 framing
- *        checksum; if \p proper_hc we assume the checksum is okay
- *        (broker version >=0.10.0, MsgVersion >= 1), else we fix it up.
- *
- * @remark May modify \p inbuf (if not \p proper_hc)
- */
-rd_kafka_resp_err_t
-rd_kafka_lz4_decompress (rd_kafka_broker_t *rkb, int proper_hc, int64_t Offset,
-                         char *inbuf, size_t inlen,
-                         void **outbuf, size_t *outlenp) {
-        LZ4F_errorCode_t code;
-        LZ4F_decompressionContext_t dctx;
-        LZ4F_frameInfo_t fi;
-        size_t in_sz, out_sz;
-        size_t in_of, out_of;
-        size_t r;
-        size_t estimated_uncompressed_size;
-        size_t outlen;
-        rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;
-        char *out = NULL;
-
-        *outbuf = NULL;
-
-        code = LZ4F_createDecompressionContext(&dctx, LZ4F_VERSION);
-        if (LZ4F_isError(code)) {
-                rd_rkb_dbg(rkb, BROKER, "LZ4DECOMPR",
-                           "Unable to create LZ4 decompression context: %s",
-                           LZ4F_getErrorName(code));
-                return RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE;
-        }
-
-        if (!proper_hc) {
-                /* The original/legacy LZ4 framing in Kafka was buggy and
-                 * calculated the LZ4 framing header hash code (HC) incorrectly.
-                 * We do a fix-up of it here. */
-                if ((err = rd_kafka_lz4_decompress_fixup_bad_framing(rkb,
-                                                                     inbuf,
-                                                                     inlen)))
-                        goto done;
-        }
-
-        in_sz = inlen;
-        r = LZ4F_getFrameInfo(dctx, &fi, (const void *)inbuf, &in_sz);
-        if (LZ4F_isError(r)) {
-                rd_rkb_dbg(rkb, BROKER, "LZ4DECOMPR",
-                           "Failed to gather LZ4 frame info: %s",
-                           LZ4F_getErrorName(r));
-                err = RD_KAFKA_RESP_ERR__BAD_COMPRESSION;
-                goto done;
-        }
-
-        /* If the uncompressed size is unknown or out of bounds, make up a
-         * worst-case uncompressed size.
-         * More info on max size: http://stackoverflow.com/a/25751871/1821055 */
-        if (fi.contentSize == 0 || fi.contentSize > inlen * 255)
-                estimated_uncompressed_size = inlen * 255;
-        else
-                estimated_uncompressed_size = (size_t)fi.contentSize;
-
-        /* Allocate output buffer, we increase this later if needed,
-         * but hopefully not. */
-        out = rd_malloc(estimated_uncompressed_size);
-        if (!out) {
-                rd_rkb_log(rkb, LOG_WARNING, "LZ4DEC",
-                           "Unable to allocate decompression "
-                           "buffer of %zd bytes: %s",
-                           estimated_uncompressed_size, rd_strerror(errno));
-                err = RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE;
-                goto done;
-        }
-
-
-        /* Decompress input buffer to output buffer until input is exhausted. */
-        outlen = estimated_uncompressed_size;
-        in_of = in_sz;
-        out_of = 0;
-        while (in_of < inlen) {
-                out_sz = outlen - out_of;
-                in_sz = inlen - in_of;
-                r = LZ4F_decompress(dctx, out+out_of, &out_sz,
-                                    inbuf+in_of, &in_sz, NULL);
-                if (unlikely(LZ4F_isError(r))) {
-                        rd_rkb_dbg(rkb, MSG, "LZ4DEC",
-                                   "Failed to LZ4 (%s HC) decompress message "
-                                   "(offset %"PRId64") at "
-                                   "payload offset %"PRIusz"/%"PRIusz": %s",
-                                   proper_hc ? "proper":"legacy",
-                                   Offset, in_of, inlen,  LZ4F_getErrorName(r));
-                        err = RD_KAFKA_RESP_ERR__BAD_COMPRESSION;
-                        goto done;
-                }
-
-                rd_kafka_assert(NULL, out_of + out_sz < outlen &&
-                                in_of + in_sz <= inlen);
-                out_of += out_sz;
-                in_of += in_sz;
-                if (r == 0)
-                        break;
-
-                /* Need to grow output buffer, this shouldn't happen if
-                 * contentSize was properly set. */
-                if (unlikely(r > 0 && out_of == outlen)) {
-                        char *tmp;
-                        size_t extra = (r > 1024 ? r : 1024) * 2;
-
-                        rd_atomic64_add(&rkb->rkb_c.zbuf_grow, 1);
-
-                        if (!(tmp = rd_realloc(out, outlen + extra))) {
-                                rd_rkb_log(rkb, LOG_WARNING, "LZ4DEC",
-                                           "Unable to grow decompression "
-                                           "buffer to %zd+%zd bytes: %s",
-                                           outlen, extra, rd_strerror(errno));
-                                err = RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE;
-                                goto done;
-                        }
-                        out = tmp;
-                        outlen += extra;
-                }
-        }
-
-
-        if (in_of < inlen) {
-                rd_rkb_dbg(rkb, MSG, "LZ4DEC",
-                           "Failed to LZ4 (%s HC) decompress message "
-                           "(offset %"PRId64"): "
-                           "%"PRIusz" (out of %"PRIusz") bytes remaining",
-                           proper_hc ? "proper":"legacy",
-                           Offset, inlen-in_of, inlen);
-                err = RD_KAFKA_RESP_ERR__BAD_MSG;
-                goto done;
-        }
-
-        *outbuf = out;
-        *outlenp = out_of;
-
- done:
-        code = LZ4F_freeDecompressionContext(dctx);
-        if (LZ4F_isError(code)) {
-                rd_rkb_dbg(rkb, BROKER, "LZ4DECOMPR",
-                           "Failed to close LZ4 compression context: %s",
-                           LZ4F_getErrorName(code));
-                err = RD_KAFKA_RESP_ERR__BAD_COMPRESSION;
-        }
-
-        if (err && out)
-                rd_free(out);
-
-        return err;
-}
-
-
-/**
- * Allocate space for \p *outbuf and compress all \p iovlen buffers in \p iov.
- * @param proper_hc generate a proper HC (checksum) (kafka >=0.10.0.0, MsgVersion >= 1)
- * @param MessageSetSize indicates (at least) full uncompressed data size,
- *                       possibly including MessageSet fields that will not
- *                       be compressed.
- *
- * @returns allocated buffer in \p *outbuf, length in \p *outlenp.
- */
-rd_kafka_resp_err_t
-rd_kafka_lz4_compress (rd_kafka_broker_t *rkb, int proper_hc,
-                       rd_slice_t *slice, void **outbuf, size_t *outlenp) {
-        LZ4F_compressionContext_t cctx;
-        LZ4F_errorCode_t r;
-        rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;
-        size_t len = rd_slice_remains(slice);
-        size_t out_sz;
-        size_t out_of = 0;
-        char *out;
-        const void *p;
-        size_t rlen;
-
-        /* Required by Kafka */
-        const LZ4F_preferences_t prefs =
-                { .frameInfo = { .blockMode = LZ4F_blockIndependent } };
-
-        *outbuf = NULL;
-
-        out_sz = LZ4F_compressBound(len, NULL) + 1000;
-        if (LZ4F_isError(out_sz)) {
-                rd_rkb_dbg(rkb, MSG, "LZ4COMPR",
-                           "Unable to query LZ4 compressed size "
-                           "(for %"PRIusz" uncompressed bytes): %s",
-                           len, LZ4F_getErrorName(out_sz));
-                return RD_KAFKA_RESP_ERR__BAD_MSG;
-        }
-
-        out = rd_malloc(out_sz);
-        if (!out) {
-                rd_rkb_dbg(rkb, MSG, "LZ4COMPR",
-                           "Unable to allocate output buffer "
-                           "(%"PRIusz" bytes): %s",
-                           out_sz, rd_strerror(errno));
-                return RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE;
-        }
-
-        r = LZ4F_createCompressionContext(&cctx, LZ4F_VERSION);
-        if (LZ4F_isError(r)) {
-                rd_rkb_dbg(rkb, MSG, "LZ4COMPR",
-                           "Unable to create LZ4 compression context: %s",
-                           LZ4F_getErrorName(r));
-                return RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE;
-        }
-
-        r = LZ4F_compressBegin(cctx, out, out_sz, &prefs);
-        if (LZ4F_isError(r)) {
-                rd_rkb_dbg(rkb, MSG, "LZ4COMPR",
-                           "Unable to begin LZ4 compression "
-                           "(out buffer is %"PRIusz" bytes): %s",
-                           out_sz, LZ4F_getErrorName(r));
-                err = RD_KAFKA_RESP_ERR__BAD_COMPRESSION;
-                goto done;
-        }
-
-        out_of += r;
-
-        while ((rlen = rd_slice_reader(slice, &p))) {
-                rd_assert(out_of < out_sz);
-                r = LZ4F_compressUpdate(cctx, out+out_of, out_sz-out_of,
-                                        p, rlen, NULL);
-                if (unlikely(LZ4F_isError(r))) {
-                        rd_rkb_dbg(rkb, MSG, "LZ4COMPR",
-                                   "LZ4 compression failed "
-                                   "(at of %"PRIusz" bytes, with "
-                                   "%"PRIusz" bytes remaining in out buffer): "
-                                   "%s",
-                                   rlen, out_sz - out_of,
-                                   LZ4F_getErrorName(r));
-                        err = RD_KAFKA_RESP_ERR__BAD_COMPRESSION;
-                        goto done;
-                }
-
-                out_of += r;
-        }
-
-        rd_assert(rd_slice_remains(slice) == 0);
-
-        r = LZ4F_compressEnd(cctx, out+out_of, out_sz-out_of, NULL);
-        if (unlikely(LZ4F_isError(r))) {
-                rd_rkb_dbg(rkb, MSG, "LZ4COMPR",
-                           "Failed to finalize LZ4 compression "
-                           "of %"PRIusz" bytes: %s",
-                           len, LZ4F_getErrorName(r));
-                err = RD_KAFKA_RESP_ERR__BAD_COMPRESSION;
-                goto done;
-        }
-
-        out_of += r;
-
-        /* For the broken legacy framing we need to mess up the header checksum
-         * so that the Kafka client / broker code accepts it. */
-        if (!proper_hc)
-                if ((err = rd_kafka_lz4_compress_break_framing(rkb,
-                                                               out, out_of)))
-                        goto done;
-
-
-        *outbuf  = out;
-        *outlenp = out_of;
-
- done:
-        LZ4F_freeCompressionContext(cctx);
-
-        if (err)
-                rd_free(out);
-
-        return err;
-
-}
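
For comparison, a minimal, hypothetical one-shot variant of the streaming compression above, using the public lz4frame API with the block-independence preference the code notes is required by Kafka:

    // Hypothetical one-shot LZ4F compression sketch (not part of the commit).
    #include <stdlib.h>
    #include <string.h>
    #include <lz4frame.h>

    static size_t compress_for_kafka(const void *src, size_t len, void **dst_out) {
      LZ4F_preferences_t prefs;
      memset(&prefs, 0, sizeof(prefs));
      prefs.frameInfo.blockMode = LZ4F_blockIndependent;  /* required by Kafka */

      size_t cap = LZ4F_compressFrameBound(len, &prefs);
      void *dst = malloc(cap);
      if (!dst)
        return 0;

      size_t written = LZ4F_compressFrame(dst, cap, src, len, &prefs);
      if (LZ4F_isError(written)) {
        free(dst);
        return 0;
      }
      *dst_out = dst;
      return written;  /* compressed frame size in bytes */
    }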

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdkafka_lz4.h
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdkafka_lz4.h b/thirdparty/librdkafka-0.11.1/src/rdkafka_lz4.h
deleted file mode 100644
index fb72f21..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdkafka_lz4.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2017 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-#pragma once
-
-
-rd_kafka_resp_err_t
-rd_kafka_lz4_decompress (rd_kafka_broker_t *rkb, int proper_hc, int64_t Offset,
-                         char *inbuf, size_t inlen,
-                         void **outbuf, size_t *outlenp);
-
-rd_kafka_resp_err_t
-rd_kafka_lz4_compress (rd_kafka_broker_t *rkb, int proper_hc,
-                       rd_slice_t *slice, void **outbuf, size_t *outlenp);

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdkafka_metadata.c
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdkafka_metadata.c b/thirdparty/librdkafka-0.11.1/src/rdkafka_metadata.c
deleted file mode 100644
index 135ac84..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdkafka_metadata.c
+++ /dev/null
@@ -1,1017 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2013, Magnus Edenhill
- * All rights reserved.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met: 
- * 
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer. 
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution. 
- * 
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-#include "rd.h"
-#include "rdkafka_int.h"
-#include "rdkafka_topic.h"
-#include "rdkafka_broker.h"
-#include "rdkafka_request.h"
-#include "rdkafka_metadata.h"
-
-#include <string.h>
-
-
-
-rd_kafka_resp_err_t
-rd_kafka_metadata (rd_kafka_t *rk, int all_topics,
-                   rd_kafka_topic_t *only_rkt,
-                   const struct rd_kafka_metadata **metadatap,
-                   int timeout_ms) {
-        rd_kafka_q_t *rkq;
-        rd_kafka_broker_t *rkb;
-        rd_kafka_op_t *rko;
-	rd_ts_t ts_end = rd_timeout_init(timeout_ms);
-        rd_list_t topics;
-
-        /* Query any broker that is up, and if none are up pick the first one;
-         * if we're lucky it will be up before the timeout. */
-	rkb = rd_kafka_broker_any_usable(rk, timeout_ms, 1);
-	if (!rkb)
-		return RD_KAFKA_RESP_ERR__TRANSPORT;
-
-        rkq = rd_kafka_q_new(rk);
-
-        rd_list_init(&topics, 0, rd_free);
-        if (!all_topics) {
-                if (only_rkt)
-                        rd_list_add(&topics,
-                                    rd_strdup(rd_kafka_topic_a2i(only_rkt)->
-                                              rkt_topic->str));
-                else
-                        rd_kafka_local_topics_to_list(rkb->rkb_rk, &topics);
-        }
-
-        /* Async: request metadata */
-        rko = rd_kafka_op_new(RD_KAFKA_OP_METADATA);
-        rd_kafka_op_set_replyq(rko, rkq, 0);
-        rko->rko_u.metadata.force = 1; /* Force metadata request regardless
-                                        * of outstanding metadata requests. */
-        rd_kafka_MetadataRequest(rkb, &topics, "application requested", rko);
-
-        rd_list_destroy(&topics);
-        rd_kafka_broker_destroy(rkb);
-
-        /* Wait for reply (or timeout) */
-        rko = rd_kafka_q_pop(rkq, rd_timeout_remains(ts_end), 0);
-
-        rd_kafka_q_destroy(rkq);
-
-        /* Timeout */
-        if (!rko)
-                return RD_KAFKA_RESP_ERR__TIMED_OUT;
-
-        /* Error */
-        if (rko->rko_err) {
-                rd_kafka_resp_err_t err = rko->rko_err;
-                rd_kafka_op_destroy(rko);
-                return err;
-        }
-
-        /* Reply: pass the metadata pointer to the application, which now owns it */
-        rd_kafka_assert(rk, rko->rko_u.metadata.md);
-        *metadatap = rko->rko_u.metadata.md;
-        rko->rko_u.metadata.md = NULL;
-        rd_kafka_op_destroy(rko);
-
-        return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
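
For reference, a minimal, hypothetical sketch of how an application calls this public metadata API and releases the result ('rk' stands for an existing client instance):

    // Hypothetical usage sketch of rd_kafka_metadata() (not part of the commit).
    #include <stdio.h>
    #include "rdkafka.h"

    static void dump_metadata(rd_kafka_t *rk) {
      const struct rd_kafka_metadata *md;
      rd_kafka_resp_err_t err =
          rd_kafka_metadata(rk, 1 /* all_topics */, NULL /* only_rkt */,
                            &md, 5000 /* timeout_ms */);
      if (err != RD_KAFKA_RESP_ERR_NO_ERROR) {
        fprintf(stderr, "metadata request failed: %s\n", rd_kafka_err2str(err));
        return;
      }
      printf("%d broker(s), %d topic(s)\n", md->broker_cnt, md->topic_cnt);
      rd_kafka_metadata_destroy(md);  /* the caller owns the returned metadata */
    }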
-
-
-
-void rd_kafka_metadata_destroy (const struct rd_kafka_metadata *metadata) {
-        rd_free((void *)metadata);
-}
-
-
-/**
- * @returns a newly allocated copy of metadata \p src of size \p size
- */
-struct rd_kafka_metadata *
-rd_kafka_metadata_copy (const struct rd_kafka_metadata *src, size_t size) {
-	struct rd_kafka_metadata *md;
-	rd_tmpabuf_t tbuf;
-	int i;
-
-	/* The metadata is stored in one contiguous buffer where structs
-	 * and pointed-to fields are laid out in a memory-aligned fashion.
-	 * rd_tmpabuf_t provides the infrastructure to do this.
-	 * Because of this we copy all the structs verbatim but
-	 * any pointer fields need to be copied explicitly to update
-	 * the pointer address. */
-	rd_tmpabuf_new(&tbuf, size, 1/*assert on fail*/);
-	md = rd_tmpabuf_write(&tbuf, src, sizeof(*md));
-
-	rd_tmpabuf_write_str(&tbuf, src->orig_broker_name);
-
-
-	/* Copy Brokers */
-	md->brokers = rd_tmpabuf_write(&tbuf, src->brokers,
-				      md->broker_cnt * sizeof(*md->brokers));
-
-	for (i = 0 ; i < md->broker_cnt ; i++)
-		md->brokers[i].host =
-			rd_tmpabuf_write_str(&tbuf, src->brokers[i].host);
-
-
-	/* Copy TopicMetadata */
-        md->topics = rd_tmpabuf_write(&tbuf, src->topics,
-				      md->topic_cnt * sizeof(*md->topics));
-
-	for (i = 0 ; i < md->topic_cnt ; i++) {
-		int j;
-
-		md->topics[i].topic = rd_tmpabuf_write_str(&tbuf,
-							   src->topics[i].topic);
-
-
-		/* Copy partitions */
-		md->topics[i].partitions =
-			rd_tmpabuf_write(&tbuf, src->topics[i].partitions,
-					 md->topics[i].partition_cnt *
-					 sizeof(*md->topics[i].partitions));
-
-		for (j = 0 ; j < md->topics[i].partition_cnt ; j++) {
-			/* Copy replicas and ISRs */
-			md->topics[i].partitions[j].replicas =
-				rd_tmpabuf_write(&tbuf,
-						 src->topics[i].partitions[j].
-						 replicas,
-						 md->topics[i].partitions[j].
-						 replica_cnt *
-						 sizeof(*md->topics[i].
-							partitions[j].
-							replicas));
-
-			md->topics[i].partitions[j].isrs =
-				rd_tmpabuf_write(&tbuf,
-						 src->topics[i].partitions[j].
-						 isrs,
-						 md->topics[i].partitions[j].
-						 isr_cnt *
-						 sizeof(*md->topics[i].
-							partitions[j].
-							isrs));
-
-		}
-	}
-
-	/* Check for tmpabuf errors */
-	if (rd_tmpabuf_failed(&tbuf))
-		rd_kafka_assert(NULL, !*"metadata copy failed");
-
-	/* Deliberately not destroying the tmpabuf since we return
-	 * its allocated memory. */
-
-	return md;
-}
-
-
-
-
-/**
- * Handle a Metadata response message.
- *
- * @param topics are the requested topics (may be NULL)
- *
- * The metadata will be marshalled into 'struct rd_kafka_metadata*' structs.
- *
- * Returns the marshalled metadata, or NULL on parse error.
- *
- * @locality rdkafka main thread
- */
-struct rd_kafka_metadata *
-rd_kafka_parse_Metadata (rd_kafka_broker_t *rkb,
-                         rd_kafka_buf_t *request,
-                         rd_kafka_buf_t *rkbuf) {
-        rd_kafka_t *rk = rkb->rkb_rk;
-        int i, j, k;
-        rd_tmpabuf_t tbuf;
-        struct rd_kafka_metadata *md;
-        size_t rkb_namelen;
-        const int log_decode_errors = LOG_ERR;
-        rd_list_t *missing_topics = NULL;
-        const rd_list_t *requested_topics = request->rkbuf_u.Metadata.topics;
-        int all_topics = request->rkbuf_u.Metadata.all_topics;
-        const char *reason = request->rkbuf_u.Metadata.reason ?
-                request->rkbuf_u.Metadata.reason : "(no reason)";
-        int ApiVersion = request->rkbuf_reqhdr.ApiVersion;
-        rd_kafkap_str_t cluster_id = RD_ZERO_INIT;
-        int32_t controller_id = -1;
-
-        rd_kafka_assert(NULL, thrd_is_current(rk->rk_thread));
-
-        /* Remove topics from missing_topics as they are seen in Metadata. */
-        if (requested_topics)
-                missing_topics = rd_list_copy(requested_topics,
-                                              rd_list_string_copy, NULL);
-
-        rd_kafka_broker_lock(rkb);
-        rkb_namelen = strlen(rkb->rkb_name)+1;
-        /* We assume that the marshalled representation is
-         * no more than 4 times larger than the wire representation. */
-        rd_tmpabuf_new(&tbuf,
-                       sizeof(*md) + rkb_namelen + (rkbuf->rkbuf_totlen * 4),
-                       0/*dont assert on fail*/);
-
-        if (!(md = rd_tmpabuf_alloc(&tbuf, sizeof(*md))))
-                goto err;
-        md->orig_broker_id = rkb->rkb_nodeid;
-        md->orig_broker_name = rd_tmpabuf_write(&tbuf,
-                                                rkb->rkb_name, rkb_namelen);
-        rd_kafka_broker_unlock(rkb);
-
-        /* Read Brokers */
-        rd_kafka_buf_read_i32a(rkbuf, md->broker_cnt);
-        if (md->broker_cnt > RD_KAFKAP_BROKERS_MAX)
-                rd_kafka_buf_parse_fail(rkbuf, "Broker_cnt %i > BROKERS_MAX %i",
-                                        md->broker_cnt, RD_KAFKAP_BROKERS_MAX);
-
-        if (!(md->brokers = rd_tmpabuf_alloc(&tbuf, md->broker_cnt *
-                                             sizeof(*md->brokers))))
-                rd_kafka_buf_parse_fail(rkbuf,
-                                        "%d brokers: tmpabuf memory shortage",
-                                        md->broker_cnt);
-
-        for (i = 0 ; i < md->broker_cnt ; i++) {
-                rd_kafka_buf_read_i32a(rkbuf, md->brokers[i].id);
-                rd_kafka_buf_read_str_tmpabuf(rkbuf, &tbuf, md->brokers[i].host);
-                rd_kafka_buf_read_i32a(rkbuf, md->brokers[i].port);
-
-                if (ApiVersion >= 1) {
-                        rd_kafkap_str_t rack;
-                        rd_kafka_buf_read_str(rkbuf, &rack);
-                }
-        }
-
-        if (ApiVersion >= 2)
-                rd_kafka_buf_read_str(rkbuf, &cluster_id);
-
-        if (ApiVersion >= 1) {
-                rd_kafka_buf_read_i32(rkbuf, &controller_id);
-                rd_rkb_dbg(rkb, METADATA,
-                           "METADATA", "ClusterId: %.*s, ControllerId: %"PRId32,
-                           RD_KAFKAP_STR_PR(&cluster_id), controller_id);
-        }
-
-
-
-        /* Read TopicMetadata */
-        rd_kafka_buf_read_i32a(rkbuf, md->topic_cnt);
-        rd_rkb_dbg(rkb, METADATA, "METADATA", "%i brokers, %i topics",
-                   md->broker_cnt, md->topic_cnt);
-
-        if (md->topic_cnt > RD_KAFKAP_TOPICS_MAX)
-                rd_kafka_buf_parse_fail(rkbuf, "TopicMetadata_cnt %"PRId32
-                                        " > TOPICS_MAX %i",
-                                        md->topic_cnt, RD_KAFKAP_TOPICS_MAX);
-
-        if (!(md->topics = rd_tmpabuf_alloc(&tbuf,
-                                            md->topic_cnt *
-                                            sizeof(*md->topics))))
-                rd_kafka_buf_parse_fail(rkbuf,
-                                        "%d topics: tmpabuf memory shortage",
-                                        md->topic_cnt);
-
-        for (i = 0 ; i < md->topic_cnt ; i++) {
-                rd_kafka_buf_read_i16a(rkbuf, md->topics[i].err);
-                rd_kafka_buf_read_str_tmpabuf(rkbuf, &tbuf, md->topics[i].topic);
-                if (ApiVersion >= 1) {
-                        int8_t is_internal;
-                        rd_kafka_buf_read_i8(rkbuf, &is_internal);
-                }
-
-                /* PartitionMetadata */
-                rd_kafka_buf_read_i32a(rkbuf, md->topics[i].partition_cnt);
-                if (md->topics[i].partition_cnt > RD_KAFKAP_PARTITIONS_MAX)
-                        rd_kafka_buf_parse_fail(rkbuf,
-                                                "TopicMetadata[%i]."
-                                                "PartitionMetadata_cnt %i "
-                                                "> PARTITIONS_MAX %i",
-                                                i, md->topics[i].partition_cnt,
-                                                RD_KAFKAP_PARTITIONS_MAX);
-
-                if (!(md->topics[i].partitions =
-                      rd_tmpabuf_alloc(&tbuf,
-                                       md->topics[i].partition_cnt *
-                                       sizeof(*md->topics[i].partitions))))
-                        rd_kafka_buf_parse_fail(rkbuf,
-                                                "%s: %d partitions: "
-                                                "tmpabuf memory shortage",
-                                                md->topics[i].topic,
-                                                md->topics[i].partition_cnt);
-
-                for (j = 0 ; j < md->topics[i].partition_cnt ; j++) {
-                        rd_kafka_buf_read_i16a(rkbuf, md->topics[i].partitions[j].err);
-                        rd_kafka_buf_read_i32a(rkbuf, md->topics[i].partitions[j].id);
-                        rd_kafka_buf_read_i32a(rkbuf, md->topics[i].partitions[j].leader);
-
-                        /* Replicas */
-                        rd_kafka_buf_read_i32a(rkbuf, md->topics[i].partitions[j].replica_cnt);
-                        if (md->topics[i].partitions[j].replica_cnt >
-                            RD_KAFKAP_BROKERS_MAX)
-                                rd_kafka_buf_parse_fail(rkbuf,
-                                                        "TopicMetadata[%i]."
-                                                        "PartitionMetadata[%i]."
-                                                        "Replica_cnt "
-                                                        "%i > BROKERS_MAX %i",
-                                                        i, j,
-                                                        md->topics[i].
-                                                        partitions[j].
-                                                        replica_cnt,
-                                                        RD_KAFKAP_BROKERS_MAX);
-
-                        if (!(md->topics[i].partitions[j].replicas =
-                              rd_tmpabuf_alloc(&tbuf,
-                                               md->topics[i].
-                                               partitions[j].replica_cnt *
-                                               sizeof(*md->topics[i].
-                                                      partitions[j].replicas))))
-                                rd_kafka_buf_parse_fail(
-                                        rkbuf,
-                                        "%s [%"PRId32"]: %d replicas: "
-                                        "tmpabuf memory shortage",
-                                        md->topics[i].topic,
-                                        md->topics[i].partitions[j].id,
-                                        md->topics[i].partitions[j].replica_cnt);
-
-
-                        for (k = 0 ;
-                             k < md->topics[i].partitions[j].replica_cnt; k++)
-                                rd_kafka_buf_read_i32a(rkbuf, md->topics[i].partitions[j].
-                                           replicas[k]);
-
-                        /* Isrs */
-                        rd_kafka_buf_read_i32a(rkbuf, md->topics[i].partitions[j].isr_cnt);
-                        if (md->topics[i].partitions[j].isr_cnt >
-                            RD_KAFKAP_BROKERS_MAX)
-                                rd_kafka_buf_parse_fail(rkbuf,
-                                                        "TopicMetadata[%i]."
-                                                        "PartitionMetadata[%i]."
-                                                        "Isr_cnt "
-                                                        "%i > BROKERS_MAX %i",
-                                                        i, j,
-                                                        md->topics[i].
-                                                        partitions[j].isr_cnt,
-                                                        RD_KAFKAP_BROKERS_MAX);
-
-                        if (!(md->topics[i].partitions[j].isrs =
-                              rd_tmpabuf_alloc(&tbuf,
-                                               md->topics[i].
-                                               partitions[j].isr_cnt *
-                                               sizeof(*md->topics[i].
-                                                      partitions[j].isrs))))
-                                rd_kafka_buf_parse_fail(
-                                        rkbuf,
-                                        "%s [%"PRId32"]: %d isrs: "
-                                        "tmpabuf memory shortage",
-                                        md->topics[i].topic,
-                                        md->topics[i].partitions[j].id,
-                                        md->topics[i].partitions[j].isr_cnt);
-
-
-                        for (k = 0 ;
-                             k < md->topics[i].partitions[j].isr_cnt; k++)
-                                rd_kafka_buf_read_i32a(rkbuf, md->topics[i].
-                                                       partitions[j].isrs[k]);
-
-                }
-        }
-
-        /* Entire Metadata response now parsed without errors:
-         * update our internal state according to the response. */
-
-        /* Avoid metadata updates when we're terminating. */
-        if (rd_kafka_terminating(rkb->rkb_rk))
-                goto done;
-
-        if (md->broker_cnt == 0 && md->topic_cnt == 0) {
-                rd_rkb_dbg(rkb, METADATA, "METADATA",
-                           "No brokers or topics in metadata: retrying");
-                goto err;
-        }
-
-        /* Update our list of brokers. */
-        for (i = 0 ; i < md->broker_cnt ; i++) {
-                rd_rkb_dbg(rkb, METADATA, "METADATA",
-                           "  Broker #%i/%i: %s:%i NodeId %"PRId32,
-                           i, md->broker_cnt,
-                           md->brokers[i].host,
-                           md->brokers[i].port,
-                           md->brokers[i].id);
-                rd_kafka_broker_update(rkb->rkb_rk, rkb->rkb_proto,
-                                       &md->brokers[i]);
-        }
-
-        /* Update partition count and leader for each topic we know about */
-        for (i = 0 ; i < md->topic_cnt ; i++) {
-                rd_kafka_metadata_topic_t *mdt = &md->topics[i];
-                rd_rkb_dbg(rkb, METADATA, "METADATA",
-                           "  Topic #%i/%i: %s with %i partitions%s%s",
-                           i, md->topic_cnt, mdt->topic,
-                           mdt->partition_cnt,
-                           mdt->err ? ": " : "",
-                           mdt->err ? rd_kafka_err2str(mdt->err) : "");
-
-                /* Ignore topics in blacklist */
-                if (rkb->rkb_rk->rk_conf.topic_blacklist &&
-                    rd_kafka_pattern_match(rkb->rkb_rk->rk_conf.topic_blacklist,
-                                           mdt->topic)) {
-                        rd_rkb_dbg(rkb, TOPIC, "BLACKLIST",
-                                   "Ignoring blacklisted topic \"%s\" "
-                                   "in metadata", mdt->topic);
-                        continue;
-                }
-
-                /* Ignore metadata completely for temporary errors. (issue #513)
-                 *   LEADER_NOT_AVAILABLE: Broker is rebalancing
-                 */
-                if (mdt->err == RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE &&
-                    mdt->partition_cnt == 0) {
-                        rd_rkb_dbg(rkb, TOPIC, "METADATA",
-                                   "Temporary error in metadata reply for "
-                                   "topic %s (PartCnt %i): %s: ignoring",
-                                   mdt->topic, mdt->partition_cnt,
-                                   rd_kafka_err2str(mdt->err));
-                        rd_list_free_cb(missing_topics,
-                                        rd_list_remove_cmp(missing_topics,
-                                                           mdt->topic,
-                                                           (void *)strcmp));
-                        continue;
-                }
-
-
-                /* Update local topic & partition state based on metadata */
-                rd_kafka_topic_metadata_update2(rkb, mdt);
-
-                if (requested_topics) {
-                        rd_list_free_cb(missing_topics,
-                                        rd_list_remove_cmp(missing_topics,
-                                                           mdt->topic,
-                                                           (void*)strcmp));
-                        if (!all_topics) {
-                                rd_kafka_wrlock(rk);
-                                rd_kafka_metadata_cache_topic_update(rk, mdt);
-                                rd_kafka_wrunlock(rk);
-                        }
-                }
-        }
-
-
-        /* Requested topics not seen in metadata? Propagate to topic code. */
-        if (missing_topics) {
-                char *topic;
-                rd_rkb_dbg(rkb, TOPIC, "METADATA",
-                           "%d/%d requested topic(s) seen in metadata",
-                           rd_list_cnt(requested_topics) -
-                           rd_list_cnt(missing_topics),
-                           rd_list_cnt(requested_topics));
-                for (i = 0 ; i < rd_list_cnt(missing_topics) ; i++)
-                        rd_rkb_dbg(rkb, TOPIC, "METADATA", "wanted %s",
-                                   (char *)(missing_topics->rl_elems[i]));
-                RD_LIST_FOREACH(topic, missing_topics, i) {
-                        shptr_rd_kafka_itopic_t *s_rkt;
-
-                        s_rkt = rd_kafka_topic_find(rkb->rkb_rk, topic, 1/*lock*/);
-                        if (s_rkt) {
-                                rd_kafka_topic_metadata_none(
-                                        rd_kafka_topic_s2i(s_rkt));
-                                rd_kafka_topic_destroy0(s_rkt);
-                        }
-                }
-        }
-
-
-        rd_kafka_wrlock(rkb->rkb_rk);
-        rkb->rkb_rk->rk_ts_metadata = rd_clock();
-
-        /* Update cached cluster id. */
-        if (RD_KAFKAP_STR_LEN(&cluster_id) > 0 &&
-            (!rkb->rkb_rk->rk_clusterid ||
-             rd_kafkap_str_cmp_str(&cluster_id, rkb->rkb_rk->rk_clusterid))) {
-                rd_rkb_dbg(rkb, BROKER|RD_KAFKA_DBG_GENERIC, "CLUSTERID",
-                           "ClusterId update \"%s\" -> \"%.*s\"",
-                           rkb->rkb_rk->rk_clusterid ?
-                           rkb->rkb_rk->rk_clusterid : "",
-                           RD_KAFKAP_STR_PR(&cluster_id));
-                if (rkb->rkb_rk->rk_clusterid)
-                        rd_free(rkb->rkb_rk->rk_clusterid);
-                rkb->rkb_rk->rk_clusterid = RD_KAFKAP_STR_DUP(&cluster_id);
-        }
-
-        if (all_topics) {
-                rd_kafka_metadata_cache_update(rkb->rkb_rk,
-                                               md, 1/*abs update*/);
-
-                if (rkb->rkb_rk->rk_full_metadata)
-                        rd_kafka_metadata_destroy(rkb->rkb_rk->rk_full_metadata);
-                rkb->rkb_rk->rk_full_metadata =
-                        rd_kafka_metadata_copy(md, tbuf.of);
-                rkb->rkb_rk->rk_ts_full_metadata = rkb->rkb_rk->rk_ts_metadata;
-                rd_rkb_dbg(rkb, METADATA, "METADATA",
-                           "Caching full metadata with "
-                           "%d broker(s) and %d topic(s): %s",
-                           md->broker_cnt, md->topic_cnt, reason);
-        } else {
-                rd_kafka_metadata_cache_expiry_start(rk);
-        }
-
-        /* Remove cache hints for the originally requested topics. */
-        if (requested_topics)
-                rd_kafka_metadata_cache_purge_hints(rk, requested_topics);
-
-        rd_kafka_wrunlock(rkb->rkb_rk);
-
-        /* Check if cgrp effective subscription is affected by
-         * new metadata. */
-        if (rkb->rkb_rk->rk_cgrp)
-                rd_kafka_cgrp_metadata_update_check(
-                        rkb->rkb_rk->rk_cgrp, 1/*do join*/);
-
-
-
-done:
-        if (missing_topics)
-                rd_list_destroy(missing_topics);
-
-        /* This metadata request was triggered by someone wanting
-         * the metadata information back as a reply, so send that reply now.
-         * In this case we must not rd_free the metadata memory here;
-         * the requester will do that.
-         * The tbuf is explicitly not destroyed as we return its memory
-         * to the caller. */
-        return md;
-
- err_parse:
-err:
-        if (requested_topics) {
-                /* Failed requests shall purge cache hints for
-                 * the requested topics. */
-                rd_kafka_wrlock(rkb->rkb_rk);
-                rd_kafka_metadata_cache_purge_hints(rk, requested_topics);
-                rd_kafka_wrunlock(rkb->rkb_rk);
-        }
-
-        if (missing_topics)
-                rd_list_destroy(missing_topics);
-
-        rd_tmpabuf_destroy(&tbuf);
-        return NULL;
-}
-
-
-/**
- * @brief Add all topics in current cached full metadata
- *        to \p tinfos (rd_kafka_topic_info_t *)
- *        that match the topics in \p match
- *
- * @returns the number of topics matched and added to \p list
- *
- * @locks none
- * @locality any
- */
-size_t
-rd_kafka_metadata_topic_match (rd_kafka_t *rk, rd_list_t *tinfos,
-                               const rd_kafka_topic_partition_list_t *match) {
-        int ti;
-        size_t cnt = 0;
-        const struct rd_kafka_metadata *metadata;
-
-
-        rd_kafka_rdlock(rk);
-        metadata = rk->rk_full_metadata;
-        if (!metadata) {
-                rd_kafka_rdunlock(rk);
-                return 0;
-        }
-
-        /* For each topic in the cluster, scan through the match list
-         * to find matching topic. */
-        for (ti = 0 ; ti < metadata->topic_cnt ; ti++) {
-                const char *topic = metadata->topics[ti].topic;
-                int i;
-
-                /* Ignore topics in blacklist */
-                if (rk->rk_conf.topic_blacklist &&
-                    rd_kafka_pattern_match(rk->rk_conf.topic_blacklist, topic))
-                        continue;
-
-                /* Scan for matches */
-                for (i = 0 ; i < match->cnt ; i++) {
-                        if (!rd_kafka_topic_match(rk,
-                                                  match->elems[i].topic, topic))
-                                continue;
-
-                        if (metadata->topics[ti].err)
-                                continue; /* Skip errored topics */
-
-                        rd_list_add(tinfos,
-                                    rd_kafka_topic_info_new(
-                                            topic,
-                                            metadata->topics[ti].partition_cnt));
-                        cnt++;
-                }
-        }
-        rd_kafka_rdunlock(rk);
-
-        return cnt;
-}
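
The matcher above walks every topic in the cached full metadata and tests it against each subscription entry. A small standalone sketch of that kind of matching, assuming the '^'-prefix-means-regex convention librdkafka uses for subscriptions (POSIX <regex.h>, illustrative topic names):

    #include <stdio.h>
    #include <string.h>
    #include <regex.h>

    /* Return 1 if 'topic' matches 'pattern'. Patterns starting with '^' are
     * treated as extended regular expressions, anything else as an exact name. */
    static int topic_matches (const char *pattern, const char *topic) {
            if (*pattern != '^')
                    return !strcmp(pattern, topic);

            regex_t re;
            if (regcomp(&re, pattern, REG_EXTENDED | REG_NOSUB) != 0)
                    return 0;
            int hit = regexec(&re, topic, 0, NULL, 0) == 0;
            regfree(&re);
            return hit;
    }

    int main (void) {
            const char *patterns[] = { "^metrics\\..*", "orders" };
            const char *cluster[]  = { "metrics.cpu", "metrics.mem",
                                       "orders", "audit" };
            for (size_t p = 0; p < sizeof(patterns)/sizeof(*patterns); p++)
                    for (size_t t = 0; t < sizeof(cluster)/sizeof(*cluster); t++)
                            if (topic_matches(patterns[p], cluster[t]))
                                    printf("%s matches %s\n",
                                           patterns[p], cluster[t]);
            return 0;
    }
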
-
-
-/**
- * @brief Add all topics in \p match that matches cached metadata.
- * @remark MUST NOT be used with wildcard topics,
- *         see rd_kafka_metadata_topic_match() for that.
- *
- * @returns the number of topics matched and added to \p tinfos
- * @locks none
- */
-size_t
-rd_kafka_metadata_topic_filter (rd_kafka_t *rk, rd_list_t *tinfos,
-                               const rd_kafka_topic_partition_list_t *match) {
-        int i;
-        size_t cnt = 0;
-
-        rd_kafka_rdlock(rk);
-        /* For each topic in match, look up the topic in the cache. */
-        for (i = 0 ; i < match->cnt ; i++) {
-                const char *topic = match->elems[i].topic;
-                const rd_kafka_metadata_topic_t *mtopic;
-
-                /* Ignore topics in blacklist */
-                if (rk->rk_conf.topic_blacklist &&
-                    rd_kafka_pattern_match(rk->rk_conf.topic_blacklist, topic))
-                        continue;
-
-                mtopic = rd_kafka_metadata_cache_topic_get(rk, topic,
-                                                           1/*valid*/);
-                if (mtopic && !mtopic->err) {
-                        rd_list_add(tinfos,
-                                    rd_kafka_topic_info_new(
-                                            topic, mtopic->partition_cnt));
-
-                        cnt++;
-                }
-        }
-        rd_kafka_rdunlock(rk);
-
-        return cnt;
-}
-
-
-void rd_kafka_metadata_log (rd_kafka_t *rk, const char *fac,
-                            const struct rd_kafka_metadata *md) {
-        int i;
-
-        rd_kafka_dbg(rk, METADATA, fac,
-                     "Metadata with %d broker(s) and %d topic(s):",
-                     md->broker_cnt, md->topic_cnt);
-
-        for (i = 0 ; i < md->broker_cnt ; i++) {
-                rd_kafka_dbg(rk, METADATA, fac,
-                             "  Broker #%i/%i: %s:%i NodeId %"PRId32,
-                             i, md->broker_cnt,
-                             md->brokers[i].host,
-                             md->brokers[i].port,
-                             md->brokers[i].id);
-        }
-
-        for (i = 0 ; i < md->topic_cnt ; i++) {
-                rd_kafka_dbg(rk, METADATA, fac,
-                             "  Topic #%i/%i: %s with %i partitions%s%s",
-                             i, md->topic_cnt, md->topics[i].topic,
-                             md->topics[i].partition_cnt,
-                             md->topics[i].err ? ": " : "",
-                             md->topics[i].err ?
-                             rd_kafka_err2str(md->topics[i].err) : "");
-        }
-}
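
The same broker/topic dump is available to applications through the public metadata API. A minimal sketch using the public librdkafka C API, assuming a broker is reachable at localhost:9092:

    #include <stdio.h>
    #include <librdkafka/rdkafka.h>

    int main (void) {
            char errstr[512];
            rd_kafka_conf_t *conf = rd_kafka_conf_new();
            rd_kafka_conf_set(conf, "bootstrap.servers", "localhost:9092",
                              errstr, sizeof(errstr));

            rd_kafka_t *rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf,
                                          errstr, sizeof(errstr));
            if (!rk) {
                    fprintf(stderr, "rd_kafka_new failed: %s\n", errstr);
                    return 1;
            }

            const struct rd_kafka_metadata *md;
            rd_kafka_resp_err_t err =
                    rd_kafka_metadata(rk, 1/*all topics*/, NULL, &md, 5000);
            if (!err) {
                    printf("Metadata with %d broker(s) and %d topic(s):\n",
                           md->broker_cnt, md->topic_cnt);
                    for (int i = 0; i < md->broker_cnt; i++)
                            printf("  Broker %s:%d (id %d)\n",
                                   md->brokers[i].host, md->brokers[i].port,
                                   (int)md->brokers[i].id);
                    for (int i = 0; i < md->topic_cnt; i++)
                            printf("  Topic %s, %d partition(s)\n",
                                   md->topics[i].topic,
                                   md->topics[i].partition_cnt);
                    rd_kafka_metadata_destroy(md);
            } else {
                    fprintf(stderr, "metadata request failed: %s\n",
                            rd_kafka_err2str(err));
            }

            rd_kafka_destroy(rk);
            return 0;
    }
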
-
-
-
-
-/**
- * @brief Refresh metadata for \p topics
- *
- * @param rk: used to look up usable broker if \p rkb is NULL.
- * @param rkb: use this broker, unless NULL then any usable broker from \p rk
- * @param force: force refresh even if topics are up-to-date in cache
- *
- * @returns an error code
- *
- * @locality any
- * @locks none
- */
-rd_kafka_resp_err_t
-rd_kafka_metadata_refresh_topics (rd_kafka_t *rk, rd_kafka_broker_t *rkb,
-                                  const rd_list_t *topics, int force,
-                                  const char *reason) {
-        rd_list_t q_topics;
-        int destroy_rkb = 0;
-
-        if (!rk)
-                rk = rkb->rkb_rk;
-
-        rd_kafka_wrlock(rk);
-
-        if (!rkb) {
-                if (!(rkb = rd_kafka_broker_any_usable(rk, RD_POLL_NOWAIT, 0))){
-                        rd_kafka_wrunlock(rk);
-                        rd_kafka_dbg(rk, METADATA, "METADATA",
-                                     "Skipping metadata refresh of %d topic(s):"
-                                     " no usable brokers",
-                                     rd_list_cnt(topics));
-                        return RD_KAFKA_RESP_ERR__TRANSPORT;
-                }
-                destroy_rkb = 1;
-        }
-
-        rd_list_init(&q_topics, rd_list_cnt(topics), rd_free);
-
-        if (!force) {
-
-                /* Hint cache of upcoming MetadataRequest and filter
-                 * out any topics that are already being requested.
-                 * q_topics will contain remaining topics to query. */
-                rd_kafka_metadata_cache_hint(rk, topics, &q_topics,
-                                             0/*dont replace*/);
-                rd_kafka_wrunlock(rk);
-
-                if (rd_list_cnt(&q_topics) == 0) {
-                        /* No topics need new query. */
-                        rd_kafka_dbg(rk, METADATA, "METADATA",
-                                     "Skipping metadata refresh of "
-                                     "%d topic(s): %s: "
-                                     "already being requested",
-                                     rd_list_cnt(topics), reason);
-                        rd_list_destroy(&q_topics);
-                        if (destroy_rkb)
-                                rd_kafka_broker_destroy(rkb);
-                        return RD_KAFKA_RESP_ERR_NO_ERROR;
-                }
-
-        } else {
-                rd_kafka_wrunlock(rk);
-                rd_list_copy_to(&q_topics, topics, rd_list_string_copy, NULL);
-        }
-
-        rd_kafka_dbg(rk, METADATA, "METADATA",
-                     "Requesting metadata for %d/%d topics: %s",
-                     rd_list_cnt(&q_topics), rd_list_cnt(topics), reason);
-
-        rd_kafka_MetadataRequest(rkb, &q_topics, reason, NULL);
-
-        rd_list_destroy(&q_topics);
-
-        if (destroy_rkb)
-                rd_kafka_broker_destroy(rkb);
-
-        return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-
-/**
- * @brief Refresh metadata for known topics
- *
- * @param rk: used to look up usable broker if \p rkb is NULL.
- * @param rkb: use this broker, unless NULL then any usable broker from \p rk
- * @param force: refresh even if cache is up-to-date
- *
- * @returns an error code (__UNKNOWN_TOPIC if there are no local topics)
- *
- * @locality any
- * @locks none
- */
-rd_kafka_resp_err_t
-rd_kafka_metadata_refresh_known_topics (rd_kafka_t *rk, rd_kafka_broker_t *rkb,
-                                        int force, const char *reason) {
-        rd_list_t topics;
-        rd_kafka_resp_err_t err;
-
-        if (!rk)
-                rk = rkb->rkb_rk;
-
-        rd_list_init(&topics, 8, rd_free);
-        rd_kafka_local_topics_to_list(rk, &topics);
-
-        if (rd_list_cnt(&topics) == 0)
-                err = RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC;
-        else
-                err = rd_kafka_metadata_refresh_topics(rk, rkb,
-                                                       &topics, force, reason);
-
-        rd_list_destroy(&topics);
-
-        return err;
-}
-
-
-/**
- * @brief Refresh broker list by metadata.
- *
- * Attempts to use sparse metadata request if possible, else falls back
- * on a full metadata request. (NOTE: sparse not implemented, KIP-4)
- *
- * @param rk: used to look up usable broker if \p rkb is NULL.
- * @param rkb: use this broker, unless NULL then any usable broker from \p rk
- *
- * @returns an error code
- *
- * @locality any
- * @locks none
- */
-rd_kafka_resp_err_t
-rd_kafka_metadata_refresh_brokers (rd_kafka_t *rk, rd_kafka_broker_t *rkb,
-                                   const char *reason) {
-        return rd_kafka_metadata_request(rk, rkb, NULL /*brokers only*/,
-                                         reason, NULL);
-}
-
-
-
-/**
- * @brief Refresh metadata for all topics in cluster.
- *        This is a full metadata request which might be taxing on the
- *        broker if the cluster has many topics.
- *
- * @locality any
- * @locks none
- */
-rd_kafka_resp_err_t
-rd_kafka_metadata_refresh_all (rd_kafka_t *rk, rd_kafka_broker_t *rkb,
-                               const char *reason) {
-        int destroy_rkb = 0;
-        rd_list_t topics;
-
-        if (!rk)
-                rk = rkb->rkb_rk;
-
-        if (!rkb) {
-                if (!(rkb = rd_kafka_broker_any_usable(rk, RD_POLL_NOWAIT, 1)))
-                        return RD_KAFKA_RESP_ERR__TRANSPORT;
-                destroy_rkb = 1;
-        }
-
-        rd_list_init(&topics, 0, NULL); /* empty list = all topics */
-        rd_kafka_MetadataRequest(rkb, &topics, reason, NULL);
-        rd_list_destroy(&topics);
-
-        if (destroy_rkb)
-                rd_kafka_broker_destroy(rkb);
-
-        return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
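
To avoid that taxing all-topics request, the public API can also scope the query to a single topic. A short sketch along the lines of the previous one, with a hypothetical topic name and the same assumed broker address:

    #include <stdio.h>
    #include <librdkafka/rdkafka.h>

    int main (void) {
            char errstr[512];
            rd_kafka_conf_t *conf = rd_kafka_conf_new();
            rd_kafka_conf_set(conf, "bootstrap.servers", "localhost:9092",
                              errstr, sizeof(errstr));
            rd_kafka_t *rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf,
                                          errstr, sizeof(errstr));
            if (!rk)
                    return 1;

            /* Ask only about one topic instead of the whole cluster. */
            rd_kafka_topic_t *rkt = rd_kafka_topic_new(rk, "my-topic", NULL);
            const struct rd_kafka_metadata *md;
            if (!rd_kafka_metadata(rk, 0/*not all topics*/, rkt, &md, 5000)) {
                    printf("%s: %d partition(s)\n",
                           md->topics[0].topic, md->topics[0].partition_cnt);
                    rd_kafka_metadata_destroy(md);
            }
            rd_kafka_topic_destroy(rkt);
            rd_kafka_destroy(rk);
            return 0;
    }
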
-
-
-/**
- * @brief Lower-level Metadata request that takes a callback (with replyq set)
- *        which will be triggered after parsing is complete.
- *
- * @locks none
- * @locality any
- */
-rd_kafka_resp_err_t
-rd_kafka_metadata_request (rd_kafka_t *rk, rd_kafka_broker_t *rkb,
-                           const rd_list_t *topics,
-                           const char *reason, rd_kafka_op_t *rko) {
-        int destroy_rkb = 0;
-
-        if (!rkb) {
-                if (!(rkb = rd_kafka_broker_any_usable(rk, RD_POLL_NOWAIT, 1)))
-                        return RD_KAFKA_RESP_ERR__TRANSPORT;
-                destroy_rkb = 1;
-        }
-
-        rd_kafka_MetadataRequest(rkb, topics, reason, rko);
-
-        if (destroy_rkb)
-                rd_kafka_broker_destroy(rkb);
-
-        return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-
-/**
- * @brief Query timer callback to trigger refresh for topics
- *        that are missing their leaders.
- *
- * @locks none
- * @locality rdkafka main thread
- */
-static void rd_kafka_metadata_leader_query_tmr_cb (rd_kafka_timers_t *rkts,
-                                                   void *arg) {
-        rd_kafka_t *rk = rkts->rkts_rk;
-        rd_kafka_timer_t *rtmr = &rk->rk_metadata_cache.rkmc_query_tmr;
-        rd_kafka_itopic_t *rkt;
-        rd_list_t topics;
-
-        rd_kafka_wrlock(rk);
-        rd_list_init(&topics, rk->rk_topic_cnt, rd_free);
-
-        TAILQ_FOREACH(rkt, &rk->rk_topics, rkt_link) {
-                int i, no_leader = 0;
-                rd_kafka_topic_rdlock(rkt);
-
-                if (rkt->rkt_state == RD_KAFKA_TOPIC_S_NOTEXISTS) {
-                        /* Skip topics that are known to not exist. */
-                        rd_kafka_topic_rdunlock(rkt);
-                        continue;
-                }
-
-                no_leader = rkt->rkt_flags & RD_KAFKA_TOPIC_F_LEADER_UNAVAIL;
-
-                /* Check if any partitions are missing their leaders. */
-                for (i = 0 ; !no_leader && i < rkt->rkt_partition_cnt ; i++) {
-                        rd_kafka_toppar_t *rktp =
-                                rd_kafka_toppar_s2i(rkt->rkt_p[i]);
-                        rd_kafka_toppar_lock(rktp);
-                        no_leader = !rktp->rktp_leader &&
-                                !rktp->rktp_next_leader;
-                        rd_kafka_toppar_unlock(rktp);
-                }
-
-                if (no_leader || rkt->rkt_partition_cnt == 0)
-                        rd_list_add(&topics, rd_strdup(rkt->rkt_topic->str));
-
-                rd_kafka_topic_rdunlock(rkt);
-        }
-
-        rd_kafka_wrunlock(rk);
-
-        if (rd_list_cnt(&topics) == 0) {
-                /* No leader-less topics+partitions, stop the timer. */
-                rd_kafka_timer_stop(rkts, rtmr, 1/*lock*/);
-        } else {
-                rd_kafka_metadata_refresh_topics(rk, NULL, &topics, 1/*force*/,
-                                                 "partition leader query");
-                /* Back off next query exponentially until we reach
-                 * the standard query interval - then stop the timer
-                 * since the intervalled querier will do the job for us. */
-                if (rk->rk_conf.metadata_refresh_interval_ms > 0 &&
-                    rtmr->rtmr_interval * 2 / 1000 >=
-                    rk->rk_conf.metadata_refresh_interval_ms)
-                        rd_kafka_timer_stop(rkts, rtmr, 1/*lock*/);
-                else
-                        rd_kafka_timer_backoff(rkts, rtmr,
-                                               (int)rtmr->rtmr_interval);
-        }
-
-        rd_list_destroy(&topics);
-}
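
The back-off arithmetic above doubles the query interval after each pass until the doubled interval would reach the standard refresh interval, at which point the timer stops and the regular refresh takes over. A tiny standalone sketch of that schedule, with illustrative interval values:

    #include <stdio.h>

    int main (void) {
            int interval_ms = 250;       /* fast leader-query interval (illustrative) */
            int standard_ms = 300000;    /* standard metadata refresh interval */

            while (interval_ms * 2 < standard_ms) {
                    printf("query leader-less topics again in %d ms\n", interval_ms);
                    interval_ms *= 2;
            }
            printf("stop fast timer; the %d ms refresh takes over\n", standard_ms);
            return 0;
    }
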
-
-
-
-/**
- * @brief Trigger fast leader query to quickly pick up on leader changes.
- *        The fast leader query is a quick query followed by later queries at
- *        exponentially increased intervals until no topics are missing
- *        leaders.
- *
- * @locks none
- * @locality any
- */
-void rd_kafka_metadata_fast_leader_query (rd_kafka_t *rk) {
-        rd_ts_t next;
-
-        /* Restart the timer if it will speed things up. */
-        next = rd_kafka_timer_next(&rk->rk_timers,
-                                   &rk->rk_metadata_cache.rkmc_query_tmr,
-                                   1/*lock*/);
-        if (next == -1 /* not started */ ||
-            next > rk->rk_conf.metadata_refresh_fast_interval_ms*1000) {
-                rd_kafka_dbg(rk, METADATA|RD_KAFKA_DBG_TOPIC, "FASTQUERY",
-                             "Starting fast leader query");
-                rd_kafka_timer_start(&rk->rk_timers,
-                                     &rk->rk_metadata_cache.rkmc_query_tmr,
-                                     rk->rk_conf.
-                                     metadata_refresh_fast_interval_ms*1000,
-                                     rd_kafka_metadata_leader_query_tmr_cb,
-                                     NULL);
-        }
-}

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdkafka_metadata.h
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdkafka_metadata.h b/thirdparty/librdkafka-0.11.1/src/rdkafka_metadata.h
deleted file mode 100644
index a0b77e1..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdkafka_metadata.h
+++ /dev/null
@@ -1,157 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2015, Magnus Edenhill
- * All rights reserved.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met: 
- * 
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer. 
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution. 
- * 
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#pragma once
-
-#include "rdavl.h"
-
-struct rd_kafka_metadata *
-rd_kafka_parse_Metadata (rd_kafka_broker_t *rkb,
-                         rd_kafka_buf_t *request, rd_kafka_buf_t *rkbuf);
-
-struct rd_kafka_metadata *
-rd_kafka_metadata_copy (const struct rd_kafka_metadata *md, size_t size);
-
-size_t
-rd_kafka_metadata_topic_match (rd_kafka_t *rk, rd_list_t *tinfos,
-                               const rd_kafka_topic_partition_list_t *match);
-size_t
-rd_kafka_metadata_topic_filter (rd_kafka_t *rk, rd_list_t *tinfos,
-                                const rd_kafka_topic_partition_list_t *match);
-
-void rd_kafka_metadata_log (rd_kafka_t *rk, const char *fac,
-                            const struct rd_kafka_metadata *md);
-
-
-
-rd_kafka_resp_err_t
-rd_kafka_metadata_refresh_topics (rd_kafka_t *rk, rd_kafka_broker_t *rkb,
-                                  const rd_list_t *topics, int force,
-                                  const char *reason);
-rd_kafka_resp_err_t
-rd_kafka_metadata_refresh_known_topics (rd_kafka_t *rk, rd_kafka_broker_t *rkb,
-                                        int force, const char *reason);
-rd_kafka_resp_err_t
-rd_kafka_metadata_refresh_brokers (rd_kafka_t *rk, rd_kafka_broker_t *rkb,
-                                   const char *reason);
-rd_kafka_resp_err_t
-rd_kafka_metadata_refresh_all (rd_kafka_t *rk, rd_kafka_broker_t *rkb,
-                               const char *reason);
-
-rd_kafka_resp_err_t
-rd_kafka_metadata_request (rd_kafka_t *rk, rd_kafka_broker_t *rkb,
-                           const rd_list_t *topics,
-                           const char *reason, rd_kafka_op_t *rko);
-
-
-/**
- * @{
- *
- * @brief Metadata cache
- */
-
-struct rd_kafka_metadata_cache_entry {
-        rd_avl_node_t rkmce_avlnode;             /* rkmc_avl */
-        TAILQ_ENTRY(rd_kafka_metadata_cache_entry) rkmce_link; /* rkmc_expiry */
-        rd_ts_t rkmce_ts_expires;                /* Expire time */
-        rd_ts_t rkmce_ts_insert;                 /* Insert time */
-        rd_kafka_metadata_topic_t rkmce_mtopic;  /* Cached topic metadata */
-        /* rkmce_partitions memory points here. */
-};
-
-#define RD_KAFKA_METADATA_CACHE_VALID(rkmce) \
-        ((rkmce)->rkmce_mtopic.err != RD_KAFKA_RESP_ERR__WAIT_CACHE)
-
-struct rd_kafka_metadata_cache {
-        rd_avl_t         rkmc_avl;
-        TAILQ_HEAD(, rd_kafka_metadata_cache_entry) rkmc_expiry;
-        rd_kafka_timer_t rkmc_expiry_tmr;
-        int              rkmc_cnt;
-
-        /* Protected by full_lock: */
-        mtx_t            rkmc_full_lock;
-        int              rkmc_full_topics_sent; /* Full MetadataRequest for
-                                                 * all topics has been sent,
-                                                 * awaiting response. */
-        int              rkmc_full_brokers_sent; /* Full MetadataRequest for
-                                                  * all brokers (but not topics)
-                                                  * has been sent,
-                                                  * awaiting response. */
-
-        rd_kafka_timer_t rkmc_query_tmr; /* Query timer for topics without
-                                          * leaders. */
-        cnd_t            rkmc_cnd;       /* cache_wait_change() cond. */
-        mtx_t            rkmc_cnd_lock;  /* lock for rkmc_cnd */
-};
-
-
-
-void rd_kafka_metadata_cache_expiry_start (rd_kafka_t *rk);
-void
-rd_kafka_metadata_cache_topic_update (rd_kafka_t *rk,
-                                      const rd_kafka_metadata_topic_t *mdt);
-void rd_kafka_metadata_cache_update (rd_kafka_t *rk,
-                                     const rd_kafka_metadata_t *md,
-                                     int abs_update);
-struct rd_kafka_metadata_cache_entry *
-rd_kafka_metadata_cache_find (rd_kafka_t *rk, const char *topic, int valid);
-void rd_kafka_metadata_cache_purge_hints (rd_kafka_t *rk,
-                                          const rd_list_t *topics);
-int rd_kafka_metadata_cache_hint (rd_kafka_t *rk,
-                                  const rd_list_t *topics, rd_list_t *dst,
-                                  int replace);
-int rd_kafka_metadata_cache_hint_rktparlist (
-        rd_kafka_t *rk,
-        const rd_kafka_topic_partition_list_t *rktparlist,
-        rd_list_t *dst,
-        int replace);
-
-const rd_kafka_metadata_topic_t *
-rd_kafka_metadata_cache_topic_get (rd_kafka_t *rk, const char *topic,
-                                   int valid);
-int rd_kafka_metadata_cache_topic_partition_get (
-        rd_kafka_t *rk,
-        const rd_kafka_metadata_topic_t **mtopicp,
-        const rd_kafka_metadata_partition_t **mpartp,
-        const char *topic, int32_t partition, int valid);
-
-int rd_kafka_metadata_cache_topics_count_exists (rd_kafka_t *rk,
-                                                 const rd_list_t *topics,
-                                                 int *metadata_agep);
-int rd_kafka_metadata_cache_topics_filter_hinted (rd_kafka_t *rk,
-                                                  rd_list_t *dst,
-                                                  const rd_list_t *src);
-
-void rd_kafka_metadata_fast_leader_query (rd_kafka_t *rk);
-
-void rd_kafka_metadata_cache_init (rd_kafka_t *rk);
-void rd_kafka_metadata_cache_destroy (rd_kafka_t *rk);
-int  rd_kafka_metadata_cache_wait_change (rd_kafka_t *rk, int timeout_ms);
-void rd_kafka_metadata_cache_dump (FILE *fp, rd_kafka_t *rk);
-
-/**@}*/


[32/51] [partial] nifi-minifi-cpp git commit: MINIFICPP-512 - upgrade to librdkafka 0.11.4

Posted by ph...@apache.org.
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdkafka_broker.c
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdkafka_broker.c b/thirdparty/librdkafka-0.11.1/src/rdkafka_broker.c
deleted file mode 100644
index 742e0fd..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdkafka_broker.c
+++ /dev/null
@@ -1,3797 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2015, Magnus Edenhill
- * All rights reserved.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met: 
- * 
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer. 
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution. 
- * 
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-
-#ifndef _MSC_VER
-#define _GNU_SOURCE
-/*
- * AIX defines this and the value needs to be set correctly. For Solaris,
- * src/rd.h defines _POSIX_SOURCE to be 200809L, which corresponds to XPG7,
- * which itself is not compatible with _XOPEN_SOURCE on that platform.
- */
-#if !defined(_AIX) && !defined(__sun)
-#define _XOPEN_SOURCE
-#endif
-#include <signal.h>
-#endif
-
-#include <stdio.h>
-#include <stdarg.h>
-#include <string.h>
-#include <ctype.h>
-
-#include "rd.h"
-#include "rdkafka_int.h"
-#include "rdkafka_msg.h"
-#include "rdkafka_msgset.h"
-#include "rdkafka_topic.h"
-#include "rdkafka_partition.h"
-#include "rdkafka_broker.h"
-#include "rdkafka_offset.h"
-#include "rdkafka_transport.h"
-#include "rdkafka_proto.h"
-#include "rdkafka_buf.h"
-#include "rdkafka_request.h"
-#include "rdkafka_sasl.h"
-#include "rdkafka_interceptor.h"
-#include "rdtime.h"
-#include "rdcrc32.h"
-#include "rdrand.h"
-#include "rdkafka_lz4.h"
-#if WITH_SSL
-#include <openssl/err.h>
-#endif
-#include "rdendian.h"
-
-
-const char *rd_kafka_broker_state_names[] = {
-	"INIT",
-	"DOWN",
-	"CONNECT",
-	"AUTH",
-	"UP",
-        "UPDATE",
-	"APIVERSION_QUERY",
-	"AUTH_HANDSHAKE"
-};
-
-const char *rd_kafka_secproto_names[] = {
-	[RD_KAFKA_PROTO_PLAINTEXT] = "plaintext",
-	[RD_KAFKA_PROTO_SSL] = "ssl",
-	[RD_KAFKA_PROTO_SASL_PLAINTEXT] = "sasl_plaintext",
-	[RD_KAFKA_PROTO_SASL_SSL] = "sasl_ssl",
-	NULL
-};
-
-
-
-
-
-
-
-#define rd_kafka_broker_terminating(rkb) \
-        (rd_refcnt_get(&(rkb)->rkb_refcnt) <= 1)
-
-
-/**
- * Construct broker nodename.
- */
-static void rd_kafka_mk_nodename (char *dest, size_t dsize,
-                                  const char *name, uint16_t port) {
-        rd_snprintf(dest, dsize, "%s:%hu", name, port);
-}
-
-/**
- * Construct descriptive broker name
- */
-static void rd_kafka_mk_brokername (char *dest, size_t dsize,
-				    rd_kafka_secproto_t proto,
-				    const char *nodename, int32_t nodeid,
-				    rd_kafka_confsource_t source) {
-
-	/* Prepend protocol name to brokername, unless it is a
-	 * standard plaintext broker in which case we omit the protocol part. */
-	if (proto != RD_KAFKA_PROTO_PLAINTEXT) {
-		int r = rd_snprintf(dest, dsize, "%s://",
-				    rd_kafka_secproto_names[proto]);
-		if (r >= (int)dsize) /* Skip proto name if it won't fit. */
-			r = 0;
-
-		dest += r;
-		dsize -= r;
-	}
-
-	if (nodeid == RD_KAFKA_NODEID_UA)
-		rd_snprintf(dest, dsize, "%s/%s",
-			    nodename,
-			    source == RD_KAFKA_INTERNAL ?
-			    "internal":"bootstrap");
-	else
-		rd_snprintf(dest, dsize, "%s/%"PRId32, nodename, nodeid);
-}
-
-
-/**
- * @brief Enable protocol feature(s) for the current broker.
- *
- * Locality: broker thread
- */
-static void rd_kafka_broker_feature_enable (rd_kafka_broker_t *rkb,
-					    int features) {
-	if (features & rkb->rkb_features)
-		return;
-
-	rkb->rkb_features |= features;
-	rd_rkb_dbg(rkb, BROKER | RD_KAFKA_DBG_PROTOCOL | RD_KAFKA_DBG_FEATURE,
-		   "FEATURE",
-		   "Updated enabled protocol features +%s to %s",
-		   rd_kafka_features2str(features),
-		   rd_kafka_features2str(rkb->rkb_features));
-}
-
-
-/**
- * @brief Disable protocol feature(s) for the current broker.
- *
- * Locality: broker thread
- */
-static void rd_kafka_broker_feature_disable (rd_kafka_broker_t *rkb,
-						       int features) {
-	if (!(features & rkb->rkb_features))
-		return;
-
-	rkb->rkb_features &= ~features;
-	rd_rkb_dbg(rkb, BROKER | RD_KAFKA_DBG_PROTOCOL | RD_KAFKA_DBG_FEATURE,
-		   "FEATURE",
-		   "Updated enabled protocol features -%s to %s",
-		   rd_kafka_features2str(features),
-		   rd_kafka_features2str(rkb->rkb_features));
-}
-
-
-/**
- * @brief Set protocol feature(s) for the current broker.
- *
- * @remark This replaces the previous feature set.
- *
- * @locality broker thread
- * @locks rd_kafka_broker_lock()
- */
-static void rd_kafka_broker_features_set (rd_kafka_broker_t *rkb, int features) {
-	if (rkb->rkb_features == features)
-		return;
-
-	rkb->rkb_features = features;
-	rd_rkb_dbg(rkb, BROKER, "FEATURE",
-		   "Updated enabled protocol features to %s",
-		   rd_kafka_features2str(rkb->rkb_features));
-}
-
-
-/**
- * @brief Check and return supported ApiVersion for \p ApiKey.
- *
- * @returns the highest supported ApiVersion in the specified range (inclusive)
- *          or -1 if the ApiKey is not supported or no matching ApiVersion.
- *          The current feature set is also returned in \p featuresp
- * @locks none
- * @locality any
- */
-int16_t rd_kafka_broker_ApiVersion_supported (rd_kafka_broker_t *rkb,
-                                              int16_t ApiKey,
-                                              int16_t minver, int16_t maxver,
-                                              int *featuresp) {
-        struct rd_kafka_ApiVersion skel = { .ApiKey = ApiKey };
-        struct rd_kafka_ApiVersion ret = RD_ZERO_INIT, *retp;
-
-        rd_kafka_broker_lock(rkb);
-        retp = bsearch(&skel, rkb->rkb_ApiVersions, rkb->rkb_ApiVersions_cnt,
-                       sizeof(*rkb->rkb_ApiVersions),
-                       rd_kafka_ApiVersion_key_cmp);
-        if (retp)
-                ret = *retp;
-        if (featuresp)
-                *featuresp = rkb->rkb_features;
-        rd_kafka_broker_unlock(rkb);
-
-        if (!retp)
-                return -1;
-
-        if (ret.MaxVer < maxver) {
-                if (ret.MaxVer < minver)
-                        return -1;
-                else
-                        return ret.MaxVer;
-        } else if (ret.MinVer > maxver)
-                return -1;
-        else
-                return maxver;
-}
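
The negotiation above intersects the client's supported range with the broker's advertised range and picks the highest version both sides understand. A standalone sketch of the same decision, with a few example ranges:

    #include <stdio.h>

    /* Return the highest request version in the client's [minver, maxver]
     * that the broker's [broker_min, broker_max] also covers, or -1 if the
     * ranges do not overlap (same decision as above). */
    static int pick_api_version (int minver, int maxver,
                                 int broker_min, int broker_max) {
            if (broker_max < maxver) {
                    if (broker_max < minver)
                            return -1;        /* broker too old */
                    return broker_max;        /* use the broker's newest */
            }
            if (broker_min > maxver)
                    return -1;                /* broker too new */
            return maxver;                    /* use our newest */
    }

    int main (void) {
            printf("%d\n", pick_api_version(0, 3, 0, 2));  /* -> 2  */
            printf("%d\n", pick_api_version(2, 3, 0, 1));  /* -> -1 */
            printf("%d\n", pick_api_version(0, 3, 1, 7));  /* -> 3  */
            return 0;
    }
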
-
-
-/**
- * Locks: rd_kafka_broker_lock() MUST be held.
- * Locality: broker thread
- */
-void rd_kafka_broker_set_state (rd_kafka_broker_t *rkb, int state) {
-	if ((int)rkb->rkb_state == state)
-		return;
-
-	rd_kafka_dbg(rkb->rkb_rk, BROKER, "STATE",
-		     "%s: Broker changed state %s -> %s",
-		     rkb->rkb_name,
-		     rd_kafka_broker_state_names[rkb->rkb_state],
-		     rd_kafka_broker_state_names[state]);
-
-	if (rkb->rkb_source == RD_KAFKA_INTERNAL) {
-		/* no-op */
-	} else if (state == RD_KAFKA_BROKER_STATE_DOWN &&
-		   !rkb->rkb_down_reported &&
-		   rkb->rkb_state != RD_KAFKA_BROKER_STATE_APIVERSION_QUERY) {
-		/* Propagate ALL_BROKERS_DOWN event if all brokers are
-		 * now down, unless we're terminating.
-		 * Don't do this if we're querying for ApiVersion since it
-		 * is bound to fail once on older brokers. */
-		if (rd_atomic32_add(&rkb->rkb_rk->rk_broker_down_cnt, 1) ==
-		    rd_atomic32_get(&rkb->rkb_rk->rk_broker_cnt) &&
-		    !rd_atomic32_get(&rkb->rkb_rk->rk_terminate))
-			rd_kafka_op_err(rkb->rkb_rk,
-					RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN,
-					"%i/%i brokers are down",
-					rd_atomic32_get(&rkb->rkb_rk->
-                                                        rk_broker_down_cnt),
-					rd_atomic32_get(&rkb->rkb_rk->
-                                                        rk_broker_cnt));
-		rkb->rkb_down_reported = 1;
-
-	} else if (rkb->rkb_state >= RD_KAFKA_BROKER_STATE_UP &&
-		   rkb->rkb_down_reported) {
-		rd_atomic32_sub(&rkb->rkb_rk->rk_broker_down_cnt, 1);
-		rkb->rkb_down_reported = 0;
-	}
-
-	rkb->rkb_state = state;
-        rkb->rkb_ts_state = rd_clock();
-
-	rd_kafka_brokers_broadcast_state_change(rkb->rkb_rk);
-}
-
-
-/**
- * @brief Locks broker, reads the state, unlocks, and returns
- *        the state.
- * @locks !broker_lock
- * @locality any
- */
-int rd_kafka_broker_get_state (rd_kafka_broker_t *rkb) {
-        int state;
-        rd_kafka_broker_lock(rkb);
-        state = rkb->rkb_state;
-        rd_kafka_broker_unlock(rkb);
-        return state;
-}
-
-
-/**
- * Failure propagation to application.
- * Will tear down connection to broker and trigger a reconnect.
- *
- * If 'fmt' is NULL nothing will be logged or propagated to the application.
- *
- * \p level is the log level, <=LOG_INFO will be logged while =LOG_DEBUG will
- * be debug-logged.
- * 
- * Locality: Broker thread
- */
-void rd_kafka_broker_fail (rd_kafka_broker_t *rkb,
-                           int level, rd_kafka_resp_err_t err,
-			   const char *fmt, ...) {
-	va_list ap;
-	int errno_save = errno;
-	rd_kafka_bufq_t tmpq_waitresp, tmpq;
-        int old_state;
-
-	rd_kafka_assert(rkb->rkb_rk, thrd_is_current(rkb->rkb_thread));
-
-	rd_kafka_dbg(rkb->rkb_rk, BROKER | RD_KAFKA_DBG_PROTOCOL, "BROKERFAIL",
-		     "%s: failed: err: %s: (errno: %s)",
-		     rkb->rkb_name, rd_kafka_err2str(err),
-		     rd_strerror(errno_save));
-
-	rkb->rkb_err.err = errno_save;
-
-	if (rkb->rkb_transport) {
-		rd_kafka_transport_close(rkb->rkb_transport);
-		rkb->rkb_transport = NULL;
-	}
-
-	rkb->rkb_req_timeouts = 0;
-
-	if (rkb->rkb_recv_buf) {
-		rd_kafka_buf_destroy(rkb->rkb_recv_buf);
-		rkb->rkb_recv_buf = NULL;
-	}
-
-	rd_kafka_broker_lock(rkb);
-
-	/* The caller may omit the format if it thinks this is a recurring
-	 * failure, in which case the following things are omitted:
-	 *  - log message
-	 *  - application OP_ERR
-	 *  - metadata request
-	 *
-	 * Don't log anything if this was the termination signal, or if the
-	 * socket disconnected while trying ApiVersionRequest.
-	 */
-	if (fmt &&
-	    !(errno_save == EINTR &&
-	      rd_atomic32_get(&rkb->rkb_rk->rk_terminate)) &&
-	    !(err == RD_KAFKA_RESP_ERR__TRANSPORT &&
-	      rkb->rkb_state == RD_KAFKA_BROKER_STATE_APIVERSION_QUERY)) {
-		int of;
-
-		/* Insert broker name in log message if it fits. */
-		of = rd_snprintf(rkb->rkb_err.msg, sizeof(rkb->rkb_err.msg),
-			      "%s: ", rkb->rkb_name);
-		if (of >= (int)sizeof(rkb->rkb_err.msg))
-			of = 0;
-		va_start(ap, fmt);
-		rd_vsnprintf(rkb->rkb_err.msg+of,
-			  sizeof(rkb->rkb_err.msg)-of, fmt, ap);
-		va_end(ap);
-
-                if (level >= LOG_DEBUG)
-                        rd_kafka_dbg(rkb->rkb_rk, BROKER, "FAIL",
-                                     "%s", rkb->rkb_err.msg);
-                else {
-                        /* Don't log if an error callback is registered */
-                        if (!rkb->rkb_rk->rk_conf.error_cb)
-                                rd_kafka_log(rkb->rkb_rk, level, "FAIL",
-                                             "%s", rkb->rkb_err.msg);
-                        /* Send ERR op back to application for processing. */
-                        rd_kafka_op_err(rkb->rkb_rk, err,
-                                        "%s", rkb->rkb_err.msg);
-                }
-	}
-
-	/* If we're currently asking for ApiVersion and the connection
-	 * went down it probably means the broker does not support that request
-	 * and tore down the connection. In this case we disable that feature flag. */
-	if (rkb->rkb_state == RD_KAFKA_BROKER_STATE_APIVERSION_QUERY)
-		rd_kafka_broker_feature_disable(rkb, RD_KAFKA_FEATURE_APIVERSION);
-
-	/* Set broker state */
-        old_state = rkb->rkb_state;
-	rd_kafka_broker_set_state(rkb, RD_KAFKA_BROKER_STATE_DOWN);
-
-	/* Unlock broker since a requeue will try to lock it. */
-	rd_kafka_broker_unlock(rkb);
-
-	/*
-	 * Purge all buffers
-	 * (put bufs on a temporary queue since bufs may be requeued,
-	 *  make sure outstanding requests are re-enqueued before
-	 *  bufs on outbufs queue.)
-	 */
-	rd_kafka_bufq_init(&tmpq_waitresp);
-	rd_kafka_bufq_init(&tmpq);
-	rd_kafka_bufq_concat(&tmpq_waitresp, &rkb->rkb_waitresps);
-	rd_kafka_bufq_concat(&tmpq, &rkb->rkb_outbufs);
-        rd_atomic32_init(&rkb->rkb_blocking_request_cnt, 0);
-
-	/* Purge the buffers (might get re-enqueued in case of retries) */
-	rd_kafka_bufq_purge(rkb, &tmpq_waitresp, err);
-
-	/* Put the outbufs back on queue */
-	rd_kafka_bufq_concat(&rkb->rkb_outbufs, &tmpq);
-
-	/* Update bufq for connection reset:
-	 *  - Purge connection-setup requests from outbufs since they will be
-	 *    reissued on the next connect.
-	 *  - Reset any partially sent buffer's offset.
-	 */
-	rd_kafka_bufq_connection_reset(rkb, &rkb->rkb_outbufs);
-
-	/* Extra debugging for tracking termination-hang issues:
-	 * show what is keeping this broker from decommissioning. */
-	if (rd_kafka_terminating(rkb->rkb_rk) &&
-	    !rd_kafka_broker_terminating(rkb)) {
-		rd_rkb_dbg(rkb, BROKER | RD_KAFKA_DBG_PROTOCOL, "BRKTERM",
-			   "terminating: broker still has %d refcnt(s), "
-			   "%"PRId32" buffer(s), %d partition(s)",
-			   rd_refcnt_get(&rkb->rkb_refcnt),
-			   rd_kafka_bufq_cnt(&rkb->rkb_outbufs),
-			   rkb->rkb_toppar_cnt);
-		rd_kafka_bufq_dump(rkb, "BRKOUTBUFS", &rkb->rkb_outbufs);
-#if ENABLE_SHAREDPTR_DEBUG
-		if (rd_refcnt_get(&rkb->rkb_refcnt) > 1) {
-			rd_rkb_dbg(rkb, BROKER, "BRKTERM",
-				   "Dumping shared pointers: "
-				   "this broker is %p", rkb);
-			rd_shared_ptrs_dump();
-		}
-#endif
-	}
-
-
-        /* Query for topic leaders to quickly pick up on failover. */
-        if (fmt && err != RD_KAFKA_RESP_ERR__DESTROY &&
-            old_state >= RD_KAFKA_BROKER_STATE_UP)
-                rd_kafka_metadata_refresh_known_topics(rkb->rkb_rk, NULL,
-                                                       1/*force*/,
-                                                       "broker down");
-}
-
-
-
-
-
-/**
- * Scan bufq for buffer timeouts, trigger buffer callback on timeout.
- *
- * If \p partial_cntp is non-NULL any partially sent buffers will increase
- * the provided counter by 1.
- *
- * @returns the number of timed out buffers.
- *
- * @locality broker thread
- */
-static int rd_kafka_broker_bufq_timeout_scan (rd_kafka_broker_t *rkb,
-					      int is_waitresp_q,
-					      rd_kafka_bufq_t *rkbq,
-					      int *partial_cntp,
-					      rd_kafka_resp_err_t err,
-					      rd_ts_t now) {
-	rd_kafka_buf_t *rkbuf, *tmp;
-	int cnt = 0;
-
-	TAILQ_FOREACH_SAFE(rkbuf, &rkbq->rkbq_bufs, rkbuf_link, tmp) {
-
-		if (likely(now && rkbuf->rkbuf_ts_timeout > now))
-			continue;
-
-                if (partial_cntp && rd_slice_offset(&rkbuf->rkbuf_reader) > 0)
-                        (*partial_cntp)++;
-
-		/* Convert rkbuf_ts_sent to elapsed time since request */
-		if (rkbuf->rkbuf_ts_sent)
-			rkbuf->rkbuf_ts_sent = now - rkbuf->rkbuf_ts_sent;
-		else
-			rkbuf->rkbuf_ts_sent = now - rkbuf->rkbuf_ts_enq;
-
-		rd_kafka_bufq_deq(rkbq, rkbuf);
-
-		if (is_waitresp_q && rkbuf->rkbuf_flags & RD_KAFKA_OP_F_BLOCKING
-		    && rd_atomic32_sub(&rkb->rkb_blocking_request_cnt, 1) == 0)
-			rd_kafka_brokers_broadcast_state_change(rkb->rkb_rk);
-
-                rd_kafka_buf_callback(rkb->rkb_rk, rkb, err, NULL, rkbuf);
-		cnt++;
-	}
-
-	return cnt;
-}
-
-
-/**
- * Scan the wait-response and outbuf queues for message timeouts.
- *
- * Locality: Broker thread
- */
-static void rd_kafka_broker_timeout_scan (rd_kafka_broker_t *rkb, rd_ts_t now) {
-	int req_cnt, retry_cnt, q_cnt;
-
-	rd_kafka_assert(rkb->rkb_rk, thrd_is_current(rkb->rkb_thread));
-
-	/* Outstanding requests waiting for response */
-	req_cnt = rd_kafka_broker_bufq_timeout_scan(
-		rkb, 1, &rkb->rkb_waitresps, NULL,
-		RD_KAFKA_RESP_ERR__TIMED_OUT, now);
-	/* Requests in retry queue */
-	retry_cnt = rd_kafka_broker_bufq_timeout_scan(
-		rkb, 0, &rkb->rkb_retrybufs, NULL,
-		RD_KAFKA_RESP_ERR__TIMED_OUT, now);
-	/* Requests in local queue not sent yet. */
-	q_cnt = rd_kafka_broker_bufq_timeout_scan(
-		rkb, 0, &rkb->rkb_outbufs, &req_cnt,
-		RD_KAFKA_RESP_ERR__TIMED_OUT, now);
-
-	if (req_cnt + retry_cnt + q_cnt > 0) {
-		rd_rkb_dbg(rkb, MSG|RD_KAFKA_DBG_BROKER,
-			   "REQTMOUT", "Timed out %i+%i+%i requests",
-			   req_cnt, retry_cnt, q_cnt);
-
-                /* Fail the broker if socket.max.fails is configured and
-                 * now exceeded. */
-                rkb->rkb_req_timeouts   += req_cnt + q_cnt;
-                rd_atomic64_add(&rkb->rkb_c.req_timeouts, req_cnt + q_cnt);
-
-		/* If this was an in-flight request that timed out, or
-		 * the other queues has reached the socket.max.fails threshold,
-		 * we need to take down the connection. */
-                if ((req_cnt > 0 ||
-		     (rkb->rkb_rk->rk_conf.socket_max_fails &&
-		      rkb->rkb_req_timeouts >=
-		      rkb->rkb_rk->rk_conf.socket_max_fails)) &&
-                    rkb->rkb_state >= RD_KAFKA_BROKER_STATE_UP) {
-                        char rttinfo[32];
-                        /* Print average RTT (if avail) to help diagnose. */
-                        rd_avg_calc(&rkb->rkb_avg_rtt, now);
-                        if (rkb->rkb_avg_rtt.ra_v.avg)
-                                rd_snprintf(rttinfo, sizeof(rttinfo),
-                                            " (average rtt %.3fms)",
-                                            (float)(rkb->rkb_avg_rtt.ra_v.avg/
-                                                    1000.0f));
-                        else
-                                rttinfo[0] = 0;
-                        errno = ETIMEDOUT;
-                        rd_kafka_broker_fail(rkb, LOG_ERR,
-                                             RD_KAFKA_RESP_ERR__MSG_TIMED_OUT,
-                                             "%i request(s) timed out: "
-                                             "disconnect%s",
-                                             rkb->rkb_req_timeouts, rttinfo);
-                }
-        }
-}
-
-
-
-static ssize_t
-rd_kafka_broker_send (rd_kafka_broker_t *rkb, rd_slice_t *slice) {
-	ssize_t r;
-	char errstr[128];
-
-	rd_kafka_assert(rkb->rkb_rk, rkb->rkb_state >= RD_KAFKA_BROKER_STATE_UP);
-	rd_kafka_assert(rkb->rkb_rk, rkb->rkb_transport);
-
-        r = rd_kafka_transport_send(rkb->rkb_transport, slice,
-                                    errstr, sizeof(errstr));
-
-	if (r == -1) {
-		rd_kafka_broker_fail(rkb, LOG_ERR, RD_KAFKA_RESP_ERR__TRANSPORT,
-                                     "Send failed: %s", errstr);
-		rd_atomic64_add(&rkb->rkb_c.tx_err, 1);
-		return -1;
-	}
-
-	rd_atomic64_add(&rkb->rkb_c.tx_bytes, r);
-	rd_atomic64_add(&rkb->rkb_c.tx, 1);
-	return r;
-}
-
-
-
-
-static int rd_kafka_broker_resolve (rd_kafka_broker_t *rkb) {
-	const char *errstr;
-
-	if (rkb->rkb_rsal &&
-	    rkb->rkb_t_rsal_last + rkb->rkb_rk->rk_conf.broker_addr_ttl <
-	    time(NULL)) { // FIXME: rd_clock()
-		/* Address list has expired. */
-		rd_sockaddr_list_destroy(rkb->rkb_rsal);
-		rkb->rkb_rsal = NULL;
-	}
-
-	if (!rkb->rkb_rsal) {
-		/* Resolve */
-
-		rkb->rkb_rsal = rd_getaddrinfo(rkb->rkb_nodename,
-					       RD_KAFKA_PORT_STR,
-					       AI_ADDRCONFIG,
-					       rkb->rkb_rk->rk_conf.
-                                               broker_addr_family,
-                                               SOCK_STREAM,
-					       IPPROTO_TCP, &errstr);
-
-		if (!rkb->rkb_rsal) {
-                        rd_kafka_broker_fail(rkb, LOG_ERR,
-                                             RD_KAFKA_RESP_ERR__RESOLVE,
-                                             /* Avoid duplicate log messages */
-                                             rkb->rkb_err.err == errno ?
-                                             NULL :
-                                             "Failed to resolve '%s': %s",
-                                             rkb->rkb_nodename, errstr);
-			return -1;
-		}
-	}
-
-	return 0;
-}
-
-
-static void rd_kafka_broker_buf_enq0 (rd_kafka_broker_t *rkb,
-				      rd_kafka_buf_t *rkbuf, int at_head) {
-	rd_kafka_assert(rkb->rkb_rk, thrd_is_current(rkb->rkb_thread));
-
-        rkbuf->rkbuf_ts_enq = rd_clock();
-
-        /* Set timeout if not already set */
-        if (!rkbuf->rkbuf_ts_timeout)
-        	rkbuf->rkbuf_ts_timeout = rkbuf->rkbuf_ts_enq +
-                        rkb->rkb_rk->rk_conf.socket_timeout_ms * 1000;
-
-	if (unlikely(at_head)) {
-		/* Insert message at head of queue */
-		rd_kafka_buf_t *prev, *after = NULL;
-
-		/* Put us behind any flash messages and partially sent buffers.
-		 * We need to check if buf corrid is set rather than
-		 * rkbuf_of since SSL_write may return 0 and expect the
-		 * exact same arguments the next call. */
-		TAILQ_FOREACH(prev, &rkb->rkb_outbufs.rkbq_bufs, rkbuf_link) {
-			if (!(prev->rkbuf_flags & RD_KAFKA_OP_F_FLASH) &&
-			    prev->rkbuf_corrid == 0)
-				break;
-			after = prev;
-		}
-
-		if (after)
-			TAILQ_INSERT_AFTER(&rkb->rkb_outbufs.rkbq_bufs,
-					   after, rkbuf, rkbuf_link);
-		else
-			TAILQ_INSERT_HEAD(&rkb->rkb_outbufs.rkbq_bufs,
-					  rkbuf, rkbuf_link);
-	} else {
-		/* Insert message at tail of queue */
-		TAILQ_INSERT_TAIL(&rkb->rkb_outbufs.rkbq_bufs,
-				  rkbuf, rkbuf_link);
-	}
-
-	(void)rd_atomic32_add(&rkb->rkb_outbufs.rkbq_cnt, 1);
-	(void)rd_atomic32_add(&rkb->rkb_outbufs.rkbq_msg_cnt,
-                            rd_atomic32_get(&rkbuf->rkbuf_msgq.rkmq_msg_cnt));
-}
-
-
-/**
- * Finalize a stuffed rkbuf for sending to broker.
- */
-static void rd_kafka_buf_finalize (rd_kafka_t *rk, rd_kafka_buf_t *rkbuf) {
-        size_t totsize;
-
-        /* Calculate total request buffer length. */
-        totsize = rd_buf_len(&rkbuf->rkbuf_buf) - 4;
-        rd_assert(totsize <= (size_t)rk->rk_conf.max_msg_size);
-
-        /* Set up a buffer reader for sending the buffer. */
-        rd_slice_init_full(&rkbuf->rkbuf_reader, &rkbuf->rkbuf_buf);
-
-        /**
-         * Update request header fields
-         */
-        /* Total request length */
-        rd_kafka_buf_update_i32(rkbuf, 0, (int32_t)totsize);
-
-        /* ApiVersion */
-        rd_kafka_buf_update_i16(rkbuf, 4+2, rkbuf->rkbuf_reqhdr.ApiVersion);
-}
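
Finalizing patches the total length into the first four bytes only after the body has been written, since the size is not known up front. A standalone sketch of that length-prefix framing on a plain byte array (illustrative body; big-endian size as in the Kafka framing):

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    int main (void) {
            uint8_t frame[64];
            size_t of = 4;                       /* reserve room for the size */

            const char *body = "fake-request-body";
            size_t body_len = strlen(body);
            memcpy(frame + of, body, body_len);  /* "write" the request body */
            of += body_len;

            uint32_t size = (uint32_t)(of - 4);  /* size excludes the prefix */
            frame[0] = (uint8_t)(size >> 24);    /* patch it in, big-endian */
            frame[1] = (uint8_t)(size >> 16);
            frame[2] = (uint8_t)(size >> 8);
            frame[3] = (uint8_t)size;

            printf("frame is %zu bytes, payload %u bytes\n", of, size);
            return 0;
    }
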
-
-
-void rd_kafka_broker_buf_enq1 (rd_kafka_broker_t *rkb,
-                               rd_kafka_buf_t *rkbuf,
-                               rd_kafka_resp_cb_t *resp_cb,
-                               void *opaque) {
-
-
-        rkbuf->rkbuf_cb     = resp_cb;
-	rkbuf->rkbuf_opaque = opaque;
-
-        rd_kafka_buf_finalize(rkb->rkb_rk, rkbuf);
-
-	rd_kafka_broker_buf_enq0(rkb, rkbuf,
-				 (rkbuf->rkbuf_flags & RD_KAFKA_OP_F_FLASH)?
-				 1/*head*/: 0/*tail*/);
-}
-
-
-/**
- * Enqueue buffer on broker's xmit queue, but fail buffer immediately
- * if broker is not up.
- *
- * Locality: broker thread
- */
-static int rd_kafka_broker_buf_enq2 (rd_kafka_broker_t *rkb,
-				      rd_kafka_buf_t *rkbuf) {
-        if (unlikely(rkb->rkb_source == RD_KAFKA_INTERNAL)) {
-                /* Fail request immediately if this is the internal broker. */
-                rd_kafka_buf_callback(rkb->rkb_rk, rkb,
-				      RD_KAFKA_RESP_ERR__TRANSPORT,
-                                      NULL, rkbuf);
-                return -1;
-        }
-
-	rd_kafka_broker_buf_enq0(rkb, rkbuf,
-				 (rkbuf->rkbuf_flags & RD_KAFKA_OP_F_FLASH)?
-				 1/*head*/: 0/*tail*/);
-
-	return 0;
-}
-
-
-
-/**
- * Enqueue buffer for transmission.
- * Responses are enqueued on 'replyq' (RD_KAFKA_OP_RECV_BUF)
- *
- * Locality: any thread
- */
-void rd_kafka_broker_buf_enq_replyq (rd_kafka_broker_t *rkb,
-                                     rd_kafka_buf_t *rkbuf,
-                                     rd_kafka_replyq_t replyq,
-                                     rd_kafka_resp_cb_t *resp_cb,
-                                     void *opaque) {
-
-        assert(rkbuf->rkbuf_rkb == rkb);
-        if (resp_cb) {
-                rkbuf->rkbuf_replyq = replyq;
-                rkbuf->rkbuf_cb     = resp_cb;
-                rkbuf->rkbuf_opaque = opaque;
-        } else {
-		rd_dassert(!replyq.q);
-	}
-
-        rd_kafka_buf_finalize(rkb->rkb_rk, rkbuf);
-
-
-	if (thrd_is_current(rkb->rkb_thread)) {
-		rd_kafka_broker_buf_enq2(rkb, rkbuf);
-
-	} else {
-		rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_XMIT_BUF);
-		rko->rko_u.xbuf.rkbuf = rkbuf;
-		rd_kafka_q_enq(rkb->rkb_ops, rko);
-	}
-}
-
-
-
-
-/**
- * @returns the current broker state change version.
- *          Pass this value to future rd_kafka_brokers_wait_state_change() calls
- *          to avoid the race condition where a state-change happens between
- *          an initial call to some API that fails and the subsequent
- *          .._wait_state_change() call.
- */
-int rd_kafka_brokers_get_state_version (rd_kafka_t *rk) {
-	int version;
-	mtx_lock(&rk->rk_broker_state_change_lock);
-	version = rk->rk_broker_state_change_version;
-	mtx_unlock(&rk->rk_broker_state_change_lock);
-	return version;
-}
-
-/**
- * @brief Wait at most \p timeout_ms for any state change for any broker.
- *        \p stored_version is the value previously returned by
- *        rd_kafka_brokers_get_state_version() prior to another API call
- *        that failed due to invalid state.
- *
- * Triggers:
- *   - broker state changes
- *   - broker transitioning from blocking to non-blocking
- *   - partition leader changes
- *   - group state changes
- *
- * @remark There is no guarantee that a state change actually took place.
- *
- * @returns 1 if a state change was signaled (maybe), else 0 (timeout)
- *
- * @locality any thread
- */
-int rd_kafka_brokers_wait_state_change (rd_kafka_t *rk, int stored_version,
-					int timeout_ms) {
-	int r;
-	mtx_lock(&rk->rk_broker_state_change_lock);
-	if (stored_version != rk->rk_broker_state_change_version)
-		r = 1;
-	else
-		r = cnd_timedwait_ms(&rk->rk_broker_state_change_cnd,
-				     &rk->rk_broker_state_change_lock,
-				     timeout_ms) == thrd_success;
-	mtx_unlock(&rk->rk_broker_state_change_lock);
-	return r;
-}
-
-
-/**
- * @brief Broadcast broker state change to listeners, if any.
- *
- * @locality any thread
- */
-void rd_kafka_brokers_broadcast_state_change (rd_kafka_t *rk) {
-	rd_kafka_dbg(rk, GENERIC, "BROADCAST",
-		     "Broadcasting state change");
-	mtx_lock(&rk->rk_broker_state_change_lock);
-	rk->rk_broker_state_change_version++;
-	cnd_broadcast(&rk->rk_broker_state_change_cnd);
-	mtx_unlock(&rk->rk_broker_state_change_lock);
-}
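
Together, the three functions above implement a versioned wait: snapshot the version, attempt the operation, and wait only while the version is still unchanged, so a change that lands in between cannot be missed. A standalone sketch of the same pattern using plain pthreads (the real code additionally bounds the wait with a timeout):

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
    static int version = 0;

    static int get_version (void) {
            pthread_mutex_lock(&lock);
            int v = version;
            pthread_mutex_unlock(&lock);
            return v;
    }

    static void broadcast_change (void) {
            pthread_mutex_lock(&lock);
            version++;
            pthread_cond_broadcast(&cond);
            pthread_mutex_unlock(&lock);
    }

    static void wait_change (int stored_version) {
            pthread_mutex_lock(&lock);
            while (version == stored_version)   /* already changed? don't wait */
                    pthread_cond_wait(&cond, &lock);
            pthread_mutex_unlock(&lock);
    }

    static void *changer (void *arg) {
            (void)arg;
            sleep(1);
            broadcast_change();                 /* e.g. a broker came up */
            return NULL;
    }

    int main (void) {
            int v = get_version();              /* snapshot before the fallible call */
            pthread_t thr;
            pthread_create(&thr, NULL, changer, NULL);
            /* ... some call fails here because no broker is usable ... */
            wait_change(v);                     /* cannot miss a change made after v */
            printf("state changed, retrying\n");
            pthread_join(thr, NULL);
            return 0;
    }
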
-
-
-/**
- * Returns a random broker (with refcnt increased) in state 'state'.
- * Uses Reservoir sampling.
- *
- * 'filter' is an optional callback used to filter out undesired brokers.
- * The filter function should return 1 to filter out a broker, or 0 to keep it
- * in the list of eligible brokers to return.
- * rd_kafka_broker_lock() is held during the filter callback.
- *
- * Locks: rd_kafka_rdlock(rk) MUST be held.
- * Locality: any thread
- */
-rd_kafka_broker_t *rd_kafka_broker_any (rd_kafka_t *rk, int state,
-                                        int (*filter) (rd_kafka_broker_t *rkb,
-                                                       void *opaque),
-                                        void *opaque) {
-	rd_kafka_broker_t *rkb, *good = NULL;
-        int cnt = 0;
-
-	TAILQ_FOREACH(rkb, &rk->rk_brokers, rkb_link) {
-		rd_kafka_broker_lock(rkb);
-		if ((int)rkb->rkb_state == state &&
-                    (!filter || !filter(rkb, opaque))) {
-                        if (cnt < 1 || rd_jitter(0, cnt) < 1) {
-                                if (good)
-                                        rd_kafka_broker_destroy(good);
-                                rd_kafka_broker_keep(rkb);
-                                good = rkb;
-                        }
-                        cnt += 1;
-                }
-		rd_kafka_broker_unlock(rkb);
-	}
-
-        return good;
-}
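
Reservoir sampling keeps exactly one candidate while scanning the list once: the i-th eligible broker replaces the current pick with probability 1/(i+1), which leaves every eligible broker equally likely after the scan. A standalone sketch with illustrative broker names:

    #include <stdio.h>
    #include <stdlib.h>
    #include <time.h>

    int main (void) {
            const char *brokers[] = { "broker1", "broker2", "broker3", "broker4" };
            int n = (int)(sizeof(brokers) / sizeof(*brokers));
            const char *pick = NULL;
            int eligible = 0;

            srand((unsigned)time(NULL));
            for (int i = 0; i < n; i++) {
                    /* Pretend every broker passes the state/filter check. */
                    if (rand() % (eligible + 1) == 0)
                            pick = brokers[i];
                    eligible++;
            }
            printf("picked %s out of %d eligible broker(s)\n", pick, eligible);
            return 0;
    }
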
-
-
-/**
- * @brief Spend at most \p timeout_ms to acquire a usable (Up && non-blocking)
- *        broker.
- *
- * @returns A probably usable broker with increased refcount, or NULL on timeout
- * @locks rd_kafka_*lock() if !do_lock
- * @locality any
- */
-rd_kafka_broker_t *rd_kafka_broker_any_usable (rd_kafka_t *rk,
-                                                int timeout_ms,
-                                                int do_lock) {
-	const rd_ts_t ts_end = rd_timeout_init(timeout_ms);
-
-	while (1) {
-		rd_kafka_broker_t *rkb;
-		int remains;
-		int version = rd_kafka_brokers_get_state_version(rk);
-
-                /* Try non-blocking (e.g., non-fetching) brokers first. */
-                if (do_lock)
-                        rd_kafka_rdlock(rk);
-                rkb = rd_kafka_broker_any(rk, RD_KAFKA_BROKER_STATE_UP,
-                                          rd_kafka_broker_filter_non_blocking,
-                                          NULL);
-                if (!rkb)
-                        rkb = rd_kafka_broker_any(rk, RD_KAFKA_BROKER_STATE_UP,
-                                                  NULL, NULL);
-                if (do_lock)
-                        rd_kafka_rdunlock(rk);
-
-                if (rkb)
-                        return rkb;
-
-		remains = rd_timeout_remains(ts_end);
-		if (rd_timeout_expired(remains))
-			return NULL;
-
-		rd_kafka_brokers_wait_state_change(rk, version, remains);
-	}
-
-	return NULL;
-}
-
-
-
-/**
- * Returns a broker in state `state`, preferring the one with
- * matching `broker_id`.
- * Uses Reservoir sampling.
- *
- * Locks: rd_kafka_rdlock(rk) MUST be held.
- * Locality: any thread
- */
-rd_kafka_broker_t *rd_kafka_broker_prefer (rd_kafka_t *rk, int32_t broker_id,
-					   int state) {
-	rd_kafka_broker_t *rkb, *good = NULL;
-        int cnt = 0;
-
-	TAILQ_FOREACH(rkb, &rk->rk_brokers, rkb_link) {
-		rd_kafka_broker_lock(rkb);
-		if ((int)rkb->rkb_state == state) {
-                        if (broker_id != -1 && rkb->rkb_nodeid == broker_id) {
-                                if (good)
-                                        rd_kafka_broker_destroy(good);
-                                rd_kafka_broker_keep(rkb);
-                                good = rkb;
-                                rd_kafka_broker_unlock(rkb);
-                                break;
-                        }
-                        if (cnt < 1 || rd_jitter(0, cnt) < 1) {
-                                if (good)
-                                        rd_kafka_broker_destroy(good);
-                                rd_kafka_broker_keep(rkb);
-                                good = rkb;
-                        }
-                        cnt += 1;
-                }
-		rd_kafka_broker_unlock(rkb);
-	}
-
-        return good;
-}
-
-
-
-
-
-
-/**
- * Find a waitresp (rkbuf awaiting response) by the correlation id.
- */
-static rd_kafka_buf_t *rd_kafka_waitresp_find (rd_kafka_broker_t *rkb,
-					       int32_t corrid) {
-	rd_kafka_buf_t *rkbuf;
-	rd_ts_t now = rd_clock();
-
-	rd_kafka_assert(rkb->rkb_rk, thrd_is_current(rkb->rkb_thread));
-
-	TAILQ_FOREACH(rkbuf, &rkb->rkb_waitresps.rkbq_bufs, rkbuf_link)
-		if (rkbuf->rkbuf_corrid == corrid) {
-			/* Convert ts_sent to RTT */
-			rkbuf->rkbuf_ts_sent = now - rkbuf->rkbuf_ts_sent;
-			rd_avg_add(&rkb->rkb_avg_rtt, rkbuf->rkbuf_ts_sent);
-
-                        if (rkbuf->rkbuf_flags & RD_KAFKA_OP_F_BLOCKING &&
-			    rd_atomic32_sub(&rkb->rkb_blocking_request_cnt,
-					    1) == 1)
-				rd_kafka_brokers_broadcast_state_change(
-					rkb->rkb_rk);
-
-			rd_kafka_bufq_deq(&rkb->rkb_waitresps, rkbuf);
-			return rkbuf;
-		}
-	return NULL;
-}
-
-
-
-
-/**
- * Map a response message to a request.
- */
-static int rd_kafka_req_response (rd_kafka_broker_t *rkb,
-				  rd_kafka_buf_t *rkbuf) {
-	rd_kafka_buf_t *req;
-
-	rd_kafka_assert(rkb->rkb_rk, thrd_is_current(rkb->rkb_thread));
-
-
-	/* Find corresponding request message by correlation id */
-	if (unlikely(!(req =
-		       rd_kafka_waitresp_find(rkb,
-					      rkbuf->rkbuf_reshdr.CorrId)))) {
-		/* unknown response. probably due to request timeout */
-                rd_atomic64_add(&rkb->rkb_c.rx_corrid_err, 1);
-		rd_rkb_dbg(rkb, BROKER, "RESPONSE",
-			   "Response for unknown CorrId %"PRId32" (timed out?)",
-			   rkbuf->rkbuf_reshdr.CorrId);
-                rd_kafka_buf_destroy(rkbuf);
-                return -1;
-	}
-
-	rd_rkb_dbg(rkb, PROTOCOL, "RECV",
-		   "Received %sResponse (v%hd, %"PRIusz" bytes, CorrId %"PRId32
-		   ", rtt %.2fms)",
-		   rd_kafka_ApiKey2str(req->rkbuf_reqhdr.ApiKey),
-                   req->rkbuf_reqhdr.ApiVersion,
-		   rkbuf->rkbuf_totlen, rkbuf->rkbuf_reshdr.CorrId,
-		   (float)req->rkbuf_ts_sent / 1000.0f);
-
-        /* Set up response reader slice starting past the response header */
-        rd_slice_init(&rkbuf->rkbuf_reader, &rkbuf->rkbuf_buf,
-                      RD_KAFKAP_RESHDR_SIZE,
-                      rd_buf_len(&rkbuf->rkbuf_buf) - RD_KAFKAP_RESHDR_SIZE);
-
-        if (!rkbuf->rkbuf_rkb) {
-                rkbuf->rkbuf_rkb = rkb;
-                rd_kafka_broker_keep(rkbuf->rkbuf_rkb);
-        } else
-                rd_assert(rkbuf->rkbuf_rkb == rkb);
-
-	/* Call callback. */
-        rd_kafka_buf_callback(rkb->rkb_rk, rkb, 0, rkbuf, req);
-
-	return 0;
-}
-
-
-
-
-int rd_kafka_recv (rd_kafka_broker_t *rkb) {
-	rd_kafka_buf_t *rkbuf;
-	ssize_t r;
-        /* errstr is not set by buf_read errors, so default it here. */
-        char errstr[512] = "Protocol parse failure";
-        rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;
-	const int log_decode_errors = LOG_ERR;
-
-
-        /* It is impossible to estimate the correct size of the response
-         * so we split the read up in two parts: first we read the protocol
-         * length and correlation id (i.e., the Response header), and then
-         * when we know the full length of the response we allocate a new
-         * buffer and call receive again.
-         * All of this is done in an async fashion (e.g., partial reads).
-         */
-	if (!(rkbuf = rkb->rkb_recv_buf)) {
-		/* No receive in progress: create new buffer */
-
-                rkbuf = rd_kafka_buf_new(2, RD_KAFKAP_RESHDR_SIZE);
-
-		rkb->rkb_recv_buf = rkbuf;
-
-                /* Set up buffer reader for the response header. */
-                rd_buf_write_ensure(&rkbuf->rkbuf_buf,
-                                    RD_KAFKAP_RESHDR_SIZE,
-                                    RD_KAFKAP_RESHDR_SIZE);
-        }
-
-        rd_dassert(rd_buf_write_remains(&rkbuf->rkbuf_buf) > 0);
-
-        r = rd_kafka_transport_recv(rkb->rkb_transport, &rkbuf->rkbuf_buf,
-                                    errstr, sizeof(errstr));
-        if (unlikely(r <= 0)) {
-                if (r == 0)
-                        return 0; /* EAGAIN */
-                err = RD_KAFKA_RESP_ERR__TRANSPORT;
-                rd_atomic64_add(&rkb->rkb_c.rx_err, 1);
-                goto err;
-        }
-
-	if (rkbuf->rkbuf_totlen == 0) {
-		/* Packet length not known yet. */
-
-                if (unlikely(rd_buf_write_pos(&rkbuf->rkbuf_buf) <
-                             RD_KAFKAP_RESHDR_SIZE)) {
-			/* Need response header for packet length and corrid.
-			 * Wait for more data. */ 
-			return 0;
-		}
-
-                rd_assert(!rkbuf->rkbuf_rkb);
-                rkbuf->rkbuf_rkb = rkb; /* Protocol parsing code needs
-                                         * the rkb for logging, but we don't
-                                         * want to keep a reference to the
-                                         * broker this early since that extra
-                                         * refcount will mess with the broker's
-                                         * refcount-based termination code. */
-
-                /* Initialize reader */
-                rd_slice_init(&rkbuf->rkbuf_reader, &rkbuf->rkbuf_buf, 0,
-                              RD_KAFKAP_RESHDR_SIZE);
-
-		/* Read protocol header */
-		rd_kafka_buf_read_i32(rkbuf, &rkbuf->rkbuf_reshdr.Size);
-		rd_kafka_buf_read_i32(rkbuf, &rkbuf->rkbuf_reshdr.CorrId);
-
-                rkbuf->rkbuf_rkb = NULL; /* Reset */
-
-		rkbuf->rkbuf_totlen = rkbuf->rkbuf_reshdr.Size;
-
-		/* Make sure message size is within tolerable limits. */
-		if (rkbuf->rkbuf_totlen < 4/*CorrId*/ ||
-		    rkbuf->rkbuf_totlen >
-		    (size_t)rkb->rkb_rk->rk_conf.recv_max_msg_size) {
-                        rd_snprintf(errstr, sizeof(errstr),
-                                    "Invalid response size %"PRId32" (0..%i): "
-                                    "increase receive.message.max.bytes",
-                                    rkbuf->rkbuf_reshdr.Size,
-                                    rkb->rkb_rk->rk_conf.recv_max_msg_size);
-                        err = RD_KAFKA_RESP_ERR__BAD_MSG;
-			rd_atomic64_add(&rkb->rkb_c.rx_err, 1);
-			goto err;
-		}
-
-		rkbuf->rkbuf_totlen -= 4; /*CorrId*/
-
-		if (rkbuf->rkbuf_totlen > 0) {
-			/* Allocate another buffer that fits all data (short of
-			 * the common response header). We want all
-			 * data to be in contiguous memory. */
-
-                        rd_buf_write_ensure_contig(&rkbuf->rkbuf_buf,
-                                                   rkbuf->rkbuf_totlen);
-		}
-	}
-
-        if (rd_buf_write_pos(&rkbuf->rkbuf_buf) - RD_KAFKAP_RESHDR_SIZE ==
-            rkbuf->rkbuf_totlen) {
-		/* Message is complete, pass it on to the original requester. */
-		rkb->rkb_recv_buf = NULL;
-                rd_atomic64_add(&rkb->rkb_c.rx, 1);
-                rd_atomic64_add(&rkb->rkb_c.rx_bytes,
-                                rd_buf_write_pos(&rkbuf->rkbuf_buf));
-		rd_kafka_req_response(rkb, rkbuf);
-	}
-
-	return 1;
-
- err_parse:
-        err = rkbuf->rkbuf_err;
- err:
-	rd_kafka_broker_fail(rkb,
-                             !rkb->rkb_rk->rk_conf.log_connection_close &&
-                             !strcmp(errstr, "Disconnected") ?
-                             LOG_DEBUG : LOG_ERR, err,
-                             "Receive failed: %s", errstr);
-	return -1;
-}
-
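(A stripped-down illustration of the two-phase framed read described above,
using plain POSIX calls; read_full() and read_framed() are illustrative names
only, and unlike rd_kafka_recv() this sketch blocks instead of handling
partial reads asynchronously.)

#include <arpa/inet.h>
#include <stdint.h>
#include <stdlib.h>
#include <unistd.h>

/* Read exactly len bytes or fail. */
static ssize_t read_full (int fd, void *buf, size_t len) {
        size_t done = 0;
        while (done < len) {
                ssize_t r = read(fd, (char *)buf + done, len - done);
                if (r <= 0)
                        return -1; /* error or connection closed */
                done += (size_t)r;
        }
        return (ssize_t)done;
}

/* Read the 4-byte big-endian Size field first, then the payload
 * (assumes a non-empty payload; caller free()s the result). */
static void *read_framed (int fd, uint32_t *sizep) {
        uint32_t be_size;
        void *payload;

        if (read_full(fd, &be_size, sizeof(be_size)) == -1)
                return NULL;
        *sizep = ntohl(be_size);

        if (!(payload = malloc(*sizep)))
                return NULL;
        if (read_full(fd, payload, *sizep) == -1) {
                free(payload);
                return NULL;
        }
        return payload;
}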
-
-/**
- * Linux version of socket_cb providing race-free CLOEXEC.
- */
-int rd_kafka_socket_cb_linux (int domain, int type, int protocol,
-                              void *opaque) {
-#ifdef SOCK_CLOEXEC
-        return socket(domain, type | SOCK_CLOEXEC, protocol);
-#else
-        return rd_kafka_socket_cb_generic(domain, type, protocol, opaque);
-#endif
-}
-
-/**
- * Fallback version of socket_cb NOT providing race-free CLOEXEC,
- * but setting CLOEXEC after socket creation (if FD_CLOEXEC is defined).
- */
-int rd_kafka_socket_cb_generic (int domain, int type, int protocol,
-                                void *opaque) {
-        int s;
-        int on = 1;
-        s = (int)socket(domain, type, protocol);
-        if (s == -1)
-                return -1;
-#ifdef FD_CLOEXEC
-        fcntl(s, F_SETFD, FD_CLOEXEC, &on);
-#endif
-        return s;
-}
-
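(Both socket callbacks are exported in librdkafka's public rdkafka.h; a
minimal usage sketch, assuming the standard configuration API, that installs
the race-free variant from application code:)

#include <librdkafka/rdkafka.h>

static rd_kafka_t *new_producer_with_cloexec_sockets (void) {
        char errstr[512];
        rd_kafka_conf_t *conf = rd_kafka_conf_new();

        /* Prefer the race-free CLOEXEC callback; on platforms without
         * SOCK_CLOEXEC it falls back to the generic variant itself. */
        rd_kafka_conf_set_socket_cb(conf, rd_kafka_socket_cb_linux);

        /* rd_kafka_new() takes ownership of conf on success. */
        return rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
}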
-
-/**
- * Initiate asynchronous connection attempt to the next address
- * in the broker's address list.
- * While the connect is asynchronous and its IO served in the CONNECT state,
- * the initial name resolve is blocking.
- *
- * Returns -1 on error, else 0.
- */
-static int rd_kafka_broker_connect (rd_kafka_broker_t *rkb) {
-	const rd_sockaddr_inx_t *sinx;
-	char errstr[512];
-
-	rd_rkb_dbg(rkb, BROKER, "CONNECT",
-		"broker in state %s connecting",
-		rd_kafka_broker_state_names[rkb->rkb_state]);
-
-	if (rd_kafka_broker_resolve(rkb) == -1)
-		return -1;
-
-	sinx = rd_sockaddr_list_next(rkb->rkb_rsal);
-
-	rd_kafka_assert(rkb->rkb_rk, !rkb->rkb_transport);
-
-	if (!(rkb->rkb_transport = rd_kafka_transport_connect(rkb, sinx,
-		errstr, sizeof(errstr)))) {
-		/* Avoid duplicate log messages */
-		if (rkb->rkb_err.err == errno)
-			rd_kafka_broker_fail(rkb, LOG_DEBUG,
-                                             RD_KAFKA_RESP_ERR__FAIL, NULL);
-		else
-			rd_kafka_broker_fail(rkb, LOG_ERR,
-                                             RD_KAFKA_RESP_ERR__TRANSPORT,
-					     "%s", errstr);
-		return -1;
-	}
-
-	rd_kafka_broker_lock(rkb);
-	rd_kafka_broker_set_state(rkb, RD_KAFKA_BROKER_STATE_CONNECT);
-	rd_kafka_broker_unlock(rkb);
-
-	return 0;
-}
-
-
-/**
- * @brief Call when connection is ready to transition to fully functional
- *        UP state.
- *
- * @locality Broker thread
- */
-void rd_kafka_broker_connect_up (rd_kafka_broker_t *rkb) {
-
-	rkb->rkb_max_inflight = rkb->rkb_rk->rk_conf.max_inflight;
-        rkb->rkb_err.err = 0;
-
-	rd_kafka_broker_lock(rkb);
-	rd_kafka_broker_set_state(rkb, RD_KAFKA_BROKER_STATE_UP);
-	rd_kafka_broker_unlock(rkb);
-
-        /* Request metadata (async):
-         * try locally known topics first and if there are none try
-         * getting just the broker list. */
-        if (rd_kafka_metadata_refresh_known_topics(NULL, rkb, 0/*dont force*/,
-                                                   "connected") ==
-            RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC)
-                rd_kafka_metadata_refresh_brokers(NULL, rkb, "connected");
-}
-
-
-
-static void rd_kafka_broker_connect_auth (rd_kafka_broker_t *rkb);
-
-
-/**
- * @brief Parses and handles the SaslHandshake (SASL mechanism list) response
- *        and transitions the broker state.
- *
- */
-static void
-rd_kafka_broker_handle_SaslHandshake (rd_kafka_t *rk,
-				      rd_kafka_broker_t *rkb,
-				      rd_kafka_resp_err_t err,
-				      rd_kafka_buf_t *rkbuf,
-				      rd_kafka_buf_t *request,
-				      void *opaque) {
-        const int log_decode_errors = LOG_ERR;
-	int32_t MechCnt;
-	int16_t ErrorCode;
-	int i = 0;
-	char *mechs = "(n/a)";
-	size_t msz, mof = 0;
-
-	if (err == RD_KAFKA_RESP_ERR__DESTROY)
-		return;
-
-        if (err)
-                goto err;
-
-	rd_kafka_buf_read_i16(rkbuf, &ErrorCode);
-        rd_kafka_buf_read_i32(rkbuf, &MechCnt);
-
-	/* Build a CSV string of supported mechanisms. */
-	msz = RD_MIN(511, MechCnt * 32);
-	mechs = rd_alloca(msz);
-	*mechs = '\0';
-
-	for (i = 0 ; i < MechCnt ; i++) {
-		rd_kafkap_str_t mech;
-		rd_kafka_buf_read_str(rkbuf, &mech);
-
-		mof += rd_snprintf(mechs+mof, msz-mof, "%s%.*s",
-				   i ? ",":"", RD_KAFKAP_STR_PR(&mech));
-
-		if (mof >= msz)
-			break;
-        }
-
-	rd_rkb_dbg(rkb,
-		   PROTOCOL | RD_KAFKA_DBG_SECURITY | RD_KAFKA_DBG_BROKER,
-		   "SASLMECHS", "Broker supported SASL mechanisms: %s",
-		   mechs);
-
-	if (ErrorCode) {
-		err = ErrorCode;
-		goto err;
-	}
-
-	/* Circle back to connect_auth() to start proper AUTH state. */
-	rd_kafka_broker_connect_auth(rkb);
-	return;
-
- err_parse:
-        err = rkbuf->rkbuf_err;
- err:
-	rd_kafka_broker_fail(rkb, LOG_ERR,
-			     RD_KAFKA_RESP_ERR__AUTHENTICATION,
-			     "SASL %s mechanism handshake failed: %s: "
-			     "broker's supported mechanisms: %s",
-                             rkb->rkb_rk->rk_conf.sasl.mechanisms,
-			     rd_kafka_err2str(err), mechs);
-}
-
-
-/**
- * @brief Transition state to:
- *        - AUTH_HANDSHAKE (if SASL is configured and handshakes supported)
- *        - AUTH (if SASL is configured but no handshake is required or
- *                not supported, or has already taken place.)
- *        - UP (if SASL is not configured)
- */
-static void rd_kafka_broker_connect_auth (rd_kafka_broker_t *rkb) {
-
-	if ((rkb->rkb_proto == RD_KAFKA_PROTO_SASL_PLAINTEXT ||
-	     rkb->rkb_proto == RD_KAFKA_PROTO_SASL_SSL)) {
-
-		rd_rkb_dbg(rkb, SECURITY | RD_KAFKA_DBG_BROKER, "AUTH",
-			   "Auth in state %s (handshake %ssupported)",
-			   rd_kafka_broker_state_names[rkb->rkb_state],
-			   (rkb->rkb_features&RD_KAFKA_FEATURE_SASL_HANDSHAKE)
-			   ? "" : "not ");
-
-		/* Broker >= 0.10.0: send request to select mechanism */
-		if (rkb->rkb_state != RD_KAFKA_BROKER_STATE_AUTH_HANDSHAKE &&
-		    (rkb->rkb_features & RD_KAFKA_FEATURE_SASL_HANDSHAKE)) {
-
-			rd_kafka_broker_lock(rkb);
-			rd_kafka_broker_set_state(
-				rkb, RD_KAFKA_BROKER_STATE_AUTH_HANDSHAKE);
-			rd_kafka_broker_unlock(rkb);
-
-			rd_kafka_SaslHandshakeRequest(
-				rkb, rkb->rkb_rk->rk_conf.sasl.mechanisms,
-				RD_KAFKA_NO_REPLYQ,
-				rd_kafka_broker_handle_SaslHandshake,
-				NULL, 1 /* flash */);
-
-		} else {
-			/* Either Handshake succeeded (protocol selected)
-			 * or Handshakes were not supported.
-			 * In both cases continue with authentication. */
-			char sasl_errstr[512];
-
-			rd_kafka_broker_lock(rkb);
-			rd_kafka_broker_set_state(rkb,
-						  RD_KAFKA_BROKER_STATE_AUTH);
-			rd_kafka_broker_unlock(rkb);
-
-			if (rd_kafka_sasl_client_new(
-				    rkb->rkb_transport, sasl_errstr,
-				    sizeof(sasl_errstr)) == -1) {
-				errno = EINVAL;
-				rd_kafka_broker_fail(
-					rkb, LOG_ERR,
-					RD_KAFKA_RESP_ERR__AUTHENTICATION,
-					"Failed to initialize "
-					"SASL authentication: %s",
-					sasl_errstr);
-				return;
-			}
-
-			/* Enter non-Kafka-protocol-framed SASL communication
-			 * state handled in rdkafka_sasl.c */
-			rd_kafka_broker_lock(rkb);
-			rd_kafka_broker_set_state(rkb,
-						  RD_KAFKA_BROKER_STATE_AUTH);
-			rd_kafka_broker_unlock(rkb);
-		}
-
-		return;
-	}
-
-	/* No authentication required. */
-	rd_kafka_broker_connect_up(rkb);
-}
-
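(The SASL path above is driven entirely by client configuration; a minimal
sketch, assuming the standard rd_kafka_conf_set() property API, of enabling
SASL/PLAIN so that connect_auth() goes through the handshake and AUTH states.
The helper name is illustrative only:)

#include <librdkafka/rdkafka.h>

static int conf_enable_sasl_plain (rd_kafka_conf_t *conf,
                                   const char *user, const char *pass) {
        char errstr[512];

        if (rd_kafka_conf_set(conf, "security.protocol", "sasl_plaintext",
                              errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK ||
            rd_kafka_conf_set(conf, "sasl.mechanisms", "PLAIN",
                              errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK ||
            rd_kafka_conf_set(conf, "sasl.username", user,
                              errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK ||
            rd_kafka_conf_set(conf, "sasl.password", pass,
                              errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK)
                return -1;
        return 0;
}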
-
-/**
- * @brief Specify API versions to use for this connection.
- *
- * @param apis is an allocated list of supported APIs.
- *        If NULL the default set will be used based on the
- *        \p broker.version.fallback property.
- * @param api_cnt number of elements in \p apis
- *
- * @remark \p rkb takes ownership of \p apis.
- *
- * @locality Broker thread
- * @locks none
- */
-static void rd_kafka_broker_set_api_versions (rd_kafka_broker_t *rkb,
-					      struct rd_kafka_ApiVersion *apis,
-					      size_t api_cnt) {
-
-        rd_kafka_broker_lock(rkb);
-
-	if (rkb->rkb_ApiVersions)
-		rd_free(rkb->rkb_ApiVersions);
-
-
-	if (!apis) {
-		rd_rkb_dbg(rkb, PROTOCOL | RD_KAFKA_DBG_BROKER, "APIVERSION",
-			   "Using (configuration fallback) %s protocol features",
-			   rkb->rkb_rk->rk_conf.broker_version_fallback);
-
-
-		rd_kafka_get_legacy_ApiVersions(rkb->rkb_rk->rk_conf.
-						broker_version_fallback,
-						&apis, &api_cnt,
-						rkb->rkb_rk->rk_conf.
-						broker_version_fallback);
-
-		/* Make a copy to store on broker. */
-		rd_kafka_ApiVersions_copy(apis, api_cnt, &apis, &api_cnt);
-	}
-
-	rkb->rkb_ApiVersions = apis;
-	rkb->rkb_ApiVersions_cnt = api_cnt;
-
-	/* Update feature set based on supported broker APIs. */
-	rd_kafka_broker_features_set(rkb,
-				     rd_kafka_features_check(rkb, apis, api_cnt));
-
-        rd_kafka_broker_unlock(rkb);
-}
-
-
-/**
- * Handler for ApiVersion response.
- */
-static void
-rd_kafka_broker_handle_ApiVersion (rd_kafka_t *rk,
-				   rd_kafka_broker_t *rkb,
-				   rd_kafka_resp_err_t err,
-				   rd_kafka_buf_t *rkbuf,
-				   rd_kafka_buf_t *request, void *opaque) {
-	struct rd_kafka_ApiVersion *apis;
-	size_t api_cnt;
-
-	if (err == RD_KAFKA_RESP_ERR__DESTROY)
-		return;
-
-	err = rd_kafka_handle_ApiVersion(rk, rkb, err, rkbuf, request,
-					 &apis, &api_cnt);
-
-	if (err) {
-		rd_kafka_broker_fail(rkb, LOG_DEBUG,
-				     RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED,
-				     "ApiVersionRequest failed: %s: "
-				     "probably due to old broker version",
-				     rd_kafka_err2str(err));
-		return;
-	}
-
-	rd_kafka_broker_set_api_versions(rkb, apis, api_cnt);
-
-	rd_kafka_broker_connect_auth(rkb);
-}
-
-
-/**
- * Call when an asynchronous connection attempt completes, either successfully
- * (errstr is NULL) or with a failure (errstr is non-NULL).
- *
- * Locality: broker thread
- */
-void rd_kafka_broker_connect_done (rd_kafka_broker_t *rkb, const char *errstr) {
-
-	if (errstr) {
-		/* Connect failed */
-                rd_kafka_broker_fail(rkb,
-                                     errno != 0 && rkb->rkb_err.err == errno ?
-                                     LOG_DEBUG : LOG_ERR,
-                                     RD_KAFKA_RESP_ERR__TRANSPORT,
-                                     "%s", errstr);
-		return;
-	}
-
-	/* Connect succeeded */
-	rkb->rkb_connid++;
-	rd_rkb_dbg(rkb, BROKER | RD_KAFKA_DBG_PROTOCOL,
-		   "CONNECTED", "Connected (#%d)", rkb->rkb_connid);
-	rkb->rkb_err.err = 0;
-	rkb->rkb_max_inflight = 1; /* Hold back other requests until
-				    * ApiVersion, SaslHandshake, etc
-				    * are done. */
-
-	rd_kafka_transport_poll_set(rkb->rkb_transport, POLLIN);
-
-	if (rkb->rkb_rk->rk_conf.api_version_request &&
-	    rd_interval_immediate(&rkb->rkb_ApiVersion_fail_intvl, 0, 0) > 0) {
-		/* Use ApiVersion to query broker for supported API versions. */
-		rd_kafka_broker_feature_enable(rkb, RD_KAFKA_FEATURE_APIVERSION);
-	}
-
-
-	if (rkb->rkb_features & RD_KAFKA_FEATURE_APIVERSION) {
-		/* Query broker for supported API versions.
-		 * This may fail with a disconnect on non-supporting brokers
-		 * so hold off any other requests until we get a response,
-		 * and if the connection is torn down we disable this feature. */
-		rd_kafka_broker_lock(rkb);
-		rd_kafka_broker_set_state(rkb,RD_KAFKA_BROKER_STATE_APIVERSION_QUERY);
-		rd_kafka_broker_unlock(rkb);
-
-		rd_kafka_ApiVersionRequest(
-			rkb, RD_KAFKA_NO_REPLYQ,
-			rd_kafka_broker_handle_ApiVersion, NULL,
-			1 /*Flash message: prepend to transmit queue*/);
-	} else {
-
-		/* Use configured broker.version.fallback to
-		 * figure out API versions */
-		rd_kafka_broker_set_api_versions(rkb, NULL, 0);
-
-		/* Authenticate if necessary */
-		rd_kafka_broker_connect_auth(rkb);
-	}
-
-}
-
-
-
-/**
- * @brief Checks if the given API request+version is supported by the broker.
- * @returns 1 if supported, else 0.
- * @locality broker thread
- * @locks none
- */
-static RD_INLINE int
-rd_kafka_broker_request_supported (rd_kafka_broker_t *rkb,
-                                   rd_kafka_buf_t *rkbuf) {
-        struct rd_kafka_ApiVersion skel = {
-                .ApiKey = rkbuf->rkbuf_reqhdr.ApiKey
-        };
-        struct rd_kafka_ApiVersion *ret;
-
-        if (unlikely(rkbuf->rkbuf_reqhdr.ApiKey == RD_KAFKAP_ApiVersion))
-                return 1; /* ApiVersion requests are used to detect
-                           * the supported API versions, so should always
-                           * be allowed through. */
-
-        /* First try feature flags, if any, which may cover a larger
-         * set of APIs. */
-        if (rkbuf->rkbuf_features)
-                return (rkb->rkb_features & rkbuf->rkbuf_features) ==
-                        rkbuf->rkbuf_features;
-
-        /* Then try the ApiVersion map. */
-        ret = bsearch(&skel, rkb->rkb_ApiVersions, rkb->rkb_ApiVersions_cnt,
-                      sizeof(*rkb->rkb_ApiVersions),
-                      rd_kafka_ApiVersion_key_cmp);
-        if (!ret)
-                return 0;
-
-        return ret->MinVer <= rkbuf->rkbuf_reqhdr.ApiVersion &&
-                rkbuf->rkbuf_reqhdr.ApiVersion <= ret->MaxVer;
-}
-
-
-/**
- * Send queued messages to broker
- *
- * Locality: io thread
- */
-int rd_kafka_send (rd_kafka_broker_t *rkb) {
-	rd_kafka_buf_t *rkbuf;
-	unsigned int cnt = 0;
-
-	rd_kafka_assert(rkb->rkb_rk, thrd_is_current(rkb->rkb_thread));
-
-	while (rkb->rkb_state >= RD_KAFKA_BROKER_STATE_UP &&
-	       rd_kafka_bufq_cnt(&rkb->rkb_waitresps) < rkb->rkb_max_inflight &&
-	       (rkbuf = TAILQ_FIRST(&rkb->rkb_outbufs.rkbq_bufs))) {
-		ssize_t r;
-                size_t pre_of = rd_slice_offset(&rkbuf->rkbuf_reader);
-
-                /* Check for broker support */
-                if (unlikely(!rd_kafka_broker_request_supported(rkb, rkbuf))) {
-                        rd_kafka_bufq_deq(&rkb->rkb_outbufs, rkbuf);
-                        rd_rkb_dbg(rkb, BROKER | RD_KAFKA_DBG_PROTOCOL,
-                                   "UNSUPPORTED",
-                                   "Failing %sResponse "
-                                   "(v%hd, %"PRIusz" bytes, CorrId %"PRId32"): "
-                                   "request not supported by broker "
-                                   "(missing api.version.request or "
-                                   "incorrect broker.version.fallback config?)",
-                                   rd_kafka_ApiKey2str(rkbuf->rkbuf_reqhdr.
-                                                       ApiKey),
-                                   rkbuf->rkbuf_reqhdr.ApiVersion,
-                                   rkbuf->rkbuf_totlen,
-                                   rkbuf->rkbuf_reshdr.CorrId);
-                        rd_kafka_buf_callback(
-                                rkb->rkb_rk, rkb,
-                                RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE,
-                                NULL, rkbuf);
-                        continue;
-                }
-
-		/* Set CorrId header field, unless this is the latter part
-		 * of a partial send in which case the corrid has already
-		 * been set.
-		 * Due to how SSL_write() will accept a buffer but still
-		 * return 0 in some cases we can't rely on the buffer offset
-		 * but need to use corrid to check this. SSL_write() expects
-		 * us to send the same buffer again when 0 is returned.
-		 */
-		if (rkbuf->rkbuf_corrid == 0 ||
-		    rkbuf->rkbuf_connid != rkb->rkb_connid) {
-                        rd_assert(rd_slice_offset(&rkbuf->rkbuf_reader) == 0);
-			rkbuf->rkbuf_corrid = ++rkb->rkb_corrid;
-			rd_kafka_buf_update_i32(rkbuf, 4+2+2,
-						rkbuf->rkbuf_corrid);
-			rkbuf->rkbuf_connid = rkb->rkb_connid;
-		} else if (pre_of > RD_KAFKAP_REQHDR_SIZE) {
-			rd_kafka_assert(NULL,
-					rkbuf->rkbuf_connid == rkb->rkb_connid);
-                }
-
-		if (0) {
-			rd_rkb_dbg(rkb, PROTOCOL, "SEND",
-				   "Send %s corrid %"PRId32" at "
-				   "offset %"PRIusz"/%"PRIusz,
-				   rd_kafka_ApiKey2str(rkbuf->rkbuf_reqhdr.
-						       ApiKey),
-				   rkbuf->rkbuf_corrid,
-                                   pre_of, rd_slice_size(&rkbuf->rkbuf_reader));
-		}
-
-                if ((r = rd_kafka_broker_send(rkb, &rkbuf->rkbuf_reader)) == -1)
-                        return -1;
-
-                /* Partial send? Continue next time. */
-                if (rd_slice_remains(&rkbuf->rkbuf_reader) > 0) {
-                        rd_rkb_dbg(rkb, PROTOCOL, "SEND",
-                                   "Sent partial %sRequest "
-                                   "(v%hd, "
-                                   "%"PRIdsz"+%"PRIdsz"/%"PRIusz" bytes, "
-                                   "CorrId %"PRId32")",
-                                   rd_kafka_ApiKey2str(rkbuf->rkbuf_reqhdr.
-                                                       ApiKey),
-                                   rkbuf->rkbuf_reqhdr.ApiVersion,
-                                   (ssize_t)pre_of, r,
-                                   rd_slice_size(&rkbuf->rkbuf_reader),
-                                   rkbuf->rkbuf_corrid);
-                        return 0;
-                }
-
-		rd_rkb_dbg(rkb, PROTOCOL, "SEND",
-			   "Sent %sRequest (v%hd, %"PRIusz" bytes @ %"PRIusz", "
-			   "CorrId %"PRId32")",
-			   rd_kafka_ApiKey2str(rkbuf->rkbuf_reqhdr.ApiKey),
-                           rkbuf->rkbuf_reqhdr.ApiVersion,
-                           rd_slice_size(&rkbuf->rkbuf_reader),
-                           pre_of, rkbuf->rkbuf_corrid);
-
-		/* Entire buffer sent, unlink from outbuf */
-		rd_kafka_bufq_deq(&rkb->rkb_outbufs, rkbuf);
-
-		/* Store time for RTT calculation */
-		rkbuf->rkbuf_ts_sent = rd_clock();
-
-                if (rkbuf->rkbuf_flags & RD_KAFKA_OP_F_BLOCKING &&
-		    rd_atomic32_add(&rkb->rkb_blocking_request_cnt, 1) == 1)
-			rd_kafka_brokers_broadcast_state_change(rkb->rkb_rk);
-
-		/* Put buffer on response wait list unless we are not
-		 * expecting a response (required_acks=0). */
-		if (!(rkbuf->rkbuf_flags & RD_KAFKA_OP_F_NO_RESPONSE))
-			rd_kafka_bufq_enq(&rkb->rkb_waitresps, rkbuf);
-		else { /* Call buffer callback for delivery report. */
-                        rd_kafka_buf_callback(rkb->rkb_rk, rkb, 0, NULL, rkbuf);
-                }
-
-		cnt++;
-	}
-
-	return cnt;
-}
-
-
-/**
- * Add 'rkbuf' to broker 'rkb's retry queue.
- */
-void rd_kafka_broker_buf_retry (rd_kafka_broker_t *rkb, rd_kafka_buf_t *rkbuf) {
-
-        /* Restore original replyq since replyq.q will have been NULLed
-         * by buf_callback()/replyq_enq(). */
-        if (!rkbuf->rkbuf_replyq.q && rkbuf->rkbuf_orig_replyq.q) {
-                rkbuf->rkbuf_replyq = rkbuf->rkbuf_orig_replyq;
-                rd_kafka_replyq_clear(&rkbuf->rkbuf_orig_replyq);
-        }
-
-        /* If called from another thread than rkb's broker thread
-         * enqueue the buffer on the broker's op queue. */
-        if (!thrd_is_current(rkb->rkb_thread)) {
-                rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_XMIT_RETRY);
-                rko->rko_u.xbuf.rkbuf = rkbuf;
-                rd_kafka_q_enq(rkb->rkb_ops, rko);
-                return;
-        }
-
-        rd_rkb_dbg(rkb, PROTOCOL, "RETRY",
-                   "Retrying %sRequest (v%hd, %"PRIusz" bytes, retry %d/%d)",
-                   rd_kafka_ApiKey2str(rkbuf->rkbuf_reqhdr.ApiKey),
-                   rkbuf->rkbuf_reqhdr.ApiVersion,
-                   rd_slice_size(&rkbuf->rkbuf_reader),
-                   rkbuf->rkbuf_retries, rkb->rkb_rk->rk_conf.max_retries);
-
-	rd_atomic64_add(&rkb->rkb_c.tx_retries, 1);
-
-	rkbuf->rkbuf_ts_retry = rd_clock() +
-		(rkb->rkb_rk->rk_conf.retry_backoff_ms * 1000);
-        /* Reset send offset */
-        rd_slice_seek(&rkbuf->rkbuf_reader, 0);
-	rkbuf->rkbuf_corrid = 0;
-
-	rd_kafka_bufq_enq(&rkb->rkb_retrybufs, rkbuf);
-}
-
-
-/**
- * Move buffers that have expired their retry backoff time from the 
- * retry queue to the outbuf.
- */
-static void rd_kafka_broker_retry_bufs_move (rd_kafka_broker_t *rkb) {
-	rd_ts_t now = rd_clock();
-	rd_kafka_buf_t *rkbuf;
-
-	while ((rkbuf = TAILQ_FIRST(&rkb->rkb_retrybufs.rkbq_bufs))) {
-		if (rkbuf->rkbuf_ts_retry > now)
-			break;
-
-		rd_kafka_bufq_deq(&rkb->rkb_retrybufs, rkbuf);
-
-		rd_kafka_broker_buf_enq0(rkb, rkbuf, 0/*tail*/);
-	}
-}
-
-
-/**
- * Propagate delivery report for entire message queue.
- */
-void rd_kafka_dr_msgq (rd_kafka_itopic_t *rkt,
-		       rd_kafka_msgq_t *rkmq, rd_kafka_resp_err_t err) {
-        rd_kafka_t *rk = rkt->rkt_rk;
-
-	if (unlikely(rd_kafka_msgq_len(rkmq) == 0))
-	    return;
-
-        /* Call on_acknowledgement() interceptors */
-        rd_kafka_interceptors_on_acknowledgement_queue(rk, rkmq);
-
-        if ((rk->rk_conf.enabled_events & RD_KAFKA_EVENT_DR) &&
-	    (!rk->rk_conf.dr_err_only || err)) {
-		/* Pass all messages to application thread in one op. */
-		rd_kafka_op_t *rko;
-
-		rko = rd_kafka_op_new(RD_KAFKA_OP_DR);
-		rko->rko_err = err;
-		rko->rko_u.dr.s_rkt = rd_kafka_topic_keep(rkt);
-		rd_kafka_msgq_init(&rko->rko_u.dr.msgq);
-
-		/* Move all messages to op's msgq */
-		rd_kafka_msgq_move(&rko->rko_u.dr.msgq, rkmq);
-
-		rd_kafka_q_enq(rk->rk_rep, rko);
-
-	} else {
-		/* No delivery report callback. */
-
-                /* Destroy the messages right away. */
-                rd_kafka_msgq_purge(rk, rkmq);
-	}
-}
-
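(On the application side these per-message reports arrive through the
delivery report callback once rd_kafka_poll() serves the DR op enqueued
above; a minimal sketch using the public callback API:)

#include <stdio.h>
#include <librdkafka/rdkafka.h>

static void dr_msg_cb (rd_kafka_t *rk, const rd_kafka_message_t *rkmessage,
                       void *opaque) {
        (void)rk;
        (void)opaque;

        if (rkmessage->err)
                fprintf(stderr, "Delivery failed: %s\n",
                        rd_kafka_err2str(rkmessage->err));
        else
                fprintf(stderr, "Delivered %u bytes to partition %d\n",
                        (unsigned int)rkmessage->len,
                        (int)rkmessage->partition);
}

/* Registered on the conf object before rd_kafka_new():
 *     rd_kafka_conf_set_dr_msg_cb(conf, dr_msg_cb); */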
-
-
-
-
-
-
-
-
-
-
-/**
- * @brief Map and assign existing partitions to this broker using
- *        the leader-id.
- *
- * @locks none
- * @locality any
- */
-static void rd_kafka_broker_map_partitions (rd_kafka_broker_t *rkb) {
-        rd_kafka_t *rk = rkb->rkb_rk;
-        rd_kafka_itopic_t *rkt;
-        int cnt = 0;
-
-        if (rkb->rkb_nodeid == -1)
-                return;
-
-        rd_kafka_rdlock(rk);
-        TAILQ_FOREACH(rkt, &rk->rk_topics, rkt_link) {
-                int i;
-
-                rd_kafka_topic_wrlock(rkt);
-                for (i = 0 ; i < rkt->rkt_partition_cnt ; i++) {
-                        shptr_rd_kafka_toppar_t *s_rktp = rkt->rkt_p[i];
-                        rd_kafka_toppar_t *rktp = rd_kafka_toppar_s2i(s_rktp);
-
-                        /* Only map unassigned partitions matching this broker */
-                        rd_kafka_toppar_lock(rktp);
-                        if (rktp->rktp_leader_id == rkb->rkb_nodeid &&
-                            !(rktp->rktp_leader && rktp->rktp_next_leader)) {
-                                rd_kafka_toppar_leader_update(
-                                        rktp, rktp->rktp_leader_id, rkb);
-                                cnt++;
-                        }
-                        rd_kafka_toppar_unlock(rktp);
-                }
-                rd_kafka_topic_wrunlock(rkt);
-        }
-        rd_kafka_rdunlock(rk);
-
-        rd_rkb_dbg(rkb, TOPIC|RD_KAFKA_DBG_BROKER, "LEADER",
-                   "Mapped %d partition(s) to broker", cnt);
-}
-
-
-/**
- * @brief Broker id comparator
- */
-static int rd_kafka_broker_cmp_by_id (const void *_a, const void *_b) {
-        const rd_kafka_broker_t *a = _a, *b = _b;
-        return a->rkb_nodeid - b->rkb_nodeid;
-}
-
-
-
-/**
- * @brief Serve a broker op (an op posted by another thread to be handled by
- *        this broker's thread).
- *
- * @returns 0 if calling op loop should break out, else 1 to continue.
- * @locality broker thread
- * @locks none
- */
-static int rd_kafka_broker_op_serve (rd_kafka_broker_t *rkb,
-				      rd_kafka_op_t *rko) {
-        shptr_rd_kafka_toppar_t *s_rktp;
-        rd_kafka_toppar_t *rktp;
-        int ret = 1;
-
-	rd_kafka_assert(rkb->rkb_rk, thrd_is_current(rkb->rkb_thread));
-
-	switch (rko->rko_type)
-	{
-        case RD_KAFKA_OP_NODE_UPDATE:
-        {
-                enum {
-                        _UPD_NAME = 0x1,
-                        _UPD_ID = 0x2
-                } updated = 0;
-                char brokername[RD_KAFKA_NODENAME_SIZE];
-
-                /* Need kafka_wrlock for updating rk_broker_by_id */
-                rd_kafka_wrlock(rkb->rkb_rk);
-                rd_kafka_broker_lock(rkb);
-
-                if (strcmp(rkb->rkb_nodename,
-                           rko->rko_u.node.nodename)) {
-                        rd_rkb_dbg(rkb, BROKER, "UPDATE",
-                                   "Nodename changed from %s to %s",
-                                   rkb->rkb_nodename,
-                                   rko->rko_u.node.nodename);
-                        strncpy(rkb->rkb_nodename,
-                                rko->rko_u.node.nodename,
-                                sizeof(rkb->rkb_nodename)-1);
-                        updated |= _UPD_NAME;
-                }
-
-                if (rko->rko_u.node.nodeid != -1 &&
-                    rko->rko_u.node.nodeid != rkb->rkb_nodeid) {
-                        int32_t old_nodeid = rkb->rkb_nodeid;
-                        rd_rkb_dbg(rkb, BROKER, "UPDATE",
-                                   "NodeId changed from %"PRId32" to %"PRId32,
-                                   rkb->rkb_nodeid,
-                                   rko->rko_u.node.nodeid);
-
-                        rkb->rkb_nodeid = rko->rko_u.node.nodeid;
-
-                        /* Update broker_by_id sorted list */
-                        if (old_nodeid == -1)
-                                rd_list_add(&rkb->rkb_rk->rk_broker_by_id, rkb);
-                        rd_list_sort(&rkb->rkb_rk->rk_broker_by_id,
-                                     rd_kafka_broker_cmp_by_id);
-
-                        updated |= _UPD_ID;
-                }
-
-                rd_kafka_mk_brokername(brokername, sizeof(brokername),
-                                       rkb->rkb_proto,
-				       rkb->rkb_nodename, rkb->rkb_nodeid,
-				       RD_KAFKA_LEARNED);
-                if (strcmp(rkb->rkb_name, brokername)) {
-                        /* Update the name copy used for logging. */
-                        mtx_lock(&rkb->rkb_logname_lock);
-                        rd_free(rkb->rkb_logname);
-                        rkb->rkb_logname = rd_strdup(brokername);
-                        mtx_unlock(&rkb->rkb_logname_lock);
-
-                        rd_rkb_dbg(rkb, BROKER, "UPDATE",
-                                   "Name changed from %s to %s",
-                                   rkb->rkb_name, brokername);
-                        strncpy(rkb->rkb_name, brokername,
-                                sizeof(rkb->rkb_name)-1);
-                }
-                rd_kafka_broker_unlock(rkb);
-                rd_kafka_wrunlock(rkb->rkb_rk);
-
-                if (updated & _UPD_NAME)
-                        rd_kafka_broker_fail(rkb, LOG_NOTICE,
-                                             RD_KAFKA_RESP_ERR__NODE_UPDATE,
-                                             "Broker hostname updated");
-                else if (updated & _UPD_ID) {
-                        /* Map existing partitions to this broker. */
-                        rd_kafka_broker_map_partitions(rkb);
-
-			/* If broker is currently in state up we need
-			 * to trigger a state change so it exits its
-			 * state&type based .._serve() loop. */
-                        rd_kafka_broker_lock(rkb);
-			if (rkb->rkb_state == RD_KAFKA_BROKER_STATE_UP)
-				rd_kafka_broker_set_state(
-					rkb, RD_KAFKA_BROKER_STATE_UPDATE);
-                        rd_kafka_broker_unlock(rkb);
-                }
-                break;
-        }
-
-        case RD_KAFKA_OP_XMIT_BUF:
-                rd_kafka_broker_buf_enq2(rkb, rko->rko_u.xbuf.rkbuf);
-                rko->rko_u.xbuf.rkbuf = NULL; /* buffer now owned by broker */
-                if (rko->rko_replyq.q) {
-                        /* Op will be reused for forwarding response. */
-                        rko = NULL;
-                }
-                break;
-
-        case RD_KAFKA_OP_XMIT_RETRY:
-                rd_kafka_broker_buf_retry(rkb, rko->rko_u.xbuf.rkbuf);
-                rko->rko_u.xbuf.rkbuf = NULL;
-                break;
-
-        case RD_KAFKA_OP_PARTITION_JOIN:
-                /*
-		 * Add partition to broker toppars
-		 */
-                rktp = rd_kafka_toppar_s2i(rko->rko_rktp);
-                rd_kafka_toppar_lock(rktp);
-
-                /* Abort join if instance is terminating */
-                if (rd_kafka_terminating(rkb->rkb_rk) ||
-		    (rktp->rktp_flags & RD_KAFKA_TOPPAR_F_REMOVE)) {
-                        rd_rkb_dbg(rkb, BROKER | RD_KAFKA_DBG_TOPIC, "TOPBRK",
-                                   "Topic %s [%"PRId32"]: not joining broker: "
-                                   "%s",
-                                   rktp->rktp_rkt->rkt_topic->str,
-                                   rktp->rktp_partition,
-				   rd_kafka_terminating(rkb->rkb_rk) ?
-				   "instance is terminating" :
-				   "partition removed");
-
-                        rd_kafka_broker_destroy(rktp->rktp_next_leader);
-                        rktp->rktp_next_leader = NULL;
-                        rd_kafka_toppar_unlock(rktp);
-                        break;
-                }
-
-                /* See if we are still the next leader */
-                if (rktp->rktp_next_leader != rkb) {
-                        rd_rkb_dbg(rkb, BROKER | RD_KAFKA_DBG_TOPIC, "TOPBRK",
-                                   "Topic %s [%"PRId32"]: not joining broker "
-                                   "(next leader %s)",
-                                   rktp->rktp_rkt->rkt_topic->str,
-                                   rktp->rktp_partition,
-                                   rktp->rktp_next_leader ?
-                                   rd_kafka_broker_name(rktp->rktp_next_leader):
-                                   "(none)");
-
-                        /* Need temporary refcount so we can safely unlock
-                         * after q_enq(). */
-                        s_rktp = rd_kafka_toppar_keep(rktp);
-
-                        /* No, forward this op to the new next leader. */
-                        rd_kafka_q_enq(rktp->rktp_next_leader->rkb_ops, rko);
-                        rko = NULL;
-
-                        rd_kafka_toppar_unlock(rktp);
-                        rd_kafka_toppar_destroy(s_rktp);
-
-                        break;
-                }
-
-                rd_rkb_dbg(rkb, BROKER | RD_KAFKA_DBG_TOPIC, "TOPBRK",
-                           "Topic %s [%"PRId32"]: joining broker (rktp %p)",
-                           rktp->rktp_rkt->rkt_topic->str,
-                           rktp->rktp_partition, rktp);
-
-                rd_kafka_assert(NULL, rktp->rktp_s_for_rkb == NULL);
-		rktp->rktp_s_for_rkb = rd_kafka_toppar_keep(rktp);
-                rd_kafka_broker_lock(rkb);
-		TAILQ_INSERT_TAIL(&rkb->rkb_toppars, rktp, rktp_rkblink);
-		rkb->rkb_toppar_cnt++;
-                rd_kafka_broker_unlock(rkb);
-		rktp->rktp_leader = rkb;
-                rktp->rktp_msgq_wakeup_fd = rkb->rkb_toppar_wakeup_fd;
-                rd_kafka_broker_keep(rkb);
-
-                rd_kafka_broker_destroy(rktp->rktp_next_leader);
-                rktp->rktp_next_leader = NULL;
-
-                rd_kafka_toppar_unlock(rktp);
-
-		rd_kafka_brokers_broadcast_state_change(rkb->rkb_rk);
-                break;
-
-        case RD_KAFKA_OP_PARTITION_LEAVE:
-                /*
-		 * Remove partition from broker toppars
-		 */
-                rktp = rd_kafka_toppar_s2i(rko->rko_rktp);
-
-		rd_kafka_toppar_lock(rktp);
-
-		/* Multiple PARTITION_LEAVEs are possible during partition
-		 * migration, make sure we're supposed to handle this one. */
-		if (unlikely(rktp->rktp_leader != rkb)) {
-			rd_rkb_dbg(rkb, BROKER | RD_KAFKA_DBG_TOPIC, "TOPBRK",
-				   "Topic %s [%"PRId32"]: "
-				   "ignoring PARTITION_LEAVE: "
-				   "broker is not leader (%s)",
-				   rktp->rktp_rkt->rkt_topic->str,
-				   rktp->rktp_partition,
-				   rktp->rktp_leader ?
-				   rd_kafka_broker_name(rktp->rktp_leader) :
-				   "none");
-			rd_kafka_toppar_unlock(rktp);
-			break;
-		}
-		rd_kafka_toppar_unlock(rktp);
-
-		/* Remove from fetcher list */
-		rd_kafka_toppar_fetch_decide(rktp, rkb, 1/*force remove*/);
-
-		rd_kafka_toppar_lock(rktp);
-
-		rd_rkb_dbg(rkb, BROKER | RD_KAFKA_DBG_TOPIC, "TOPBRK",
-			   "Topic %s [%"PRId32"]: leaving broker "
-			   "(%d messages in xmitq, next leader %s, rktp %p)",
-			   rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
-			   rd_kafka_msgq_len(&rktp->rktp_xmit_msgq),
-			   rktp->rktp_next_leader ?
-			   rd_kafka_broker_name(rktp->rktp_next_leader) :
-			   "(none)", rktp);
-
-		/* Prepend xmitq(broker-local) messages to the msgq(global).
-		 * There is no msgq_prepend() so we append msgq to xmitq
-		 * and then move the queue altogether back over to msgq. */
-		rd_kafka_msgq_concat(&rktp->rktp_xmit_msgq,
-				     &rktp->rktp_msgq);
-		rd_kafka_msgq_move(&rktp->rktp_msgq, &rktp->rktp_xmit_msgq);
-
-                rd_kafka_broker_lock(rkb);
-		TAILQ_REMOVE(&rkb->rkb_toppars, rktp, rktp_rkblink);
-		rkb->rkb_toppar_cnt--;
-                rd_kafka_broker_unlock(rkb);
-                rd_kafka_broker_destroy(rktp->rktp_leader);
-                rktp->rktp_msgq_wakeup_fd = -1;
-		rktp->rktp_leader = NULL;
-
-                /* Need to hold on to a refcount past q_enq() and
-                 * unlock() below */
-                s_rktp = rktp->rktp_s_for_rkb;
-                rktp->rktp_s_for_rkb = NULL;
-
-                if (rktp->rktp_next_leader) {
-                        /* There is a next leader we need to migrate to. */
-                        rko->rko_type = RD_KAFKA_OP_PARTITION_JOIN;
-                        rd_kafka_q_enq(rktp->rktp_next_leader->rkb_ops, rko);
-                        rko = NULL;
-                } else {
-			rd_rkb_dbg(rkb, BROKER | RD_KAFKA_DBG_TOPIC, "TOPBRK",
-				   "Topic %s [%"PRId32"]: no next leader, "
-				   "failing %d message(s) in partition queue",
-				   rktp->rktp_rkt->rkt_topic->str,
-				   rktp->rktp_partition,
-				   rd_kafka_msgq_len(&rktp->rktp_msgq));
-			rd_kafka_assert(NULL, rd_kafka_msgq_len(&rktp->rktp_xmit_msgq) == 0);
-			rd_kafka_dr_msgq(rktp->rktp_rkt, &rktp->rktp_msgq,
-					 rd_kafka_terminating(rkb->rkb_rk) ?
-					 RD_KAFKA_RESP_ERR__DESTROY :
-					 RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION);
-
-		}
-
-                rd_kafka_toppar_unlock(rktp);
-                rd_kafka_toppar_destroy(s_rktp);
-
-		rd_kafka_brokers_broadcast_state_change(rkb->rkb_rk);
-                break;
-
-        case RD_KAFKA_OP_TERMINATE:
-                /* nop: just a wake-up. */
-                if (rkb->rkb_blocking_max_ms > 1)
-                        rkb->rkb_blocking_max_ms = 1; /* Speed up termination*/
-                rd_rkb_dbg(rkb, BROKER, "TERM",
-                           "Received TERMINATE op in state %s: "
-                           "%d refcnts, %d toppar(s), %d fetch toppar(s), "
-                           "%d outbufs, %d waitresps, %d retrybufs",
-                           rd_kafka_broker_state_names[rkb->rkb_state],
-                           rd_refcnt_get(&rkb->rkb_refcnt),
-                           rkb->rkb_toppar_cnt, rkb->rkb_fetch_toppar_cnt,
-                           (int)rd_kafka_bufq_cnt(&rkb->rkb_outbufs),
-                           (int)rd_kafka_bufq_cnt(&rkb->rkb_waitresps),
-                           (int)rd_kafka_bufq_cnt(&rkb->rkb_retrybufs));
-                ret = 0;
-                break;
-
-        default:
-                rd_kafka_assert(rkb->rkb_rk, !*"unhandled op type");
-                break;
-        }
-
-        if (rko)
-                rd_kafka_op_destroy(rko);
-
-        return ret;
-}
-
-
-/**
- * @brief Serve broker ops and IOs.
- *
- * @param abs_timeout Maximum block time (absolute time).
- *
- * @locality broker thread
- * @locks none
- */
-static void rd_kafka_broker_serve (rd_kafka_broker_t *rkb,
-                                   rd_ts_t abs_timeout) {
-        rd_kafka_op_t *rko;
-        rd_ts_t now;
-        int initial_state = rkb->rkb_state;
-        int remains_ms = rd_timeout_remains(abs_timeout);
-
-        /* Serve broker ops */
-        while ((rko = rd_kafka_q_pop(rkb->rkb_ops,
-                                     !rkb->rkb_transport ?
-                                     remains_ms : RD_POLL_NOWAIT,
-                                     0))
-               && rd_kafka_broker_op_serve(rkb, rko))
-                remains_ms = RD_POLL_NOWAIT;
-
-        /* If the broker state changed in op_serve() we minimize
-         * the IO timeout since our caller might want to exit out of
-         * its loop on state change. */
-        if (likely(rkb->rkb_transport != NULL)) {
-                int blocking_max_ms;
-
-                if ((int)rkb->rkb_state != initial_state)
-                        blocking_max_ms = 0;
-                else {
-                        int remains_ms = rd_timeout_remains(abs_timeout);
-                        if (remains_ms == RD_POLL_INFINITE ||
-                            remains_ms > rkb->rkb_blocking_max_ms)
-                                remains_ms = rkb->rkb_blocking_max_ms;
-                        blocking_max_ms = remains_ms;
-                }
-
-                /* Serve IO events */
-                rd_kafka_transport_io_serve(rkb->rkb_transport,
-                                            blocking_max_ms);
-        }
-
-        /* Scan wait-response queue for timeouts. */
-        now = rd_clock();
-        if (rd_interval(&rkb->rkb_timeout_scan_intvl, 1000000, now) > 0)
-                rd_kafka_broker_timeout_scan(rkb, now);
-}
-
-
-/**
- * @brief Serve the toppars assigned to this broker.
- *
- * @returns the minimum Fetch backoff time (abs timestamp) for the
- *          partitions to fetch.
- *
- * @locality broker thread
- */
-static rd_ts_t rd_kafka_broker_toppars_serve (rd_kafka_broker_t *rkb) {
-        rd_kafka_toppar_t *rktp, *rktp_tmp;
-        rd_ts_t min_backoff = RD_TS_MAX;
-
-        TAILQ_FOREACH_SAFE(rktp, &rkb->rkb_toppars, rktp_rkblink, rktp_tmp) {
-                rd_ts_t backoff;
-
-                /* Serve toppar to update desired rktp state */
-                backoff = rd_kafka_broker_consumer_toppar_serve(rkb, rktp);
-                if (backoff < min_backoff)
-                        min_backoff = backoff;
-        }
-
-        return min_backoff;
-}
-
-
-/**
- * Idle function for unassigned brokers
- * If \p timeout_ms is not RD_POLL_INFINITE the serve loop will be exited
- * regardless of state after this long (approximately).
- */
-static void rd_kafka_broker_ua_idle (rd_kafka_broker_t *rkb, int timeout_ms) {
-        int initial_state = rkb->rkb_state;
-        rd_ts_t abs_timeout;
-
-        if (rd_kafka_terminating(rkb->rkb_rk))
-                timeout_ms = 1;
-        else if (timeout_ms == RD_POLL_INFINITE)
-                timeout_ms = rkb->rkb_blocking_max_ms;
-
-        abs_timeout = rd_timeout_init(timeout_ms);
-
-        /* Since ua_idle is used during connection setup
-         * in state ..BROKER_STATE_CONNECT we only run this loop as long
-         * as the state remains the same as the initial state; on a state
-         * change (most likely to UP) the correct serve() function should
-         * be used instead. */
-        while (!rd_kafka_broker_terminating(rkb) &&
-               (int)rkb->rkb_state == initial_state &&
-               !rd_timeout_expired(rd_timeout_remains(abs_timeout))) {
-
-                rd_kafka_broker_toppars_serve(rkb);
-                rd_kafka_broker_serve(rkb, abs_timeout);
-        }
-}
-
-
-/**
- * @brief Serve a toppar for producing.
- *
- * @param next_wakeup will be updated to when the next wake-up/attempt is
- *                    desired, only lower (sooner) values will be set.
- *
- * Locks: toppar_lock(rktp) MUST be held. 
- * Returns the number of messages produced.
- */
-static int rd_kafka_toppar_producer_serve (rd_kafka_broker_t *rkb,
-                                           rd_kafka_toppar_t *rktp,
-                                           int do_timeout_scan,
-                                           rd_ts_t now,
-                                           rd_ts_t *next_wakeup) {
-        int cnt = 0;
-        int r;
-
-        rd_rkb_dbg(rkb, QUEUE, "TOPPAR",
-                   "%.*s [%"PRId32"] %i+%i msgs",
-                   RD_KAFKAP_STR_PR(rktp->rktp_rkt->
-                                    rkt_topic),
-                   rktp->rktp_partition,
-                   rd_atomic32_get(&rktp->rktp_msgq.rkmq_msg_cnt),
-                   rd_atomic32_get(&rktp->rktp_xmit_msgq.
-                                   rkmq_msg_cnt));
-
-        if (rd_atomic32_get(&rktp->rktp_msgq.rkmq_msg_cnt) > 0)
-                rd_kafka_msgq_concat(&rktp->rktp_xmit_msgq, &rktp->rktp_msgq);
-
-        /* Timeout scan */
-        if (unlikely(do_timeout_scan)) {
-                rd_kafka_msgq_t timedout = RD_KAFKA_MSGQ_INITIALIZER(timedout);
-
-                if (rd_kafka_msgq_age_scan(&rktp->rktp_xmit_msgq,
-                                           &timedout, now)) {
-                        /* Trigger delivery report for timed out messages */
-                        rd_kafka_dr_msgq(rktp->rktp_rkt, &timedout,
-                                         RD_KAFKA_RESP_ERR__MSG_TIMED_OUT);
-                }
-        }
-
-        r = rd_atomic32_get(&rktp->rktp_xmit_msgq.rkmq_msg_cnt);
-        if (r == 0)
-                return 0;
-
-        /* Attempt to fill the batch size, but limit
-         * our waiting to queue.buffering.max.ms
-         * and batch.num.messages. */
-        if (r < rkb->rkb_rk->rk_conf.batch_num_messages) {
-                rd_kafka_msg_t *rkm_oldest;
-                rd_ts_t wait_max;
-
-                rkm_oldest = TAILQ_FIRST(&rktp->rktp_xmit_msgq.rkmq_msgs);
-                if (unlikely(!rkm_oldest))
-                        return 0;
-
-                /* Calculate maximum wait-time to
-                 * honour queue.buffering.max.ms contract. */
-                wait_max = rd_kafka_msg_enq_time(rkm_oldest) +
-                        (rkb->rkb_rk->rk_conf.buffering_max_ms * 1000);
-                if (wait_max > now) {
-                        if (wait_max < *next_wakeup)
-                                *next_wakeup = wait_max;
-                        /* Wait for more messages or queue.buffering.max.ms
-                         * to expire. */
-                        return 0;
-                }
-        }
-
-        /* Send Produce requests for this toppar */
-        while (1) {
-                r = rd_kafka_ProduceRequest(rkb, rktp);
-                if (likely(r > 0))
-                        cnt += r;
-                else
-                        break;
-        }
-
-        return cnt;
-}
-
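(The batching contract above is governed by two standard producer
properties; a minimal tuning sketch, with purely illustrative values:)

#include <librdkafka/rdkafka.h>

static void conf_tune_batching (rd_kafka_conf_t *conf) {
        char errstr[512];

        /* Wait at most 5 ms for a batch to fill (latency vs throughput). */
        rd_kafka_conf_set(conf, "queue.buffering.max.ms", "5",
                          errstr, sizeof(errstr));
        /* Cap the number of messages batched into one ProduceRequest. */
        rd_kafka_conf_set(conf, "batch.num.messages", "10000",
                          errstr, sizeof(errstr));
}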
-
-/**
- * Producer serving
- */
-static void rd_kafka_broker_producer_serve (rd_kafka_broker_t *rkb) {
-        rd_interval_t timeout_scan;
-
-        rd_interval_init(&timeout_scan);
-
-        rd_kafka_assert(rkb->rkb_rk, thrd_is_current(rkb->rkb_thread));
-
-	rd_kafka_broker_lock(rkb);
-
-	while (!rd_kafka_broker_terminating(rkb) &&
-	       rkb->rkb_state == RD_KAFKA_BROKER_STATE_UP) {
-		rd_kafka_toppar_t *rktp;
-		int cnt;
-		rd_ts_t now;
-                rd_ts_t next_wakeup;
-                int do_timeout_scan = 0;
-
-		rd_kafka_broker_unlock(rkb);
-
-		now = rd_clock();
-                next_wakeup = now + (rkb->rkb_rk->rk_conf.
-                                     socket_blocking_max_ms * 1000);
-
-                if (rd_interval(&timeout_scan, 1000*1000, now) >= 0)
-                        do_timeout_scan = 1;
-
-		do {
-			cnt = 0;
-
-                        /* Serve each toppar */
-			TAILQ_FOREACH(rktp, &rkb->rkb_toppars, rktp_rkblink) {
-                                /* Serve toppar op queue */
-                                rd_kafka_toppar_lock(rktp);
-                                if (unlikely(rktp->rktp_leader != rkb)) {
-                                        /* Currently migrating away from this
-                                         * broker. */
-                                        rd_kafka_toppar_unlock(rktp);
-                                        continue;
-                                }
-				if (unlikely(RD_KAFKA_TOPPAR_IS_PAUSED(rktp))) {
-					/* Partition is paused */
-					rd_kafka_toppar_unlock(rktp);
-					continue;
-				}
-                                /* Try producing toppar */
-                                cnt += rd_kafka_toppar_producer_serve(
-                                        rkb, rktp, do_timeout_scan, now,
-                                        &next_wakeup);
-
-                                rd_kafka_toppar_unlock(rktp);
-			}
-
-		} while (cnt);
-
-		/* Check and move retry buffers */
-		if (unlikely(rd_atomic32_get(&rkb->rkb_retrybufs.rkbq_cnt) > 0))
-			rd_kafka_broker_retry_bufs_move(rkb);
-
-                rkb->rkb_blocking_max_ms = (int)
-                        (next_wakeup > now ? (next_wakeup - now) / 1000 : 0);
-		rd_kafka_broker_serve(rkb, next_wakeup);
-
-		rd_kafka_broker_lock(rkb);
-	}
-
-	rd_kafka_broker_unlock(rkb);
-}
-
-
-
-
-
-
-
-/**
- * Backoff the next Fetch request (due to error).
- */
-static void rd_kafka_broker_fetch_backoff (rd_kafka_broker_t *rkb,
-                                           rd_kafka_resp_err_t err) {
-        int backoff_ms = rkb->rkb_rk->rk_conf.fetch_error_backoff_ms;
-        rkb->rkb_ts_fetch_backoff = rd_clock() + (backoff_ms * 1000);
-        rd_rkb_dbg(rkb, FETCH, "BACKOFF",
-                   "Fetch backoff for %dms: %s",
-                   backoff_ms, rd_kafka_err2str(err));
-}
-
-/**
- * @brief Backoff the next Fetch for specific partition
- */
-static void rd_kafka_toppar_fetch_backoff (rd_kafka_broker_t *rkb,
-                                           rd_kafka_toppar_t *rktp,
-                                           rd_kafka_resp_err_t err) {
-        int backoff_ms = rkb->rkb_rk->rk_conf.fetch_error_backoff_ms;
-        rktp->rktp_ts_fetch_backoff = rd_clock() + (backoff_ms * 1000);
-        rd_rkb_dbg(rkb, FETCH, "BACKOFF",
-                   "%s [%"PRId32"]: Fetch backoff for %dms: %s",
-                   rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
-                   backoff_ms, rd_kafka_err2str(err));
-}
-
-
-/**
- * Parses and handles a Fetch reply.
- * Returns 0 on success or an error code on failure.
- */
-static rd_kafka_resp_err_t
-rd_kafka_fetch_reply_handle (rd_kafka_broker_t *rkb,
-			     rd_kafka_buf_t *rkbuf, rd_kafka_buf_t *request) {
-	int32_t TopicArrayCnt;
-	int i;
-        const int log_decode_errors = LOG_ERR;
-        shptr_rd_kafka_itopic_t *s_rkt = NULL;
-
-	if (rd_kafka_buf_ApiVersion(request) >= 1) {
-		int32_t Throttle_Time;
-		rd_kafka_buf_read_i32(rkbuf, &Throttle_Time);
-
-		rd_kafka_op_throttle_time(rkb, rkb->rkb_rk->rk_rep,
-					  Throttle_Time);
-	}
-
-	rd_kafka_buf_read_i32(rkbuf, &TopicArrayCnt);
-	/* Verify that TopicArrayCnt seems to be in line with remaining size */
-	rd_kafka_buf_check_len(rkbuf,
-			       TopicArrayCnt * (3/*topic min size*/ +
-						4/*PartitionArrayCnt*/ +
-						4+2+8+4/*inner header*/));
-
-	for (i = 0 ; i < TopicArrayCnt ; i++) {
-		rd_kafkap_str_t topic;
-		int32_t fetch_version;
-		int32_t PartitionArrayCnt;
-		int j;
-
-		rd_kafka_buf_read_str(rkbuf, &topic);
-		rd_kafka_buf_read_i32(rkbuf, &PartitionArrayCnt);
-
-                s_rkt = rd_kafka_topic_find0(rkb->rkb_rk, &topic);
-
-		for (j = 0 ; j < PartitionArrayCnt ; j++) {
-			struct rd_kafka_toppar_ver *tver, tver_skel;
-                        rd_kafka_toppar_t *rktp;
-                        shptr_rd_kafka_toppar_t *s_rktp = NULL;
-                        rd_slice_t save_slice;
-                        struct {
-                                int32_t Partition;
-                                int16_t ErrorCode;
-                                int64_t HighwaterMarkOffset;
-                                int64_t LastStableOffset;       /* v4 */
-                                int32_t MessageSetSize;
-                        } hdr;
-                        rd_kafka_resp_err_t err;
-
-			rd_kafka_buf_read_i32(rkbuf, &hdr.Partition);
-			rd_kafka_buf_read_i16(rkbuf, &hdr.ErrorCode);
-			rd_kafka_buf_read_i64(rkbuf, &hdr.HighwaterMarkOffset);
-
-                        if (rd_kafka_buf_ApiVersion(request) == 4) {
-                                int32_t AbortedTxCnt;
-                                rd_kafka_buf_read_i64(rkbuf,
-                                                      &hdr.LastStableOffset);
-                                rd_kafka_buf_read_i32(rkbuf, &AbortedTxCnt);
-                                /* Ignore aborted transactions for now */
-                                if (AbortedTxCnt > 0)
-                                        rd_kafka_buf_skip(rkbuf,
-                                                          AbortedTxCnt * (8+8));
-                        } else
-                                hdr.LastStableOffset = -1;
-
-			rd_kafka_buf_read_i32(rkbuf, &hdr.MessageSetSize);
-
-                        if (unlikely(hdr.MessageSetSize < 0))
-                                rd_kafka_buf_parse_fail(
-                                        rkbuf,
-                                        "%.*s [%"PRId32"]: "
-                                        "invalid MessageSetSize %"PRId32,
-                                        RD_KAFKAP_STR_PR(&topic),
-                                        hdr.Partition,
-                                        hdr.MessageSetSize);
-
-			/* Look up topic+partition */
-                        if (likely(s_rkt != NULL)) {
-                                rd_kafka_itopic_t *rkt;
-                                rkt = rd_kafka_topic_s2i(s_rkt);
-                                rd_kafka_topic_rdlock(rkt);
-                                s_rktp = rd_kafka_toppar_get(
-                                        rkt, hdr.Partition, 0/*no ua-on-miss*/);
-                                rd_kafka_topic_rdunlock(rkt);
-                        }
-
-			if (unlikely(!s_rkt || !s_rktp)) {
-				rd_rkb_dbg(rkb, TOPIC, "UNKTOPIC",
-					   "Received Fetch response "
-					   "(error %hu) for unknown topic "
-					   "%.*s [%"PRId32"]: ignoring",
-					   hdr.ErrorCode,
-					   RD_KAFKAP_STR_PR(&topic),
-					   hdr.Partition);
-				rd_kafka_buf_skip(rkbuf, hdr.MessageSetSize);
-				continue;
-			}
-
-                        rktp = rd_kafka_toppar_s2i(s_rktp);
-
-                        rd_kafka_toppar_lock(rktp);
-                        /* Make sure toppar hasn't moved to another broker
-                         * during the lifetime of the request. */
-                        if (unlikely(rktp->rktp_leader != rkb)) {
-                                rd_kafka_toppar_unlock(rktp);
-                                rd_rkb_dbg(rkb, MSG, "FETCH",
-

<TRUNCATED>

[16/51] [partial] nifi-minifi-cpp git commit: MINIFICPP-512 - upgrade to librdkafka 0.11.4

Posted by ph...@apache.org.
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdkafka_topic.h
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdkafka_topic.h b/thirdparty/librdkafka-0.11.1/src/rdkafka_topic.h
deleted file mode 100644
index 49d1d29..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdkafka_topic.h
+++ /dev/null
@@ -1,185 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012,2013 Magnus Edenhill
- * All rights reserved.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met: 
- * 
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer. 
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution. 
- * 
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#pragma once
-
-#include "rdlist.h"
-
-extern const char *rd_kafka_topic_state_names[];
-
-
-/* rd_kafka_itopic_t: internal representation of a topic */
-struct rd_kafka_itopic_s {
-	TAILQ_ENTRY(rd_kafka_itopic_s) rkt_link;
-
-	rd_refcnt_t        rkt_refcnt;
-
-	rwlock_t           rkt_lock;
-	rd_kafkap_str_t   *rkt_topic;
-
-	shptr_rd_kafka_toppar_t  *rkt_ua;  /* unassigned partition */
-	shptr_rd_kafka_toppar_t **rkt_p;
-	int32_t            rkt_partition_cnt;
-
-        rd_list_t          rkt_desp;              /* Desired partitions
-                                                   * that are not yet seen
-                                                   * in the cluster. */
-
-	rd_ts_t            rkt_ts_metadata; /* Timestamp of last metadata
-					     * update for this topic. */
-
-        mtx_t              rkt_app_lock;    /* Protects rkt_app_* */
-        rd_kafka_topic_t *rkt_app_rkt;      /* A shared topic pointer
-                                             * to be used for callbacks
-                                             * to the application. */
-
-	int               rkt_app_refcnt;   /* Number of active rkt's new()ed
-					     * by application. */
-
-	enum {
-		RD_KAFKA_TOPIC_S_UNKNOWN,   /* No cluster information yet */
-		RD_KAFKA_TOPIC_S_EXISTS,    /* Topic exists in cluster */
-		RD_KAFKA_TOPIC_S_NOTEXISTS, /* Topic is not known in cluster */
-	} rkt_state;
-
-        int               rkt_flags;
-#define RD_KAFKA_TOPIC_F_LEADER_UNAVAIL   0x1 /* Leader lost/unavailable
-                                               * for at least one partition. */
-
-	rd_kafka_t       *rkt_rk;
-
-        shptr_rd_kafka_itopic_t *rkt_shptr_app; /* Application's topic_new() */
-
-	rd_kafka_topic_conf_t rkt_conf;
-};
-
-#define rd_kafka_topic_rdlock(rkt)     rwlock_rdlock(&(rkt)->rkt_lock)
-#define rd_kafka_topic_wrlock(rkt)     rwlock_wrlock(&(rkt)->rkt_lock)
-#define rd_kafka_topic_rdunlock(rkt)   rwlock_rdunlock(&(rkt)->rkt_lock)
-#define rd_kafka_topic_wrunlock(rkt)   rwlock_wrunlock(&(rkt)->rkt_lock)
-
-
-/* Converts a shptr..itopic_t to an internal itopic_t */
-#define rd_kafka_topic_s2i(s_rkt) rd_shared_ptr_obj(s_rkt)
-
-/* Converts an application topic_t (a shptr topic) to an internal itopic_t */
-#define rd_kafka_topic_a2i(app_rkt) \
-        rd_kafka_topic_s2i((shptr_rd_kafka_itopic_t *)app_rkt)
-
-/* Converts a shptr..itopic_t to an app topic_t (they are the same thing) */
-#define rd_kafka_topic_s2a(s_rkt) ((rd_kafka_topic_t *)(s_rkt))
-
-/* Converts an app topic_t to a shptr..itopic_t (they are the same thing) */
-#define rd_kafka_topic_a2s(app_rkt) ((shptr_rd_kafka_itopic_t *)(app_rkt))
-
-
-
-
-
-/**
- * Returns a shared pointer for the topic.
- */
-#define rd_kafka_topic_keep(rkt) \
-        rd_shared_ptr_get(rkt, &(rkt)->rkt_refcnt, shptr_rd_kafka_itopic_t)
-
-/* Same, but casts to an app topic_t */
-#define rd_kafka_topic_keep_a(rkt)                                      \
-        ((rd_kafka_topic_t *)rd_shared_ptr_get(rkt, &(rkt)->rkt_refcnt, \
-                                               shptr_rd_kafka_itopic_t))
-
-void rd_kafka_topic_destroy_final (rd_kafka_itopic_t *rkt);
-
-
-/**
- * Frees a shared pointer previously returned by ..topic_keep()
- */
-static RD_INLINE RD_UNUSED void
-rd_kafka_topic_destroy0 (shptr_rd_kafka_itopic_t *s_rkt) {
-        rd_shared_ptr_put(s_rkt,
-                          &rd_kafka_topic_s2i(s_rkt)->rkt_refcnt,
-                          rd_kafka_topic_destroy_final(
-                                  rd_kafka_topic_s2i(s_rkt)));
-}
-
-
-shptr_rd_kafka_itopic_t *rd_kafka_topic_new0 (rd_kafka_t *rk, const char *topic,
-                                              rd_kafka_topic_conf_t *conf,
-                                              int *existing, int do_lock);
-
-shptr_rd_kafka_itopic_t *rd_kafka_topic_find_fl (const char *func, int line,
-                                                 rd_kafka_t *rk,
-                                                 const char *topic,
-                                                 int do_lock);
-shptr_rd_kafka_itopic_t *rd_kafka_topic_find0_fl (const char *func, int line,
-                                                  rd_kafka_t *rk,
-                                                  const rd_kafkap_str_t *topic);
-#define rd_kafka_topic_find(rk,topic,do_lock)                           \
-        rd_kafka_topic_find_fl(__FUNCTION__,__LINE__,rk,topic,do_lock)
-#define rd_kafka_topic_find0(rk,topic)                                  \
-        rd_kafka_topic_find0_fl(__FUNCTION__,__LINE__,rk,topic)
-int rd_kafka_topic_cmp_s_rkt (const void *_a, const void *_b);
-
-void rd_kafka_topic_partitions_remove (rd_kafka_itopic_t *rkt);
-
-void rd_kafka_topic_metadata_none (rd_kafka_itopic_t *rkt);
-
-int rd_kafka_topic_metadata_update2 (rd_kafka_broker_t *rkb,
-                                     const struct rd_kafka_metadata_topic *mdt);
-
-int rd_kafka_topic_scan_all (rd_kafka_t *rk, rd_ts_t now);
-
-
-typedef struct rd_kafka_topic_info_s {
-	const char *topic;          /**< Allocated along with struct */
-	int   partition_cnt;
-} rd_kafka_topic_info_t;
-
-
-int rd_kafka_topic_info_cmp (const void *_a, const void *_b);
-rd_kafka_topic_info_t *rd_kafka_topic_info_new (const char *topic,
-						int partition_cnt);
-void rd_kafka_topic_info_destroy (rd_kafka_topic_info_t *ti);
-
-int rd_kafka_topic_match (rd_kafka_t *rk, const char *pattern,
-			  const char *topic);
-
-int rd_kafka_toppar_leader_update (rd_kafka_toppar_t *rktp,
-                                   int32_t leader_id, rd_kafka_broker_t *rkb);
-
-rd_kafka_resp_err_t
-rd_kafka_topics_leader_query_sync (rd_kafka_t *rk, int all_topics,
-                                   const rd_list_t *topics, int timeout_ms);
-void rd_kafka_topic_leader_query0 (rd_kafka_t *rk, rd_kafka_itopic_t *rkt,
-                                   int do_rk_lock);
-#define rd_kafka_topic_leader_query(rk,rkt) \
-        rd_kafka_topic_leader_query0(rk,rkt,1/*lock*/)
-
-#define rd_kafka_topic_fast_leader_query(rk) \
-        rd_kafka_metadata_fast_leader_query(rk)
-
-void rd_kafka_local_topics_to_list (rd_kafka_t *rk, rd_list_t *topics);
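
For readers skimming this removal, the shared-pointer macros above (rd_kafka_topic_keep(), rd_kafka_topic_destroy0(), and the s2i/a2i casts) boil down to plain reference counting: keep() takes a reference, destroy0() drops one and frees the object when the count reaches zero. The sketch below illustrates only that pattern; the type and function names are invented, it assumes C11 atomics, and it is not librdkafka code.

    #include <stdatomic.h>
    #include <stdlib.h>
    #include <string.h>

    /* Hypothetical reference-counted topic handle (illustrative only). */
    typedef struct topic_s {
            atomic_int refcnt;
            char      *name;
    } topic_t;

    static topic_t *topic_new (const char *name) {
            topic_t *t = calloc(1, sizeof(*t));
            atomic_init(&t->refcnt, 1);              /* creator holds one ref */
            t->name = strdup(name);
            return t;
    }

    static topic_t *topic_keep (topic_t *t) {        /* cf. ..topic_keep() */
            atomic_fetch_add(&t->refcnt, 1);
            return t;
    }

    static void topic_destroy (topic_t *t) {         /* cf. ..topic_destroy0() */
            if (atomic_fetch_sub(&t->refcnt, 1) == 1) {
                    free(t->name);                   /* last ref: free object */
                    free(t);
            }
    }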

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdkafka_transport.c
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdkafka_transport.c b/thirdparty/librdkafka-0.11.1/src/rdkafka_transport.c
deleted file mode 100644
index 3a5f93c..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdkafka_transport.c
+++ /dev/null
@@ -1,1523 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2015, Magnus Edenhill
- * All rights reserved.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met: 
- * 
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer. 
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution. 
- * 
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-#ifdef _MSC_VER
-#pragma comment(lib, "ws2_32.lib")
-#endif
-
-#define __need_IOV_MAX
-
-#define _DARWIN_C_SOURCE  /* MSG_DONTWAIT */
-
-#include "rdkafka_int.h"
-#include "rdaddr.h"
-#include "rdkafka_transport.h"
-#include "rdkafka_transport_int.h"
-#include "rdkafka_broker.h"
-
-#include <errno.h>
-
-#if WITH_VALGRIND
-/* OpenSSL relies on uninitialized memory, which Valgrind will whine about.
- * We use in-code Valgrind macros to suppress those warnings. */
-#include <valgrind/memcheck.h>
-#else
-#define VALGRIND_MAKE_MEM_DEFINED(A,B)
-#endif
-
-
-#ifdef _MSC_VER
-#define socket_errno WSAGetLastError()
-#else
-#include <sys/socket.h>
-#define socket_errno errno
-#define SOCKET_ERROR -1
-#endif
-
-/* AIX doesn't have MSG_DONTWAIT */
-#ifndef MSG_DONTWAIT
-#  define MSG_DONTWAIT MSG_NONBLOCK
-#endif
-
-
-#if WITH_SSL
-static mtx_t *rd_kafka_ssl_locks;
-static int    rd_kafka_ssl_locks_cnt;
-#endif
-
-
-
-/**
- * Low-level socket close
- */
-static void rd_kafka_transport_close0 (rd_kafka_t *rk, int s) {
-        if (rk->rk_conf.closesocket_cb)
-                rk->rk_conf.closesocket_cb(s, rk->rk_conf.opaque);
-        else {
-#ifndef _MSC_VER
-		close(s);
-#else
-		closesocket(s);
-#endif
-        }
-
-}
-
-/**
- * Close and destroy a transport handle
- */
-void rd_kafka_transport_close (rd_kafka_transport_t *rktrans) {
-#if WITH_SSL
-	if (rktrans->rktrans_ssl) {
-		SSL_shutdown(rktrans->rktrans_ssl);
-		SSL_free(rktrans->rktrans_ssl);
-	}
-#endif
-
-        rd_kafka_sasl_close(rktrans);
-
-	if (rktrans->rktrans_recv_buf)
-		rd_kafka_buf_destroy(rktrans->rktrans_recv_buf);
-
-	if (rktrans->rktrans_s != -1)
-                rd_kafka_transport_close0(rktrans->rktrans_rkb->rkb_rk,
-                                          rktrans->rktrans_s);
-
-	rd_free(rktrans);
-}
-
-
-static const char *socket_strerror(int err) {
-#ifdef _MSC_VER
-	static RD_TLS char buf[256];
-        rd_strerror_w32(err, buf, sizeof(buf));
-	return buf;
-#else
-	return rd_strerror(err);
-#endif
-}
-
-
-
-
-#ifndef _MSC_VER
-/**
- * @brief sendmsg() abstraction, converting a list of segments to iovecs.
- * @remark should only be called if the number of segments is > 1.
- */
-ssize_t rd_kafka_transport_socket_sendmsg (rd_kafka_transport_t *rktrans,
-                                           rd_slice_t *slice,
-                                           char *errstr, size_t errstr_size) {
-        struct iovec iov[IOV_MAX];
-        struct msghdr msg = { .msg_iov = iov };
-        size_t iovlen;
-        ssize_t r;
-
-        rd_slice_get_iov(slice, msg.msg_iov, &iovlen, IOV_MAX,
-                         /* FIXME: Measure the effects of this */
-                         rktrans->rktrans_sndbuf_size);
-        msg.msg_iovlen = (typeof(msg.msg_iovlen))iovlen;
-
-#ifdef sun
-        /* See recvmsg() comment. Setting it here to be safe. */
-        socket_errno = EAGAIN;
-#endif
-
-        r = sendmsg(rktrans->rktrans_s, &msg, MSG_DONTWAIT
-#ifdef MSG_NOSIGNAL
-                    | MSG_NOSIGNAL
-#endif
-                );
-
-        if (r == -1) {
-                if (socket_errno == EAGAIN)
-                        return 0;
-                rd_snprintf(errstr, errstr_size, "%s", rd_strerror(errno));
-        }
-
-        /* Update buffer read position */
-        rd_slice_read(slice, NULL, (size_t)r);
-
-        return r;
-}
-#endif
-
-
-/**
- * @brief Plain send() abstraction
- */
-static ssize_t
-rd_kafka_transport_socket_send0 (rd_kafka_transport_t *rktrans,
-                                 rd_slice_t *slice,
-                                 char *errstr, size_t errstr_size) {
-        ssize_t sum = 0;
-        const void *p;
-        size_t rlen;
-
-        while ((rlen = rd_slice_peeker(slice, &p))) {
-                ssize_t r;
-
-                r = send(rktrans->rktrans_s, p,
-#ifdef _MSC_VER
-                         (int)rlen, (int)0
-#else
-                         rlen, 0
-#endif
-                );
-
-#ifdef _MSC_VER
-                if (unlikely(r == SOCKET_ERROR)) {
-                        if (sum > 0 || WSAGetLastError() == WSAEWOULDBLOCK)
-                                return sum;
-                        else {
-                                rd_snprintf(errstr, errstr_size, "%s",
-                                            socket_strerror(WSAGetLastError()));
-                                return -1;
-                        }
-                }
-#else
-                if (unlikely(r <= 0)) {
-                        if (r == 0 || errno == EAGAIN)
-                                return 0;
-                        rd_snprintf(errstr, errstr_size, "%s",
-                                    socket_strerror(socket_errno));
-                        return -1;
-                }
-#endif
-
-                /* Update buffer read position */
-                rd_slice_read(slice, NULL, (size_t)r);
-
-                sum += r;
-
-                /* FIXME: remove this and try again immediately and let
-                 *        the next write() call fail instead? */
-                if ((size_t)r < rlen)
-                        break;
-        }
-
-        return sum;
-}
-
-
-static ssize_t
-rd_kafka_transport_socket_send (rd_kafka_transport_t *rktrans,
-                                rd_slice_t *slice,
-                                char *errstr, size_t errstr_size) {
-#ifndef _MSC_VER
-        /* FIXME: Use sendmsg() with iovecs if there's more than one segment
-         * remaining, otherwise (or if platform does not have sendmsg)
-         * use plain send(). */
-        return rd_kafka_transport_socket_sendmsg(rktrans, slice,
-                                                 errstr, errstr_size);
-#endif
-        return rd_kafka_transport_socket_send0(rktrans, slice,
-                                               errstr, errstr_size);
-}
-
-
-
-#ifndef _MSC_VER
-/**
- * @brief recvmsg() abstraction, converting a list of segments to iovecs.
- * @remark should only be called if the number of segments is > 1.
- */
-static ssize_t
-rd_kafka_transport_socket_recvmsg (rd_kafka_transport_t *rktrans,
-                                   rd_buf_t *rbuf,
-                                   char *errstr, size_t errstr_size) {
-        ssize_t r;
-        struct iovec iov[IOV_MAX];
-        struct msghdr msg = { .msg_iov = iov };
-        size_t iovlen;
-
-        rd_buf_get_write_iov(rbuf, msg.msg_iov, &iovlen, IOV_MAX,
-                             /* FIXME: Measure the effects of this */
-                             rktrans->rktrans_rcvbuf_size);
-        msg.msg_iovlen = (typeof(msg.msg_iovlen))iovlen;
-
-#ifdef sun
-        /* SunOS doesn't seem to set errno when recvmsg() fails
-         * due to no data and MSG_DONTWAIT is set. */
-        socket_errno = EAGAIN;
-#endif
-        r = recvmsg(rktrans->rktrans_s, &msg, MSG_DONTWAIT);
-        if (unlikely(r <= 0)) {
-                if (r == -1 && socket_errno == EAGAIN)
-                        return 0;
-                else if (r == 0) {
-                        /* Receive 0 after POLLIN event means
-                         * connection closed. */
-                        rd_snprintf(errstr, errstr_size, "Disconnected");
-                        return -1;
-                } else if (r == -1) {
-                        rd_snprintf(errstr, errstr_size, "%s",
-                                    rd_strerror(errno));
-                        return -1;
-                }
-        }
-
-        /* Update buffer write position */
-        rd_buf_write(rbuf, NULL, (size_t)r);
-
-        return r;
-}
-#endif
-
-
-/**
- * @brief Plain recv()
- */
-static ssize_t
-rd_kafka_transport_socket_recv0 (rd_kafka_transport_t *rktrans,
-                                 rd_buf_t *rbuf,
-                                 char *errstr, size_t errstr_size) {
-        ssize_t sum = 0;
-        void *p;
-        size_t len;
-
-        while ((len = rd_buf_get_writable(rbuf, &p))) {
-                ssize_t r;
-
-                r = recv(rktrans->rktrans_s, p,
-#ifdef _MSC_VER
-                         (int)
-#endif
-                         len,
-                         0);
-
-#ifdef _MSC_VER
-                if (unlikely(r == SOCKET_ERROR)) {
-                        if (WSAGetLastError() == WSAEWOULDBLOCK)
-                                return sum;
-                        rd_snprintf(errstr, errstr_size, "%s",
-                                    socket_strerror(WSAGetLastError()));
-                        return -1;
-                }
-#else
-                if (unlikely(r <= 0)) {
-                        if (r == -1 && socket_errno == EAGAIN)
-                                return 0;
-                        else if (r == 0) {
-                                /* Receive 0 after POLLIN event means
-                                 * connection closed. */
-                                rd_snprintf(errstr, errstr_size,
-                                            "Disconnected");
-                                return -1;
-                        } else if (r == -1) {
-                                rd_snprintf(errstr, errstr_size, "%s",
-                                            rd_strerror(errno));
-                                return -1;
-                        }
-                }
-#endif
-
-                /* Update buffer write position */
-                rd_buf_write(rbuf, NULL, (size_t)r);
-
-                sum += r;
-
-                /* FIXME: remove this and try again immediately and let
-                 *        the next recv() call fail instead? */
-                if ((size_t)r < len)
-                        break;
-        }
-        return sum;
-}
-
-
-static ssize_t
-rd_kafka_transport_socket_recv (rd_kafka_transport_t *rktrans,
-                                rd_buf_t *buf,
-                                char *errstr, size_t errstr_size) {
-#ifndef _MSC_VER
-        /* FIXME: Use recvmsg() with iovecs if there's more than one segment
-         * remaining, otherwise (or if platform does not have recvmsg)
-         * use plain recv(). */
-        return rd_kafka_transport_socket_recvmsg(rktrans, buf,
-                                                 errstr, errstr_size);
-#endif
-        return rd_kafka_transport_socket_recv0(rktrans, buf,
-                                               errstr, errstr_size);
-}
-
-
-
-
-
-/**
- * CONNECT state has failed (errstr != NULL) or is done (TCP is up, SSL is working, ...).
- * From this state we either hand control back to the broker code,
- * or, if authentication is configured, we enter the AUTH state.
- */
-void rd_kafka_transport_connect_done (rd_kafka_transport_t *rktrans,
-				      char *errstr) {
-	rd_kafka_broker_t *rkb = rktrans->rktrans_rkb;
-
-	rd_kafka_broker_connect_done(rkb, errstr);
-}
-
-
-
-#if WITH_SSL
-
-
-/**
- * Serves the entire OpenSSL error queue and logs each error.
- * The last error is not logged but returned in 'errstr'.
- *
- * If 'rkb' is non-NULL broker-specific logging will be used,
- * else it will fall back on global 'rk' debugging.
- */
-static char *rd_kafka_ssl_error (rd_kafka_t *rk, rd_kafka_broker_t *rkb,
-				 char *errstr, size_t errstr_size) {
-    unsigned long l;
-    const char *file, *data;
-    int line, flags;
-    int cnt = 0;
-
-    while ((l = ERR_get_error_line_data(&file, &line, &data, &flags)) != 0) {
-	char buf[256];
-
-	if (cnt++ > 0) {
-		/* Log last message */
-		if (rkb)
-			rd_rkb_log(rkb, LOG_ERR, "SSL", "%s", errstr);
-		else
-			rd_kafka_log(rk, LOG_ERR, "SSL", "%s", errstr);
-	}
-	
-	ERR_error_string_n(l, buf, sizeof(buf));
-
-	rd_snprintf(errstr, errstr_size, "%s:%d: %s: %s",
-		    file, line, buf, (flags & ERR_TXT_STRING) ? data : "");
-
-    }
-
-    if (cnt == 0)
-    	    rd_snprintf(errstr, errstr_size, "No error");
-    
-    return errstr;
-}
-
-
-static void rd_kafka_transport_ssl_lock_cb (int mode, int i,
-					    const char *file, int line) {
-	if (mode & CRYPTO_LOCK)
-		mtx_lock(&rd_kafka_ssl_locks[i]);
-	else
-		mtx_unlock(&rd_kafka_ssl_locks[i]);
-}
-
-static unsigned long rd_kafka_transport_ssl_threadid_cb (void) {
-#ifdef _MSC_VER
-        /* Windows makes a distinction between thread handle
-         * and thread id, which means we can't use the
-         * thrd_current() API that returns the handle. */
-        return (unsigned long)GetCurrentThreadId();
-#else
-        return (unsigned long)(intptr_t)thrd_current();
-#endif
-}
-
-
-/**
- * Global OpenSSL cleanup.
- */
-void rd_kafka_transport_ssl_term (void) {
-	int i;
-
-	CRYPTO_set_id_callback(NULL);
-	CRYPTO_set_locking_callback(NULL);
-        CRYPTO_cleanup_all_ex_data();
-
-	for (i = 0 ; i < rd_kafka_ssl_locks_cnt ; i++)
-		mtx_destroy(&rd_kafka_ssl_locks[i]);
-
-	rd_free(rd_kafka_ssl_locks);
-
-}
-
-
-/**
- * Global OpenSSL init.
- */
-void rd_kafka_transport_ssl_init (void) {
-	int i;
-	
-	rd_kafka_ssl_locks_cnt = CRYPTO_num_locks();
-	rd_kafka_ssl_locks = rd_malloc(rd_kafka_ssl_locks_cnt *
-				       sizeof(*rd_kafka_ssl_locks));
-	for (i = 0 ; i < rd_kafka_ssl_locks_cnt ; i++)
-		mtx_init(&rd_kafka_ssl_locks[i], mtx_plain);
-
-	CRYPTO_set_id_callback(rd_kafka_transport_ssl_threadid_cb);
-	CRYPTO_set_locking_callback(rd_kafka_transport_ssl_lock_cb);
-	
-	SSL_load_error_strings();
-	SSL_library_init();
-	OpenSSL_add_all_algorithms();
-}
-
-
-/**
- * Set transport IO event polling based on SSL error.
- *
- * Returns -1 on permanent errors.
- *
- * Locality: broker thread
- */
-static RD_INLINE int
-rd_kafka_transport_ssl_io_update (rd_kafka_transport_t *rktrans, int ret,
-				  char *errstr, size_t errstr_size) {
-	int serr = SSL_get_error(rktrans->rktrans_ssl, ret);
-	int serr2;
-
-	switch (serr)
-	{
-	case SSL_ERROR_WANT_READ:
-		rd_kafka_transport_poll_set(rktrans, POLLIN);
-		break;
-
-	case SSL_ERROR_WANT_WRITE:
-	case SSL_ERROR_WANT_CONNECT:
-		rd_kafka_transport_poll_set(rktrans, POLLOUT);
-		break;
-
-	case SSL_ERROR_SYSCALL:
-		if (!(serr2 = SSL_get_error(rktrans->rktrans_ssl, ret))) {
-			if (ret == 0)
-				errno = ECONNRESET;
-			rd_snprintf(errstr, errstr_size,
-				    "SSL syscall error: %s", rd_strerror(errno));
-		} else
-			rd_snprintf(errstr, errstr_size,
-				    "SSL syscall error number: %d: %s", serr2,
-				    rd_strerror(errno));
-		return -1;
-
-        case SSL_ERROR_ZERO_RETURN:
-                rd_snprintf(errstr, errstr_size, "Disconnected");
-                return -1;
-
-	default:
-		rd_kafka_ssl_error(NULL, rktrans->rktrans_rkb,
-				   errstr, errstr_size);
-		return -1;
-	}
-
-	return 0;
-}
-
-static ssize_t
-rd_kafka_transport_ssl_send (rd_kafka_transport_t *rktrans,
-                             rd_slice_t *slice,
-                             char *errstr, size_t errstr_size) {
-	ssize_t sum = 0;
-        const void *p;
-        size_t rlen;
-
-        while ((rlen = rd_slice_peeker(slice, &p))) {
-                int r;
-
-                r = SSL_write(rktrans->rktrans_ssl, p, (int)rlen);
-
-		if (unlikely(r <= 0)) {
-			if (rd_kafka_transport_ssl_io_update(rktrans, r,
-							     errstr,
-							     errstr_size) == -1)
-				return -1;
-			else
-				return sum;
-		}
-
-                /* Update buffer read position */
-                rd_slice_read(slice, NULL, (size_t)r);
-
-		sum += r;
-                 /* FIXME: remove this and try again immediately and let
-                  *        the next SSL_write() call fail instead? */
-                if ((size_t)r < rlen)
-                        break;
-
-	}
-	return sum;
-}
-
-static ssize_t
-rd_kafka_transport_ssl_recv (rd_kafka_transport_t *rktrans,
-                             rd_buf_t *rbuf, char *errstr, size_t errstr_size) {
-	ssize_t sum = 0;
-        void *p;
-        size_t len;
-
-        while ((len = rd_buf_get_writable(rbuf, &p))) {
-		int r;
-
-                r = SSL_read(rktrans->rktrans_ssl, p, (int)len);
-
-		if (unlikely(r <= 0)) {
-			if (rd_kafka_transport_ssl_io_update(rktrans, r,
-							     errstr,
-							     errstr_size) == -1)
-				return -1;
-			else
-				return sum;
-		}
-
-                VALGRIND_MAKE_MEM_DEFINED(p, r);
-
-                /* Update buffer write position */
-                rd_buf_write(rbuf, NULL, (size_t)r);
-
-		sum += r;
-
-                 /* FIXME: remove this and try again immediately and let
-                  *        the next SSL_read() call fail instead? */
-                if ((size_t)r < len)
-                        break;
-
-	}
-	return sum;
-
-}
-
-
-/**
- * OpenSSL password query callback
- *
- * Locality: application thread
- */
-static int rd_kafka_transport_ssl_passwd_cb (char *buf, int size, int rwflag,
-					     void *userdata) {
-	rd_kafka_t *rk = userdata;
-	int pwlen;
-
-	rd_kafka_dbg(rk, SECURITY, "SSLPASSWD",
-		     "Private key file \"%s\" requires password",
-		     rk->rk_conf.ssl.key_location);
-
-	if (!rk->rk_conf.ssl.key_password) {
-		rd_kafka_log(rk, LOG_WARNING, "SSLPASSWD",
-			     "Private key file \"%s\" requires password but "
-			     "no password configured (ssl.key.password)",
-			     rk->rk_conf.ssl.key_location);
-		return -1;
-	}
-
-
-	pwlen = (int) strlen(rk->rk_conf.ssl.key_password);
-	memcpy(buf, rk->rk_conf.ssl.key_password, RD_MIN(pwlen, size));
-
-	return pwlen;
-}
-
-/**
- * Set up SSL for a newly connected connection
- *
- * Returns -1 on failure, else 0.
- */
-static int rd_kafka_transport_ssl_connect (rd_kafka_broker_t *rkb,
-					   rd_kafka_transport_t *rktrans,
-					   char *errstr, size_t errstr_size) {
-	int r;
-	char name[RD_KAFKA_NODENAME_SIZE];
-	char *t;
-
-	rktrans->rktrans_ssl = SSL_new(rkb->rkb_rk->rk_conf.ssl.ctx);
-	if (!rktrans->rktrans_ssl)
-		goto fail;
-
-	if (!SSL_set_fd(rktrans->rktrans_ssl, rktrans->rktrans_s))
-		goto fail;
-
-#if (OPENSSL_VERSION_NUMBER >= 0x0090806fL) && !defined(OPENSSL_NO_TLSEXT)
-	/* If non-numerical hostname, send it for SNI */
-	rd_snprintf(name, sizeof(name), "%s", rkb->rkb_nodename);
-	if ((t = strrchr(name, ':')))
-		*t = '\0';
-	if (!(/*ipv6*/(strchr(name, ':') &&
-		       strspn(name, "0123456789abcdefABCDEF:.[]%") == strlen(name)) ||
-	      /*ipv4*/strspn(name, "0123456789.") == strlen(name)) &&
-	    !SSL_set_tlsext_host_name(rktrans->rktrans_ssl, name))
-		goto fail;
-#endif
-
-	r = SSL_connect(rktrans->rktrans_ssl);
-	if (r == 1) {
-		/* Connected, highly unlikely since this is a
-		 * non-blocking operation. */
-		rd_kafka_transport_connect_done(rktrans, NULL);
-		return 0;
-	}
-
-		
-	if (rd_kafka_transport_ssl_io_update(rktrans, r,
-					     errstr, errstr_size) == -1)
-		return -1;
-	
-	return 0;
-
- fail:
-	rd_kafka_ssl_error(NULL, rkb, errstr, errstr_size);
-	return -1;
-}
-
-
-static RD_UNUSED int
-rd_kafka_transport_ssl_io_event (rd_kafka_transport_t *rktrans, int events) {
-	int r;
-	char errstr[512];
-
-	if (events & POLLOUT) {
-		r = SSL_write(rktrans->rktrans_ssl, NULL, 0);
-		if (rd_kafka_transport_ssl_io_update(rktrans, r,
-						     errstr,
-						     sizeof(errstr)) == -1)
-			goto fail;
-	}
-
-	return 0;
-
- fail:
-	/* Permanent error */
-	rd_kafka_broker_fail(rktrans->rktrans_rkb, LOG_ERR,
-                             RD_KAFKA_RESP_ERR__TRANSPORT,
-			     "%s", errstr);
-	return -1;
-}
-
-
-/**
- * Verify SSL handshake was valid.
- */
-static int rd_kafka_transport_ssl_verify (rd_kafka_transport_t *rktrans) {
-	long int rl;
-	X509 *cert;
-
-	cert = SSL_get_peer_certificate(rktrans->rktrans_ssl);
-	X509_free(cert);
-	if (!cert) {
-		rd_kafka_broker_fail(rktrans->rktrans_rkb, LOG_ERR,
-				     RD_KAFKA_RESP_ERR__SSL,
-				     "Broker did not provide a certificate");
-		return -1;
-	}
-
-	if ((rl = SSL_get_verify_result(rktrans->rktrans_ssl)) != X509_V_OK) {
-		rd_kafka_broker_fail(rktrans->rktrans_rkb, LOG_ERR,
-				     RD_KAFKA_RESP_ERR__SSL,
-				     "Failed to verify broker certificate: %s",
-				     X509_verify_cert_error_string(rl));
-		return -1;
-	}
-
-	rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "SSLVERIFY",
-		   "Broker SSL certificate verified");
-	return 0;
-}
-
-/**
- * SSL handshake handling.
- * Call repeatedly (based on IO events) until handshake is done.
- *
- * Returns -1 on error, 0 if handshake is still in progress, or 1 on completion.
- */
-static int rd_kafka_transport_ssl_handshake (rd_kafka_transport_t *rktrans) {
-	rd_kafka_broker_t *rkb = rktrans->rktrans_rkb;
-	char errstr[512];
-	int r;
-
-	r = SSL_do_handshake(rktrans->rktrans_ssl);
-	if (r == 1) {
-		/* SSL handshake done. Verify. */
-		if (rd_kafka_transport_ssl_verify(rktrans) == -1)
-			return -1;
-
-		rd_kafka_transport_connect_done(rktrans, NULL);
-		return 1;
-
-	} else if (rd_kafka_transport_ssl_io_update(rktrans, r,
-						    errstr,
-						    sizeof(errstr)) == -1) {
-		rd_kafka_broker_fail(rkb, LOG_ERR, RD_KAFKA_RESP_ERR__SSL,
-				     "SSL handshake failed: %s%s", errstr,
-				     strstr(errstr, "unexpected message") ?
-				     ": client authentication might be "
-				     "required (see broker log)" : "");
-		return -1;
-	}
-
-	return 0;
-}
-
-
-/**
- * Once per rd_kafka_t handle cleanup of OpenSSL
- *
- * Locality: any thread
- *
- * NOTE: rd_kafka_wrlock() MUST be held
- */
-void rd_kafka_transport_ssl_ctx_term (rd_kafka_t *rk) {
-	SSL_CTX_free(rk->rk_conf.ssl.ctx);
-	rk->rk_conf.ssl.ctx = NULL;
-}
-
-/**
- * Once per rd_kafka_t handle initialization of OpenSSL
- *
- * Locality: application thread
- *
- * NOTE: rd_kafka_wrlock() MUST be held
- */
-int rd_kafka_transport_ssl_ctx_init (rd_kafka_t *rk,
-				     char *errstr, size_t errstr_size) {
-	int r;
-	SSL_CTX *ctx;
-
-        if (errstr_size > 0)
-                errstr[0] = '\0';
-
-	ctx = SSL_CTX_new(SSLv23_client_method());
-        if (!ctx) {
-                rd_snprintf(errstr, errstr_size,
-                            "SSLv23_client_method() failed: ");
-                goto fail;
-        }
-
-#ifdef SSL_OP_NO_SSLv3
-	/* Disable SSLv3 (unsafe) */
-	SSL_CTX_set_options(ctx, SSL_OP_NO_SSLv3);
-#endif
-
-	/* Key file password callback */
-	SSL_CTX_set_default_passwd_cb(ctx, rd_kafka_transport_ssl_passwd_cb);
-	SSL_CTX_set_default_passwd_cb_userdata(ctx, rk);
-
-	/* Ciphers */
-	if (rk->rk_conf.ssl.cipher_suites) {
-		rd_kafka_dbg(rk, SECURITY, "SSL",
-			     "Setting cipher list: %s",
-			     rk->rk_conf.ssl.cipher_suites);
-		if (!SSL_CTX_set_cipher_list(ctx,
-					     rk->rk_conf.ssl.cipher_suites)) {
-                        /* Set a string that will prefix the
-                         * OpenSSL error message (which is lousy)
-                         * to make it more meaningful. */
-                        rd_snprintf(errstr, errstr_size,
-                                    "ssl.cipher.suites failed: ");
-                        goto fail;
-		}
-	}
-
-
-	if (rk->rk_conf.ssl.ca_location) {
-		/* CA certificate location, either file or directory. */
-		int is_dir = rd_kafka_path_is_dir(rk->rk_conf.ssl.ca_location);
-
-		rd_kafka_dbg(rk, SECURITY, "SSL",
-			     "Loading CA certificate(s) from %s %s",
-			     is_dir ? "directory":"file",
-			     rk->rk_conf.ssl.ca_location);
-		
-		r = SSL_CTX_load_verify_locations(ctx,
-						  !is_dir ?
-						  rk->rk_conf.ssl.
-						  ca_location : NULL,
-						  is_dir ?
-						  rk->rk_conf.ssl.
-						  ca_location : NULL);
-
-                if (r != 1) {
-                        rd_snprintf(errstr, errstr_size,
-                                    "ssl.ca.location failed: ");
-                        goto fail;
-                }
-        } else {
-                /* Use default CA certificate paths: ignore failures. */
-                r = SSL_CTX_set_default_verify_paths(ctx);
-                if (r != 1)
-                        rd_kafka_dbg(rk, SECURITY, "SSL",
-                                     "SSL_CTX_set_default_verify_paths() "
-                                     "failed: ignoring");
-        }
-
-	if (rk->rk_conf.ssl.crl_location) {
-		rd_kafka_dbg(rk, SECURITY, "SSL",
-			     "Loading CRL from file %s",
-			     rk->rk_conf.ssl.crl_location);
-
-		r = SSL_CTX_load_verify_locations(ctx,
-						  rk->rk_conf.ssl.crl_location,
-						  NULL);
-
-                if (r != 1) {
-                        rd_snprintf(errstr, errstr_size,
-                                    "ssl.crl.location failed: ");
-                        goto fail;
-                }
-
-
-		rd_kafka_dbg(rk, SECURITY, "SSL",
-			     "Enabling CRL checks");
-
-		X509_STORE_set_flags(SSL_CTX_get_cert_store(ctx),
-				     X509_V_FLAG_CRL_CHECK);
-	}
-
-	if (rk->rk_conf.ssl.cert_location) {
-		rd_kafka_dbg(rk, SECURITY, "SSL",
-			     "Loading certificate from file %s",
-			     rk->rk_conf.ssl.cert_location);
-
-		r = SSL_CTX_use_certificate_chain_file(ctx,
-						       rk->rk_conf.ssl.cert_location);
-
-                if (r != 1) {
-                        rd_snprintf(errstr, errstr_size,
-                                    "ssl.certificate.location failed: ");
-                        goto fail;
-                }
-	}
-
-	if (rk->rk_conf.ssl.key_location) {
-		rd_kafka_dbg(rk, SECURITY, "SSL",
-			     "Loading private key file from %s",
-			     rk->rk_conf.ssl.key_location);
-
-		r = SSL_CTX_use_PrivateKey_file(ctx,
-						rk->rk_conf.ssl.key_location,
-						SSL_FILETYPE_PEM);
-                if (r != 1) {
-                        rd_snprintf(errstr, errstr_size,
-                                    "ssl.key.location failed: ");
-                        goto fail;
-                }
-	}
-
-
-	SSL_CTX_set_mode(ctx, SSL_MODE_ENABLE_PARTIAL_WRITE);
-
-	rk->rk_conf.ssl.ctx = ctx;
-	return 0;
-
- fail:
-        r = (int)strlen(errstr);
-        rd_kafka_ssl_error(rk, NULL, errstr+r,
-                           (int)errstr_size > r ? (int)errstr_size - r : 0);
-	SSL_CTX_free(ctx);
-
-	return -1;
-}
-
-
-#endif /* WITH_SSL */
-
-
-ssize_t
-rd_kafka_transport_send (rd_kafka_transport_t *rktrans,
-                         rd_slice_t *slice, char *errstr, size_t errstr_size) {
-
-#if WITH_SSL
-        if (rktrans->rktrans_ssl)
-                return rd_kafka_transport_ssl_send(rktrans, slice,
-                                                   errstr, errstr_size);
-        else
-#endif
-                return rd_kafka_transport_socket_send(rktrans, slice,
-                                                      errstr, errstr_size);
-}
-
-
-ssize_t
-rd_kafka_transport_recv (rd_kafka_transport_t *rktrans, rd_buf_t *rbuf,
-                         char *errstr, size_t errstr_size) {
-#if WITH_SSL
-	if (rktrans->rktrans_ssl)
-                return rd_kafka_transport_ssl_recv(rktrans, rbuf,
-                                                   errstr, errstr_size);
-	else
-#endif
-                return rd_kafka_transport_socket_recv(rktrans, rbuf,
-                                                      errstr, errstr_size);
-}
-
-
-
-
-/**
- * Length framed receive handling.
- * Currently only supports the following framing:
- *     [int32_t:big_endian_length_of_payload][payload]
- *
- * To be used on POLLIN event, will return:
- *   -1: on fatal error (errstr will be updated, *rkbufp remains unset)
- *    0: still waiting for data (*rkbufp remains unset)
- *    1: data complete, (buffer returned in *rkbufp)
- */
-int rd_kafka_transport_framed_recv (rd_kafka_transport_t *rktrans,
-                                    rd_kafka_buf_t **rkbufp,
-                                    char *errstr, size_t errstr_size) {
-	rd_kafka_buf_t *rkbuf = rktrans->rktrans_recv_buf;
-	ssize_t r;
-	const int log_decode_errors = LOG_ERR;
-
-	/* States:
-	 *   !rktrans_recv_buf: initial state; set up buf to receive header.
-	 *    rkbuf_totlen == 0:   awaiting header
-	 *    rkbuf_totlen > 0:    awaiting payload
-	 */
-
-	if (!rkbuf) {
-                rkbuf = rd_kafka_buf_new(1, 4/*length field's length*/);
-                /* Set up buffer reader for the length field */
-                rd_buf_write_ensure(&rkbuf->rkbuf_buf, 4, 4);
-		rktrans->rktrans_recv_buf = rkbuf;
-	}
-
-
-        r = rd_kafka_transport_recv(rktrans, &rkbuf->rkbuf_buf,
-                                    errstr, errstr_size);
-	if (r == 0)
-		return 0;
-	else if (r == -1)
-		return -1;
-
-	if (rkbuf->rkbuf_totlen == 0) {
-		/* Frame length not known yet. */
-		int32_t frame_len;
-
-		if (rd_buf_write_pos(&rkbuf->rkbuf_buf) < sizeof(frame_len)) {
-			/* Wait for entire frame header. */
-			return 0;
-		}
-
-                /* Initialize reader */
-                rd_slice_init(&rkbuf->rkbuf_reader, &rkbuf->rkbuf_buf, 0, 4);
-
-		/* Read header: payload length */
-		rd_kafka_buf_read_i32(rkbuf, &frame_len);
-
-		if (frame_len < 0 ||
-		    frame_len > rktrans->rktrans_rkb->
-		    rkb_rk->rk_conf.recv_max_msg_size) {
-			rd_snprintf(errstr, errstr_size,
-				    "Invalid frame size %"PRId32, frame_len);
-			return -1;
-		}
-
-		rkbuf->rkbuf_totlen = 4 + frame_len;
-		if (frame_len == 0) {
-			/* Payload is empty, we're done. */
-			rktrans->rktrans_recv_buf = NULL;
-			*rkbufp = rkbuf;
-			return 1;
-		}
-
-		/* Allocate memory to hold entire frame payload in contiguous
-		 * memory. */
-                rd_buf_write_ensure_contig(&rkbuf->rkbuf_buf, frame_len);
-
-                /* Try reading directly, there is probably more data available*/
-                return rd_kafka_transport_framed_recv(rktrans, rkbufp,
-                                                      errstr, errstr_size);
-	}
-
-	if (rd_buf_write_pos(&rkbuf->rkbuf_buf) == rkbuf->rkbuf_totlen) {
-		/* Payload is complete. */
-		rktrans->rktrans_recv_buf = NULL;
-		*rkbufp = rkbuf;
-		return 1;
-	}
-
-	/* Wait for more data */
-	return 0;
-
- err_parse:
-	if (rkbuf)
-		rd_kafka_buf_destroy(rkbuf);
-        rd_snprintf(errstr, errstr_size, "Frame header parsing failed: %s",
-                    rd_kafka_err2str(rkbuf->rkbuf_err));
-	return -1;
-}
-
-
-/**
- * TCP connection established.
- * Set up socket options, SSL, etc.
- *
- * Locality: broker thread
- */
-static void rd_kafka_transport_connected (rd_kafka_transport_t *rktrans) {
-	rd_kafka_broker_t *rkb = rktrans->rktrans_rkb;
-        unsigned int slen;
-
-        rd_rkb_dbg(rkb, BROKER, "CONNECT",
-                   "Connected to %s",
-                   rd_sockaddr2str(rkb->rkb_addr_last,
-                                   RD_SOCKADDR2STR_F_PORT |
-                                   RD_SOCKADDR2STR_F_FAMILY));
-
-	/* Set socket send & receive buffer sizes if configured */
-	if (rkb->rkb_rk->rk_conf.socket_sndbuf_size != 0) {
-		if (setsockopt(rktrans->rktrans_s, SOL_SOCKET, SO_SNDBUF,
-			       (void *)&rkb->rkb_rk->rk_conf.socket_sndbuf_size,
-			       sizeof(rkb->rkb_rk->rk_conf.
-				      socket_sndbuf_size)) == SOCKET_ERROR)
-			rd_rkb_log(rkb, LOG_WARNING, "SNDBUF",
-				   "Failed to set socket send "
-				   "buffer size to %i: %s",
-				   rkb->rkb_rk->rk_conf.socket_sndbuf_size,
-				   socket_strerror(socket_errno));
-	}
-
-	if (rkb->rkb_rk->rk_conf.socket_rcvbuf_size != 0) {
-		if (setsockopt(rktrans->rktrans_s, SOL_SOCKET, SO_RCVBUF,
-			       (void *)&rkb->rkb_rk->rk_conf.socket_rcvbuf_size,
-			       sizeof(rkb->rkb_rk->rk_conf.
-				      socket_rcvbuf_size)) == SOCKET_ERROR)
-			rd_rkb_log(rkb, LOG_WARNING, "RCVBUF",
-				   "Failed to set socket receive "
-				   "buffer size to %i: %s",
-				   rkb->rkb_rk->rk_conf.socket_rcvbuf_size,
-				   socket_strerror(socket_errno));
-	}
-
-        /* Get send and receive buffer sizes to allow limiting
-         * the total number of bytes passed with iovecs to sendmsg()
-         * and recvmsg(). */
-        slen = sizeof(rktrans->rktrans_rcvbuf_size);
-        if (getsockopt(rktrans->rktrans_s, SOL_SOCKET, SO_RCVBUF,
-                       (void *)&rktrans->rktrans_rcvbuf_size,
-                       &slen) == SOCKET_ERROR) {
-                rd_rkb_log(rkb, LOG_WARNING, "RCVBUF",
-                           "Failed to get socket receive "
-                           "buffer size: %s: assuming 1MB",
-                           socket_strerror(socket_errno));
-                rktrans->rktrans_rcvbuf_size = 1024*1024;
-        } else if (rktrans->rktrans_rcvbuf_size < 1024 * 64)
-                rktrans->rktrans_rcvbuf_size = 1024*64; /* Use at least 64KB */
-
-        slen = sizeof(rktrans->rktrans_sndbuf_size);
-        if (getsockopt(rktrans->rktrans_s, SOL_SOCKET, SO_SNDBUF,
-                       (void *)&rktrans->rktrans_sndbuf_size,
-                       &slen) == SOCKET_ERROR) {
-                rd_rkb_log(rkb, LOG_WARNING, "SNDBUF",
-                           "Failed to get socket send "
-                           "buffer size: %s: assuming 1MB",
-                           socket_strerror(socket_errno));
-                rktrans->rktrans_sndbuf_size = 1024*1024;
-        } else if (rktrans->rktrans_sndbuf_size < 1024 * 64)
-                rktrans->rktrans_sndbuf_size = 1024*64; /* Use at least 64KB */
-
-
-#ifdef TCP_NODELAY
-	if (rkb->rkb_rk->rk_conf.socket_nagle_disable) {
-		int one = 1;
-		if (setsockopt(rktrans->rktrans_s, IPPROTO_TCP, TCP_NODELAY,
-			       (void *)&one, sizeof(one)) == SOCKET_ERROR)
-			rd_rkb_log(rkb, LOG_WARNING, "NAGLE",
-				   "Failed to disable Nagle (TCP_NODELAY) "
-				   "on socket %d: %s",
-				   rktrans->rktrans_s, socket_strerror(socket_errno));
-	}
-#endif
-
-
-#if WITH_SSL
-	if (rkb->rkb_proto == RD_KAFKA_PROTO_SSL ||
-	    rkb->rkb_proto == RD_KAFKA_PROTO_SASL_SSL) {
-		char errstr[512];
-
-		/* Set up SSL connection.
-		 * This is also an asynchronous operation so don't
-		 * propagate to broker_connect_done() just yet. */
-		if (rd_kafka_transport_ssl_connect(rkb, rktrans,
-						   errstr,
-						   sizeof(errstr)) == -1) {
-			rd_kafka_transport_connect_done(rktrans, errstr);
-			return;
-		}
-		return;
-	}
-#endif
-
-	/* Propagate connect success */
-	rd_kafka_transport_connect_done(rktrans, NULL);
-}
-
-
-
-/**
- * @brief Get the kernel SO_ERROR in \p errp for the given transport.
- * @returns 0 if getsockopt() was successful (and \p errp can be trusted),
- * else -1 in which case \p errp 's value is undefined.
- */
-static int rd_kafka_transport_get_socket_error (rd_kafka_transport_t *rktrans,
-						int *errp) {
-	socklen_t intlen = sizeof(*errp);
-
-	if (getsockopt(rktrans->rktrans_s, SOL_SOCKET,
-		       SO_ERROR, (void *)errp, &intlen) == -1) {
-		rd_rkb_dbg(rktrans->rktrans_rkb, BROKER, "SO_ERROR",
-			   "Failed to get socket error: %s",
-			   socket_strerror(socket_errno));
-		return -1;
-	}
-
-	return 0;
-}
-
-
-/**
- * IO event handler.
- *
- * Locality: broker thread
- */
-static void rd_kafka_transport_io_event (rd_kafka_transport_t *rktrans,
-					 int events) {
-	char errstr[512];
-	int r;
-	rd_kafka_broker_t *rkb = rktrans->rktrans_rkb;
-
-	switch (rkb->rkb_state)
-	{
-	case RD_KAFKA_BROKER_STATE_CONNECT:
-#if WITH_SSL
-		if (rktrans->rktrans_ssl) {
-			/* Currently setting up SSL connection:
-			 * perform handshake. */
-			rd_kafka_transport_ssl_handshake(rktrans);
-			return;
-		}
-#endif
-
-		/* Asynchronous connect finished, read status. */
-		if (!(events & (POLLOUT|POLLERR|POLLHUP)))
-			return;
-
-		if (rd_kafka_transport_get_socket_error(rktrans, &r) == -1) {
-			rd_kafka_broker_fail(
-                                rkb, LOG_ERR, RD_KAFKA_RESP_ERR__TRANSPORT,
-                                "Connect to %s failed: "
-                                "unable to get status from "
-                                "socket %d: %s",
-                                rd_sockaddr2str(rkb->rkb_addr_last,
-                                                RD_SOCKADDR2STR_F_PORT |
-                                                RD_SOCKADDR2STR_F_FAMILY),
-                                rktrans->rktrans_s,
-                                rd_strerror(socket_errno));
-		} else if (r != 0) {
-			/* Connect failed */
-                        errno = r;
-			rd_snprintf(errstr, sizeof(errstr),
-				    "Connect to %s failed: %s",
-                                    rd_sockaddr2str(rkb->rkb_addr_last,
-                                                    RD_SOCKADDR2STR_F_PORT |
-                                                    RD_SOCKADDR2STR_F_FAMILY),
-                                    rd_strerror(r));
-
-			rd_kafka_transport_connect_done(rktrans, errstr);
-		} else {
-			/* Connect succeeded */
-			rd_kafka_transport_connected(rktrans);
-		}
-		break;
-
-	case RD_KAFKA_BROKER_STATE_AUTH:
-		/* SASL handshake */
-		if (rd_kafka_sasl_io_event(rktrans, events,
-					   errstr, sizeof(errstr)) == -1) {
-			errno = EINVAL;
-			rd_kafka_broker_fail(rkb, LOG_ERR,
-					     RD_KAFKA_RESP_ERR__AUTHENTICATION,
-					     "SASL authentication failure: %s",
-					     errstr);
-			return;
-		}
-		break;
-
-	case RD_KAFKA_BROKER_STATE_APIVERSION_QUERY:
-	case RD_KAFKA_BROKER_STATE_AUTH_HANDSHAKE:
-	case RD_KAFKA_BROKER_STATE_UP:
-	case RD_KAFKA_BROKER_STATE_UPDATE:
-
-		if (events & POLLIN) {
-			while (rkb->rkb_state >= RD_KAFKA_BROKER_STATE_UP &&
-			       rd_kafka_recv(rkb) > 0)
-				;
-		}
-
-		if (events & POLLHUP) {
-			rd_kafka_broker_fail(rkb,
-                                             rkb->rkb_rk->rk_conf.
-                                             log_connection_close ?
-                                             LOG_NOTICE : LOG_DEBUG,
-                                             RD_KAFKA_RESP_ERR__TRANSPORT,
-					     "Connection closed");
-			return;
-		}
-
-		if (events & POLLOUT) {
-			while (rd_kafka_send(rkb) > 0)
-				;
-		}
-		break;
-
-	case RD_KAFKA_BROKER_STATE_INIT:
-	case RD_KAFKA_BROKER_STATE_DOWN:
-		rd_kafka_assert(rkb->rkb_rk, !*"bad state");
-	}
-}
-
-
-/**
- * Poll and serve IOs
- *
- * Locality: broker thread 
- */
-void rd_kafka_transport_io_serve (rd_kafka_transport_t *rktrans,
-                                  int timeout_ms) {
-	rd_kafka_broker_t *rkb = rktrans->rktrans_rkb;
-	int events;
-
-	if (rd_kafka_bufq_cnt(&rkb->rkb_waitresps) < rkb->rkb_max_inflight &&
-	    rd_kafka_bufq_cnt(&rkb->rkb_outbufs) > 0)
-		rd_kafka_transport_poll_set(rkb->rkb_transport, POLLOUT);
-
-	if ((events = rd_kafka_transport_poll(rktrans, timeout_ms)) <= 0)
-                return;
-
-        rd_kafka_transport_poll_clear(rktrans, POLLOUT);
-
-	rd_kafka_transport_io_event(rktrans, events);
-}
-
-
-/**
- * Initiate asynchronous connection attempt.
- *
- * Locality: broker thread
- */
-rd_kafka_transport_t *rd_kafka_transport_connect (rd_kafka_broker_t *rkb,
-						  const rd_sockaddr_inx_t *sinx,
-						  char *errstr,
-						  size_t errstr_size) {
-	rd_kafka_transport_t *rktrans;
-	int s = -1;
-	int on = 1;
-        int r;
-
-        rkb->rkb_addr_last = sinx;
-
-	s = rkb->rkb_rk->rk_conf.socket_cb(sinx->in.sin_family,
-					   SOCK_STREAM, IPPROTO_TCP,
-					   rkb->rkb_rk->rk_conf.opaque);
-	if (s == -1) {
-		rd_snprintf(errstr, errstr_size, "Failed to create socket: %s",
-			    socket_strerror(socket_errno));
-		return NULL;
-	}
-
-
-#ifdef SO_NOSIGPIPE
-	/* Disable SIGPIPE signalling for this socket on OSX */
-	if (setsockopt(s, SOL_SOCKET, SO_NOSIGPIPE, &on, sizeof(on)) == -1) 
-		rd_rkb_dbg(rkb, BROKER, "SOCKET",
-			   "Failed to set SO_NOSIGPIPE: %s",
-			   socket_strerror(socket_errno));
-#endif
-
-	/* Enable TCP keep-alives, if configured. */
-	if (rkb->rkb_rk->rk_conf.socket_keepalive) {
-#ifdef SO_KEEPALIVE
-		if (setsockopt(s, SOL_SOCKET, SO_KEEPALIVE,
-			       (void *)&on, sizeof(on)) == SOCKET_ERROR)
-			rd_rkb_dbg(rkb, BROKER, "SOCKET",
-				   "Failed to set SO_KEEPALIVE: %s",
-				   socket_strerror(socket_errno));
-#else
-		rd_rkb_dbg(rkb, BROKER, "SOCKET",
-			   "System does not support "
-			   "socket.keepalive.enable (SO_KEEPALIVE)");
-#endif
-	}
-
-        /* Set the socket to non-blocking */
-        if ((r = rd_fd_set_nonblocking(s))) {
-                rd_snprintf(errstr, errstr_size,
-                            "Failed to set socket non-blocking: %s",
-                            socket_strerror(r));
-                goto err;
-        }
-
-	rd_rkb_dbg(rkb, BROKER, "CONNECT", "Connecting to %s (%s) "
-		   "with socket %i",
-		   rd_sockaddr2str(sinx, RD_SOCKADDR2STR_F_FAMILY |
-				   RD_SOCKADDR2STR_F_PORT),
-		   rd_kafka_secproto_names[rkb->rkb_proto], s);
-
-	/* Connect to broker */
-        if (rkb->rkb_rk->rk_conf.connect_cb) {
-                r = rkb->rkb_rk->rk_conf.connect_cb(
-                        s, (struct sockaddr *)sinx, RD_SOCKADDR_INX_LEN(sinx),
-                        rkb->rkb_name, rkb->rkb_rk->rk_conf.opaque);
-        } else {
-                if (connect(s, (struct sockaddr *)sinx,
-                            RD_SOCKADDR_INX_LEN(sinx)) == SOCKET_ERROR &&
-                    (socket_errno != EINPROGRESS
-#ifdef _MSC_VER
-                     && socket_errno != WSAEWOULDBLOCK
-#endif
-                            ))
-                        r = socket_errno;
-                else
-                        r = 0;
-        }
-
-        if (r != 0) {
-		rd_rkb_dbg(rkb, BROKER, "CONNECT",
-			   "couldn't connect to %s: %s (%i)",
-			   rd_sockaddr2str(sinx,
-					   RD_SOCKADDR2STR_F_PORT |
-					   RD_SOCKADDR2STR_F_FAMILY),
-			   socket_strerror(r), r);
-		rd_snprintf(errstr, errstr_size,
-			    "Failed to connect to broker at %s: %s",
-			    rd_sockaddr2str(sinx, RD_SOCKADDR2STR_F_NICE),
-			    socket_strerror(r));
-		goto err;
-	}
-
-	/* Create transport handle */
-	rktrans = rd_calloc(1, sizeof(*rktrans));
-	rktrans->rktrans_rkb = rkb;
-	rktrans->rktrans_s = s;
-	rktrans->rktrans_pfd[rktrans->rktrans_pfd_cnt++].fd = s;
-        if (rkb->rkb_wakeup_fd[0] != -1) {
-                rktrans->rktrans_pfd[rktrans->rktrans_pfd_cnt].events = POLLIN;
-                rktrans->rktrans_pfd[rktrans->rktrans_pfd_cnt++].fd = rkb->rkb_wakeup_fd[0];
-        }
-
-
-	/* Poll writability to trigger on connection success/failure. */
-	rd_kafka_transport_poll_set(rktrans, POLLOUT);
-
-	return rktrans;
-
- err:
-	if (s != -1)
-                rd_kafka_transport_close0(rkb->rkb_rk, s);
-
-	return NULL;
-}
-
-
-
-void rd_kafka_transport_poll_set(rd_kafka_transport_t *rktrans, int event) {
-	rktrans->rktrans_pfd[0].events |= event;
-}
-
-void rd_kafka_transport_poll_clear(rd_kafka_transport_t *rktrans, int event) {
-	rktrans->rktrans_pfd[0].events &= ~event;
-}
-
-
-int rd_kafka_transport_poll(rd_kafka_transport_t *rktrans, int tmout) {
-        int r;
-#ifndef _MSC_VER
-	r = poll(rktrans->rktrans_pfd, rktrans->rktrans_pfd_cnt, tmout);
-	if (r <= 0)
-		return r;
-#else
-	r = WSAPoll(rktrans->rktrans_pfd, rktrans->rktrans_pfd_cnt, tmout);
-	if (r == 0) {
-		/* Workaround for broken WSAPoll() while connecting:
-		 * failed connection attempts are not indicated at all by WSAPoll()
-		 * so we need to check the socket error when Poll returns 0.
-		 * Issue #525 */
-		r = ECONNRESET;
-		if (unlikely(rktrans->rktrans_rkb->rkb_state ==
-			     RD_KAFKA_BROKER_STATE_CONNECT &&
-			     (rd_kafka_transport_get_socket_error(rktrans,
-								  &r) == -1 ||
-			      r != 0))) {
-			char errstr[512];
-			errno = r;
-			rd_snprintf(errstr, sizeof(errstr),
-				    "Connect to %s failed: %s",
-				    rd_sockaddr2str(rktrans->rktrans_rkb->
-						    rkb_addr_last,
-						    RD_SOCKADDR2STR_F_PORT |
-                                                    RD_SOCKADDR2STR_F_FAMILY),
-                                    socket_strerror(r));
-			rd_kafka_transport_connect_done(rktrans, errstr);
-			return -1;
-		} else
-			return 0;
-	} else if (r == SOCKET_ERROR)
-		return -1;
-#endif
-        rd_atomic64_add(&rktrans->rktrans_rkb->rkb_c.wakeups, 1);
-
-        if (rktrans->rktrans_pfd[1].revents & POLLIN) {
-                /* Read wake-up fd data and throw away, just used for wake-ups*/
-                char buf[512];
-                if (rd_read((int)rktrans->rktrans_pfd[1].fd,
-                            buf, sizeof(buf)) == -1) {
-                        /* Ignore warning */
-                }
-        }
-
-        return rktrans->rktrans_pfd[0].revents;
-}
-
-
-
-
-
-#if 0
-/**
- * Global cleanup.
- * This is dangerous and SHOULD NOT be called since it will rip
- * the rug from under the application if it uses any of this functionality
- * in its own code. This means we might leak some memory on exit.
- */
-void rd_kafka_transport_term (void) {
-#ifdef _MSC_VER
-	(void)WSACleanup(); /* FIXME: dangerous */
-#endif
-}
-#endif
- 
-void rd_kafka_transport_init(void) {
-#ifdef _MSC_VER
-	WSADATA d;
-	(void)WSAStartup(MAKEWORD(2, 2), &d);
-#endif
-}
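
The rd_kafka_transport_framed_recv() code above deals with the length-delimited framing Kafka uses on the wire: a big-endian int32 payload length followed by the payload itself. As a rough, self-contained illustration of that framing only (the real code is non-blocking and buffer-based; the helper names here are invented), a blocking reader could look like this:

    #include <arpa/inet.h>   /* ntohl() */
    #include <stdint.h>
    #include <stdlib.h>
    #include <unistd.h>

    /* Read exactly len bytes; return -1 on error or peer close. */
    static ssize_t read_full (int fd, void *buf, size_t len) {
            size_t of = 0;
            while (of < len) {
                    ssize_t r = read(fd, (char *)buf + of, len - of);
                    if (r <= 0)
                            return -1;
                    of += (size_t)r;
            }
            return (ssize_t)of;
    }

    /* Read one [int32 BE length][payload] frame; caller frees the result. */
    static char *read_frame (int fd, int32_t *lenp) {
            int32_t be_len;
            char *payload;

            if (read_full(fd, &be_len, sizeof(be_len)) == -1)
                    return NULL;
            *lenp = (int32_t)ntohl((uint32_t)be_len);  /* network byte order */
            if (*lenp < 0)
                    return NULL;                       /* invalid frame size */
            payload = malloc(*lenp > 0 ? (size_t)*lenp : 1);
            if (payload && *lenp > 0 &&
                read_full(fd, payload, (size_t)*lenp) == -1) {
                    free(payload);
                    return NULL;
            }
            return payload;
    }

Note that the deleted function additionally caps the frame length against the configured maximum receive message size before allocating, which any real reader should do as well.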

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdkafka_transport.h
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdkafka_transport.h b/thirdparty/librdkafka-0.11.1/src/rdkafka_transport.h
deleted file mode 100644
index fcd2580..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdkafka_transport.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2015, Magnus Edenhill
- * All rights reserved.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met: 
- * 
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer. 
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution. 
- * 
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#pragma once
-
-#ifndef _MSC_VER
-#include <poll.h>
-#endif
-
-#include "rdbuf.h"
-#include "rdaddr.h"
-
-typedef struct rd_kafka_transport_s rd_kafka_transport_t;
-
-void rd_kafka_transport_io_serve (rd_kafka_transport_t *rktrans,
-                                  int timeout_ms);
-
-ssize_t rd_kafka_transport_send (rd_kafka_transport_t *rktrans,
-                                 rd_slice_t *slice,
-                                 char *errstr, size_t errstr_size);
-ssize_t rd_kafka_transport_recv (rd_kafka_transport_t *rktrans,
-                                 rd_buf_t *rbuf,
-                                 char *errstr, size_t errstr_size);
-int rd_kafka_transport_framed_recv (rd_kafka_transport_t *rktrans,
-                                    rd_kafka_buf_t **rkbufp,
-                                    char *errstr, size_t errstr_size);
-struct rd_kafka_broker_s;
-rd_kafka_transport_t *rd_kafka_transport_connect(struct rd_kafka_broker_s *rkb, const rd_sockaddr_inx_t *sinx,
-                                                 char *errstr, size_t errstr_size);
-void rd_kafka_transport_connect_done (rd_kafka_transport_t *rktrans,
-				      char *errstr);
-
-void rd_kafka_transport_close(rd_kafka_transport_t *rktrans);
-void rd_kafka_transport_poll_set(rd_kafka_transport_t *rktrans, int event);
-void rd_kafka_transport_poll_clear(rd_kafka_transport_t *rktrans, int event);
-int rd_kafka_transport_poll(rd_kafka_transport_t *rktrans, int tmout);
-
-#if WITH_SSL
-void rd_kafka_transport_ssl_ctx_term (rd_kafka_t *rk);
-int rd_kafka_transport_ssl_ctx_init (rd_kafka_t *rk,
-				     char *errstr, size_t errstr_size);
-
-void rd_kafka_transport_ssl_term (void);
-void rd_kafka_transport_ssl_init (void);
-#endif
-void rd_kafka_transport_term (void);
-void rd_kafka_transport_init(void);
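
The API above reflects how the transport drives connections asynchronously: rd_kafka_transport_connect() starts a non-blocking connect and sets POLLOUT, and the later IO event reads SO_ERROR to find out whether the connect actually succeeded. A condensed, synchronous sketch of that same pattern (invented function name, error handling trimmed, illustration only):

    #include <errno.h>
    #include <fcntl.h>
    #include <poll.h>
    #include <sys/socket.h>

    /* Start a non-blocking connect, wait for writability, then read
     * SO_ERROR for the real outcome. Returns 0 on success, else an errno. */
    static int async_connect_wait (int s, const struct sockaddr *sa,
                                   socklen_t salen, int timeout_ms) {
            struct pollfd pfd = { .fd = s, .events = POLLOUT };
            int err = 0;
            socklen_t errlen = sizeof(err);

            fcntl(s, F_SETFL, fcntl(s, F_GETFL) | O_NONBLOCK);

            if (connect(s, sa, salen) == -1 && errno != EINPROGRESS)
                    return errno;            /* immediate failure */

            if (poll(&pfd, 1, timeout_ms) <= 0)
                    return ETIMEDOUT;        /* timeout or poll error */

            if (getsockopt(s, SOL_SOCKET, SO_ERROR, &err, &errlen) == -1)
                    return errno;
            return err;                      /* 0 if the connect succeeded */
    }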

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdkafka_transport_int.h
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdkafka_transport_int.h b/thirdparty/librdkafka-0.11.1/src/rdkafka_transport_int.h
deleted file mode 100644
index 8ae79b4..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdkafka_transport_int.h
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2015, Magnus Edenhill
- * All rights reserved.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met: 
- * 
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer. 
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution. 
- * 
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-#pragma once
-
-/* This header file is to be used by .c files needing access to the
- * rd_kafka_transport_t struct internals. */
-
-#include "rdkafka_sasl.h"
-
-#if WITH_SSL
-#include <openssl/ssl.h>
-#include <openssl/err.h>
-#endif
-
-struct rd_kafka_transport_s {	
-	int rktrans_s;
-	
-	rd_kafka_broker_t *rktrans_rkb;
-
-#if WITH_SSL
-	SSL *rktrans_ssl;
-#endif
-
-	struct {
-                void *state;               /* SASL implementation
-                                            * state handle */
-
-                int           complete;    /* Auth was completed early
-					    * from the client's perspective
-					    * (but we might still have to
-                                            *  wait for server reply). */
-
-                /* SASL framing buffers */
-		struct msghdr msg;
-		struct iovec  iov[2];
-
-		char          *recv_buf;
-		int            recv_of;    /* Received byte count */
-		int            recv_len;   /* Expected receive length for
-					    * current frame. */
-	} rktrans_sasl;
-
-	rd_kafka_buf_t *rktrans_recv_buf;  /* Used with framed_recvmsg */
-
-        /* Two pollable fds:
-         * - TCP socket
-         * - wake-up fd
-         */
-#ifndef _MSC_VER
-        struct pollfd rktrans_pfd[2];
-#else
-        WSAPOLLFD rktrans_pfd[2];
-#endif
-        int rktrans_pfd_cnt;
-
-        size_t rktrans_rcvbuf_size;    /**< Socket receive buffer size */
-        size_t rktrans_sndbuf_size;    /**< Socket send buffer size */
-};
-

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdlist.c
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdlist.c b/thirdparty/librdkafka-0.11.1/src/rdlist.c
deleted file mode 100644
index 11cf14d..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdlist.c
+++ /dev/null
@@ -1,313 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2015, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "rd.h"
-#include "rdlist.h"
-
-
-void rd_list_dump (const char *what, const rd_list_t *rl) {
-        int i;
-        printf("%s: (rd_list_t*)%p cnt %d, size %d, elems %p:\n",
-               what, rl, rl->rl_cnt, rl->rl_size, rl->rl_elems);
-        for (i = 0 ; i < rl->rl_cnt ; i++)
-                printf("  #%d: %p at &%p\n", i,
-                       rl->rl_elems[i], &rl->rl_elems[i]);
-}
-
-void rd_list_grow (rd_list_t *rl, size_t size) {
-        rd_assert(!(rl->rl_flags & RD_LIST_F_FIXED_SIZE));
-        rl->rl_size += (int)size;
-        if (unlikely(rl->rl_size == 0))
-                return; /* avoid zero allocations */
-        rl->rl_elems = rd_realloc(rl->rl_elems,
-                                  sizeof(*rl->rl_elems) * rl->rl_size);
-}
-
-rd_list_t *
-rd_list_init (rd_list_t *rl, int initial_size, void (*free_cb) (void *)) {
-        memset(rl, 0, sizeof(*rl));
-
-	if (initial_size > 0)
-		rd_list_grow(rl, initial_size);
-
-        rl->rl_free_cb = free_cb;
-
-        return rl;
-}
-
-rd_list_t *rd_list_new (int initial_size, void (*free_cb) (void *)) {
-	rd_list_t *rl = malloc(sizeof(*rl));
-	rd_list_init(rl, initial_size, free_cb);
-	rl->rl_flags |= RD_LIST_F_ALLOCATED;
-	return rl;
-}
-
-void rd_list_prealloc_elems (rd_list_t *rl, size_t elemsize, size_t size) {
-	size_t allocsize;
-	char *p;
-	size_t i;
-
-	rd_assert(!rl->rl_elems);
-
-	/* Allocation layout:
-	 *   void *ptrs[cnt];
-	 *   elems[elemsize][cnt];
-	 */
-
-	allocsize = (sizeof(void *) * size) + (elemsize * size);
-	rl->rl_elems = rd_malloc(allocsize);
-
-	/* p points to first element's memory. */
-	p = (char *)&rl->rl_elems[size];
-
-	/* Pointer -> elem mapping */
-	for (i = 0 ; i < size ; i++, p += elemsize)
-		rl->rl_elems[i] = p;
-
-	rl->rl_size = (int)size;
-	rl->rl_cnt = 0;
-	rl->rl_flags |= RD_LIST_F_FIXED_SIZE;
-}
-
-
-void rd_list_free_cb (rd_list_t *rl, void *ptr) {
-        if (rl->rl_free_cb && ptr)
-                rl->rl_free_cb(ptr);
-}
-
-
-void *rd_list_add (rd_list_t *rl, void *elem) {
-        if (rl->rl_cnt == rl->rl_size)
-                rd_list_grow(rl, rl->rl_size ? rl->rl_size * 2 : 16);
-	rl->rl_flags &= ~RD_LIST_F_SORTED;
-	if (elem)
-		rl->rl_elems[rl->rl_cnt] = elem;
-	return rl->rl_elems[rl->rl_cnt++];
-}
-
-static void rd_list_remove0 (rd_list_t *rl, int idx) {
-        rd_assert(idx < rl->rl_cnt);
-
-        if (idx + 1 < rl->rl_cnt)
-                memmove(&rl->rl_elems[idx],
-                        &rl->rl_elems[idx+1],
-                        sizeof(*rl->rl_elems) * (rl->rl_cnt - (idx+1)));
-        rl->rl_cnt--;
-}
-
-void *rd_list_remove (rd_list_t *rl, void *match_elem) {
-        void *elem;
-        int i;
-
-        RD_LIST_FOREACH(elem, rl, i) {
-                if (elem == match_elem) {
-                        rd_list_remove0(rl, i);
-                        return elem;
-                }
-        }
-
-        return NULL;
-}
-
-
-void *rd_list_remove_cmp (rd_list_t *rl, void *match_elem,
-                          int (*cmp) (void *_a, void *_b)) {
-        void *elem;
-        int i;
-
-        RD_LIST_FOREACH(elem, rl, i) {
-                if (match_elem == elem ||
-                    !cmp(elem, match_elem)) {
-                        rd_list_remove0(rl, i);
-                        return elem;
-                }
-        }
-
-        return NULL;
-}
-
-
-/**
- * Trampoline to avoid the double pointers in callbacks.
- *
- * rl_elems is a **, but to avoid having the application do the cumbersome
- * ** -> * casting we wrap this here and provide a simple * pointer to
- * the callbacks.
- *
- * This is true for all list comparator uses, i.e., both sort() and find().
- */
-static RD_TLS int (*rd_list_cmp_curr) (const void *, const void *);
-
-static RD_INLINE
-int rd_list_cmp_trampoline (const void *_a, const void *_b) {
-	const void *a = *(const void **)_a, *b = *(const void **)_b;
-
-	return rd_list_cmp_curr(a, b);
-}
-
-void rd_list_sort (rd_list_t *rl, int (*cmp) (const void *, const void *)) {
-	rd_list_cmp_curr = cmp;
-        qsort(rl->rl_elems, rl->rl_cnt, sizeof(*rl->rl_elems),
-	      rd_list_cmp_trampoline);
-	rl->rl_flags |= RD_LIST_F_SORTED;
-}
-
-void rd_list_clear (rd_list_t *rl) {
-        rl->rl_cnt = 0;
-	rl->rl_flags &= ~RD_LIST_F_SORTED;
-}
-
-
-void rd_list_destroy (rd_list_t *rl) {
-
-        if (rl->rl_elems) {
-                int i;
-                if (rl->rl_free_cb) {
-                        for (i = 0 ; i < rl->rl_cnt ; i++)
-                                if (rl->rl_elems[i])
-                                        rl->rl_free_cb(rl->rl_elems[i]);
-                }
-
-		rd_free(rl->rl_elems);
-        }
-
-	if (rl->rl_flags & RD_LIST_F_ALLOCATED)
-		rd_free(rl);
-}
-
-
-void *rd_list_elem (const rd_list_t *rl, int idx) {
-        if (likely(idx < rl->rl_cnt))
-                return (void *)rl->rl_elems[idx];
-        return NULL;
-}
-
-void *rd_list_find (const rd_list_t *rl, const void *match,
-                    int (*cmp) (const void *, const void *)) {
-        int i;
-        const void *elem;
-
-	if (rl->rl_flags & RD_LIST_F_SORTED) {
-		void **r;
-		rd_list_cmp_curr = cmp;
-		r = bsearch(&match/*ptrptr to match elems*/,
-			    rl->rl_elems, rl->rl_cnt,
-			    sizeof(*rl->rl_elems), rd_list_cmp_trampoline);
-		return r ? *r : NULL;
-	}
-
-        RD_LIST_FOREACH(elem, rl, i) {
-                if (!cmp(match, elem))
-                        return (void *)elem;
-        }
-
-        return NULL;
-}
-
-
-int rd_list_cmp (const rd_list_t *a, rd_list_t *b,
-		 int (*cmp) (const void *, const void *)) {
-	int i;
-
-	i = a->rl_cnt - b->rl_cnt;
-	if (i)
-		return i;
-
-	for (i = 0 ; i < a->rl_cnt ; i++) {
-		int r = cmp(a->rl_elems[i], b->rl_elems[i]);
-		if (r)
-			return r;
-	}
-
-	return 0;
-}
-
-
-/**
- * @brief Simple element pointer comparator
- */
-int rd_list_cmp_ptr (const void *a, const void *b) {
-        if (a < b)
-                return -1;
-        else if (a > b)
-                return 1;
-        return 0;
-}
-
-
-void rd_list_apply (rd_list_t *rl,
-                    int (*cb) (void *elem, void *opaque), void *opaque) {
-        void *elem;
-        int i;
-
-        RD_LIST_FOREACH(elem, rl, i) {
-                if (!cb(elem, opaque)) {
-                        rd_list_remove0(rl, i);
-                        i--;
-                }
-        }
-
-        return;
-}
-
-
-/**
- * @brief Default element copier that simply assigns the original pointer.
- */
-static void *rd_list_nocopy_ptr (const void *elem, void *opaque) {
-        return (void *)elem;
-}
-
-
-rd_list_t *rd_list_copy (const rd_list_t *src,
-                         void *(*copy_cb) (const void *elem, void *opaque),
-                         void *opaque) {
-        rd_list_t *dst;
-
-        dst = rd_list_new(src->rl_cnt, src->rl_free_cb);
-
-        rd_list_copy_to(dst, src, copy_cb, opaque);
-        return dst;
-}
-
-
-void rd_list_copy_to (rd_list_t *dst, const rd_list_t *src,
-                      void *(*copy_cb) (const void *elem, void *opaque),
-                      void *opaque) {
-        void *elem;
-        int i;
-
-        if (!copy_cb)
-                copy_cb = rd_list_nocopy_ptr;
-
-        RD_LIST_FOREACH(elem, src, i) {
-                void *celem = copy_cb(elem, opaque);
-                if (celem)
-                        rd_list_add(dst, celem);
-        }
-}
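
For reference, the list implementation deleted above follows a plain create/add/sort/find/destroy pattern: sort() stashes the comparator in a thread-local and reuses it through a trampoline so that find() can bsearch() a sorted list. The sketch below is illustrative only; it assumes it is compiled inside the librdkafka source tree (rd.h, rdlist.h) and that rd_strdup()/rd_free() behave like strdup()/free().

  #include <stdio.h>
  #include <string.h>
  #include "rd.h"
  #include "rdlist.h"

  /* Comparator shared by rd_list_sort() and rd_list_find(). */
  static int broker_cmp (const void *a, const void *b) {
      return strcmp((const char *)a, (const char *)b);
  }

  static void list_example (void) {
      rd_list_t *rl = rd_list_new(0, rd_free);   /* free_cb frees each element on destroy */

      rd_list_add(rl, rd_strdup("broker-1"));
      rd_list_add(rl, rd_strdup("broker-2"));

      rd_list_sort(rl, broker_cmp);              /* sets RD_LIST_F_SORTED: find() will bsearch */

      if (rd_list_find(rl, "broker-2", broker_cmp))
          printf("found, %d element(s)\n", rd_list_cnt(rl));

      rd_list_destroy(rl);                       /* frees the elements, the array, and rl itself */
  }

Since rd_list_new() sets RD_LIST_F_ALLOCATED, rd_list_destroy() also frees the list handle, not just its elements.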

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdlist.h
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdlist.h b/thirdparty/librdkafka-0.11.1/src/rdlist.h
deleted file mode 100644
index 27233e3..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdlist.h
+++ /dev/null
@@ -1,246 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2015, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#pragma once
-
-
-/**
- *
- * Simple light-weight append-only list to be used as a collection convenience.
- *
- */
-
-typedef struct rd_list_s {
-        int    rl_size;
-        int    rl_cnt;
-        void **rl_elems;
-	void (*rl_free_cb) (void *);
-	int    rl_flags;
-#define RD_LIST_F_ALLOCATED  0x1  /* The rd_list_t is allocated,
-				   * will be free on destroy() */
-#define RD_LIST_F_SORTED     0x2  /* Set by sort(), cleared by any mutations.
-				   * When this flag is set bsearch() is used
-				   * by find(), otherwise a linear search. */
-#define RD_LIST_F_FIXED_SIZE 0x4  /* Assert on grow */
-#define RD_LIST_F_UNIQUE     0x8  /* Don't allow duplicates:
-                                   * ONLY ENFORCED BY CALLER. */
-} rd_list_t;
-
-
-/**
- * @brief Initialize a list, preallocate space for 'initial_size' elements
- *       (optional).
- *       List elements will optionally be freed by \p free_cb.
- *
- * @returns \p rl
- */
-rd_list_t *
-rd_list_init (rd_list_t *rl, int initial_size, void (*free_cb) (void *));
-
-
-/**
- * Allocate a new list pointer and initialize it according to rd_list_init().
- *
- * Use rd_list_destroy() to free.
- */
-rd_list_t *rd_list_new (int initial_size, void (*free_cb) (void *));
-
-
-/**
- * @brief Prepare list for an additional \p size elements.
- *        This is an optimization to avoid incremental grows.
- */
-void rd_list_grow (rd_list_t *rl, size_t size);
-
-/**
- * @brief Preallocate elements to avoid having to pass an allocated pointer to
- *        rd_list_add(), instead pass NULL to rd_list_add() and use the returned
- *        pointer as the element.
- *
- * @param elemsize element size
- * @param size number of elements
- *
- * @remark Preallocated element lists can't grow past \p size.
- */
-void rd_list_prealloc_elems (rd_list_t *rl, size_t elemsize, size_t size);
-
-
-/**
- * @brief Free a pointer using the list's free_cb
- *
- * @remark If no free_cb is set, or \p ptr is NULL, don't do anything
- *
- * Typical use is rd_list_free_cb(rd_list_remove_cmp(....));
- */
-void rd_list_free_cb (rd_list_t *rl, void *ptr);
-
-
-/**
- * @brief Append element to list
- *
- * @returns \p elem. If \p elem is NULL the default element for that index
- *          will be returned (for use with set_elems).
- */
-void *rd_list_add (rd_list_t *rl, void *elem);
-
-
-/**
- * Remove element from list.
- * This is a slow O(n) + memmove operation.
- * Returns the removed element.
- */
-void *rd_list_remove (rd_list_t *rl, void *match_elem);
-
-/**
- * Remove element from list using comparator.
- * See rd_list_remove()
- */
-void *rd_list_remove_cmp (rd_list_t *rl, void *match_elem,
-                         int (*cmp) (void *_a, void *_b));
-
-/**
- * Sort list using comparator
- */
-void rd_list_sort (rd_list_t *rl, int (*cmp) (const void *, const void *));
-
-
-/**
- * Empties the list (but does not free any memory)
- */
-void rd_list_clear (rd_list_t *rl);
-
-
-/**
- * Empties the list, frees the element array, and optionally frees
- * each element using the registered \c rl->rl_free_cb.
- *
- * If the list was previously allocated with rd_list_new() it will be freed.
- */
-void rd_list_destroy (rd_list_t *rl);
-
-
-/**
- * Returns the element at index 'idx', or NULL if out of range.
- *
- * Typical iteration is:
- *    int i = 0;
- *    my_type_t *obj;
- *    while ((obj = rd_list_elem(rl, i++)))
- *        do_something(obj);
- */
-void *rd_list_elem (const rd_list_t *rl, int idx);
-
-#define RD_LIST_FOREACH(elem,listp,idx) \
-        for (idx = 0 ; (elem = rd_list_elem(listp, idx)) ; idx++)
-
-#define RD_LIST_FOREACH_REVERSE(elem,listp,idx)                         \
-        for (idx = (listp)->rl_cnt-1 ;                                  \
-             idx >= 0 && (elem = rd_list_elem(listp, idx)) ; idx--)
-
-/**
- * Returns the number of elements in list.
- */
-static RD_INLINE RD_UNUSED int rd_list_cnt (const rd_list_t *rl) {
-        return rl->rl_cnt;
-}
-
-
-/**
- * Returns true if list is empty
- */
-#define rd_list_empty(rl) (rd_list_cnt(rl) == 0)
-
-
-
-/**
- * Find element using comparator
- * 'match' will be the first argument to 'cmp', and each element (up to a match)
- * will be the second argument to 'cmp'.
- */
-void *rd_list_find (const rd_list_t *rl, const void *match,
-                    int (*cmp) (const void *, const void *));
-
-
-
-/**
- * @brief Compare list \p a to \p b.
- *
- * @returns < 0 if a was "lesser" than b,
- *          > 0 if a was "greater" than b,
- *            0 if a and b are equal.
- */
-int rd_list_cmp (const rd_list_t *a, rd_list_t *b,
-                 int (*cmp) (const void *, const void *));
-
-/**
- * @brief Simple element pointer comparator
- */
-int rd_list_cmp_ptr (const void *a, const void *b);
-
-
-/**
- * @brief Apply \p cb to each element in list, if \p cb returns 0
- *        the element will be removed (but not freed).
- */
-void rd_list_apply (rd_list_t *rl,
-                    int (*cb) (void *elem, void *opaque), void *opaque);
-
-
-
-/**
- * @brief Copy list \p src, returning a new list,
- *        using optional \p copy_cb (per elem)
- */
-rd_list_t *rd_list_copy (const rd_list_t *src,
-                         void *(*copy_cb) (const void *elem, void *opaque),
-                         void *opaque);
-
-
-/**
- * @brief Copy list \p src to \p dst using optional \p copy_cb (per elem)
- * @remark The destination list is not initialized or copied by this function.
- * @remark copy_cb() may return NULL in which case no element is added,
- *                   but the copy callback might have done so itself.
- */
-void rd_list_copy_to (rd_list_t *dst, const rd_list_t *src,
-                      void *(*copy_cb) (const void *elem, void *opaque),
-                      void *opaque);
-
-/**
- * @brief String copier for rd_list_copy()
- */
-static RD_UNUSED
-void *rd_list_string_copy (const void *elem, void *opaque) {
-        return rd_strdup((const char *)elem);
-}
-
-
-/**
- * Debugging: Print list to stdout.
- */
-void rd_list_dump (const char *what, const rd_list_t *rl);
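
The rd_list_prealloc_elems() pattern documented in this header avoids one allocation per element: after preallocation, rd_list_add(rl, NULL) hands back the next fixed-size slot. A small hypothetical sketch (the struct name and fields are invented for illustration; the librdkafka-internal headers are assumed to be available):

  #include <stdint.h>
  #include "rd.h"
  #include "rdlist.h"

  struct sample_offset {      /* hypothetical element type */
      int32_t partition;
      int64_t offset;
  };

  static void prealloc_example (void) {
      rd_list_t offsets;
      struct sample_offset *o;

      rd_list_init(&offsets, 0, NULL);                 /* no free_cb: elements share one block */
      rd_list_prealloc_elems(&offsets, sizeof(*o), 4); /* room for 4 fixed-size elements */

      o = rd_list_add(&offsets, NULL);                 /* NULL -> returns the next free slot */
      o->partition = 0;
      o->offset = 1234;

      rd_list_destroy(&offsets);                       /* frees the single backing allocation */
  }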

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdlog.c
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdlog.c b/thirdparty/librdkafka-0.11.1/src/rdlog.c
deleted file mode 100644
index 3f0d29a..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdlog.c
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * librd - Rapid Development C library
- *
- * Copyright (c) 2012-2013, Magnus Edenhill
- * All rights reserved.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met: 
- * 
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer. 
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution. 
- * 
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "rdkafka_int.h"
-#include "rdlog.h"
-
-#include <stdarg.h>
-#include <string.h>
-#include <ctype.h>
-
-
-
-
-void rd_hexdump (FILE *fp, const char *name, const void *ptr, size_t len) {
-	const char *p = (const char *)ptr;
-	size_t of = 0;
-
-
-	if (name)
-		fprintf(fp, "%s hexdump (%"PRIusz" bytes):\n", name, len);
-
-	for (of = 0 ; of < len ; of += 16) {
-		char hexen[16*3+1];
-		char charen[16+1];
-		int hof = 0;
-
-		int cof = 0;
-		unsigned int i;
-
-		for (i = (unsigned int)of ; i < (unsigned int)of + 16 && i < len ; i++) {
-			hof += rd_snprintf(hexen+hof, sizeof(hexen)-hof,
-					   "%02x ",
-					   p[i] & 0xff);
-			cof += rd_snprintf(charen+cof, sizeof(charen)-cof, "%c",
-					   isprint((int)p[i]) ? p[i] : '.');
-		}
-		fprintf(fp, "%08zx: %-48s %-16s\n",
-			of, hexen, charen);
-	}
-}
-
-
-void rd_iov_print (const char *what, int iov_idx, const struct iovec *iov,
-                   int hexdump) {
-        printf("%s:  iov #%i: %"PRIusz"\n", what, iov_idx,
-               (size_t)iov->iov_len);
-        if (hexdump)
-                rd_hexdump(stdout, what, iov->iov_base, iov->iov_len);
-}
-
-
-void rd_msghdr_print (const char *what, const struct msghdr *msg,
-                      int hexdump) {
-        int i;
-        size_t len = 0;
-
-        printf("%s: iovlen %"PRIusz"\n", what, (size_t)msg->msg_iovlen);
-
-        for (i = 0 ; i < (int)msg->msg_iovlen ; i++) {
-                rd_iov_print(what, i, &msg->msg_iov[i], hexdump);
-                len += msg->msg_iov[i].iov_len;
-        }
-        printf("%s: ^ message was %"PRIusz" bytes in total\n", what, len);
-}

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdlog.h
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdlog.h b/thirdparty/librdkafka-0.11.1/src/rdlog.h
deleted file mode 100644
index 95066e2..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdlog.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * librd - Rapid Development C library
- *
- * Copyright (c) 2012-2013, Magnus Edenhill
- * All rights reserved.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met: 
- * 
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer. 
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution. 
- * 
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#pragma once
-
-void rd_hexdump (FILE *fp, const char *name, const void *ptr, size_t len);
-
-void rd_iov_print (const char *what, int iov_idx, const struct iovec *iov,
-                   int hexdump);
-struct msghdr;
-void rd_msghdr_print (const char *what, const struct msghdr *msg,
-                      int hexdump);
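
rd_hexdump() above prints a classic 16-bytes-per-line dump: the offset, the hex bytes, then the printable characters. A hedged usage sketch follows (the output in the comment is approximate, and the headers from the tree above are assumed):

  #include <stdio.h>
  #include "rdlog.h"

  static void dump_payload (const void *buf, size_t len) {
      /* Prints roughly:
       *   payload hexdump (5 bytes):
       *   00000000: 48 65 6c 6c 6f                                   Hello
       */
      rd_hexdump(stdout, "payload", buf, len);
  }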

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdports.c
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdports.c b/thirdparty/librdkafka-0.11.1/src/rdports.c
deleted file mode 100644
index a34195b..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdports.c
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
-* librdkafka - Apache Kafka C library
-*
-* Copyright (c) 2016 Magnus Edenhill
-* All rights reserved.
-*
-* Redistribution and use in source and binary forms, with or without
-* modification, are permitted provided that the following conditions are met:
-*
-* 1. Redistributions of source code must retain the above copyright notice,
-*    this list of conditions and the following disclaimer.
-* 2. Redistributions in binary form must reproduce the above copyright notice,
-*    this list of conditions and the following disclaimer in the documentation
-*    and/or other materials provided with the distribution.
-*
-* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-* POSSIBILITY OF SUCH DAMAGE.
-*/
-
-/**
- * System portability
- */
-
-#include "rd.h"
-
-
-#include <stdlib.h>
-
-/**
- * qsort_r substitute
- * This nicely explains why we won't bother with the native implementation
- * on Win32 (qsort_s), OSX/FreeBSD (qsort_r with diff args):
- * http://forum.theorex.tech/t/different-declarations-of-qsort-r-on-mac-and-linux/93/2
- */
-static RD_TLS int (*rd_qsort_r_cmp) (const void *, const void *, void *);
-static RD_TLS void *rd_qsort_r_arg;
-
-static RD_UNUSED
-int rd_qsort_r_trampoline (const void *a, const void *b) {
-        return rd_qsort_r_cmp(a, b, rd_qsort_r_arg);
-}
-
-void rd_qsort_r (void *base, size_t nmemb, size_t size,
-                 int (*compar)(const void *, const void *, void *),
-                 void *arg) {
-        rd_qsort_r_cmp = compar;
-        rd_qsort_r_arg = arg;
-        qsort(base, nmemb, size, rd_qsort_r_trampoline);
-        rd_qsort_r_cmp = NULL;
-        rd_qsort_r_arg = NULL;
-}
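
rd_qsort_r() above emulates qsort_r() portably by parking the comparator and its opaque argument in thread-local variables and routing libc qsort() through a trampoline. A hypothetical caller might look like the sketch below (the comparator and data are invented; rdports.h is the header removed just after this file):

  #include <string.h>
  #include "rd.h"
  #include "rdports.h"

  /* Sort an array of strings; 'arg' carries the direction (1 ascending, -1 descending). */
  static int name_cmp_dir (const void *a, const void *b, void *arg) {
      int dir = *(const int *)arg;
      return dir * strcmp(*(const char *const *)a, *(const char *const *)b);
  }

  static void sort_names (const char **names, size_t cnt, int dir) {
      rd_qsort_r(names, cnt, sizeof(*names), name_cmp_dir, &dir);
  }

Because the comparator and argument live in thread-local storage, concurrent sorts on different threads don't interfere, but a comparator must not call rd_qsort_r() again while a sort is in progress.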

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdports.h
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdports.h b/thirdparty/librdkafka-0.11.1/src/rdports.h
deleted file mode 100644
index 44cef55..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdports.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
-* librdkafka - Apache Kafka C library
-*
-* Copyright (c) 2016 Magnus Edenhill
-* All rights reserved.
-*
-* Redistribution and use in source and binary forms, with or without
-* modification, are permitted provided that the following conditions are met:
-*
-* 1. Redistributions of source code must retain the above copyright notice,
-*    this list of conditions and the following disclaimer.
-* 2. Redistributions in binary form must reproduce the above copyright notice,
-*    this list of conditions and the following disclaimer in the documentation
-*    and/or other materials provided with the distribution.
-*
-* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-* POSSIBILITY OF SUCH DAMAGE.
-*/
-#pragma once
-
-
-void rd_qsort_r (void *base, size_t nmemb, size_t size,
-              int (*compar)(const void *, const void *, void *),
-              void *arg);


http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/tinycthread.c
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/tinycthread.c b/thirdparty/librdkafka-0.11.1/src/tinycthread.c
deleted file mode 100644
index 0049db3..0000000
--- a/thirdparty/librdkafka-0.11.1/src/tinycthread.c
+++ /dev/null
@@ -1,1031 +0,0 @@
-/* -*- mode: c; tab-width: 2; indent-tabs-mode: nil; -*-
-Copyright (c) 2012 Marcus Geelnard
-Copyright (c) 2013-2014 Evan Nemerson
-
-This software is provided 'as-is', without any express or implied
-warranty. In no event will the authors be held liable for any damages
-arising from the use of this software.
-
-Permission is granted to anyone to use this software for any purpose,
-including commercial applications, and to alter it and redistribute it
-freely, subject to the following restrictions:
-
-    1. The origin of this software must not be misrepresented; you must not
-    claim that you wrote the original software. If you use this software
-    in a product, an acknowledgment in the product documentation would be
-    appreciated but is not required.
-
-    2. Altered source versions must be plainly marked as such, and must not be
-    misrepresented as being the original software.
-
-    3. This notice may not be removed or altered from any source
-    distribution.
-*/
-
-#include "rd.h"
-#include "rdtime.h"
-#include "tinycthread.h"
-#include <stdlib.h>
-
-/* Platform specific includes */
-#if defined(_TTHREAD_POSIX_)
-  #include <signal.h>
-  #include <sched.h>
-  #include <unistd.h>
-  #include <sys/time.h>
-  #include <errno.h>
-#elif defined(_TTHREAD_WIN32_)
-  #include <process.h>
-  #include <sys/timeb.h>
-#endif
-
-
-/* Standard, good-to-have defines */
-#ifndef NULL
-  #define NULL (void*)0
-#endif
-#ifndef TRUE
-  #define TRUE 1
-#endif
-#ifndef FALSE
-  #define FALSE 0
-#endif
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-static RD_TLS int thrd_is_detached;
-
-
-int mtx_init(mtx_t *mtx, int type)
-{
-#if defined(_TTHREAD_WIN32_)
-  mtx->mAlreadyLocked = FALSE;
-  mtx->mRecursive = type & mtx_recursive;
-  mtx->mTimed = type & mtx_timed;
-  if (!mtx->mTimed)
-  {
-    InitializeCriticalSection(&(mtx->mHandle.cs));
-  }
-  else
-  {
-    mtx->mHandle.mut = CreateMutex(NULL, FALSE, NULL);
-    if (mtx->mHandle.mut == NULL)
-    {
-      return thrd_error;
-    }
-  }
-  return thrd_success;
-#else
-  int ret;
-  pthread_mutexattr_t attr;
-  pthread_mutexattr_init(&attr);
-  if (type & mtx_recursive)
-  {
-    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
-  }
-  ret = pthread_mutex_init(mtx, &attr);
-  pthread_mutexattr_destroy(&attr);
-  return ret == 0 ? thrd_success : thrd_error;
-#endif
-}
-
-void mtx_destroy(mtx_t *mtx)
-{
-#if defined(_TTHREAD_WIN32_)
-  if (!mtx->mTimed)
-  {
-    DeleteCriticalSection(&(mtx->mHandle.cs));
-  }
-  else
-  {
-    CloseHandle(mtx->mHandle.mut);
-  }
-#else
-  pthread_mutex_destroy(mtx);
-#endif
-}
-
-int mtx_lock(mtx_t *mtx)
-{
-#if defined(_TTHREAD_WIN32_)
-  if (!mtx->mTimed)
-  {
-    EnterCriticalSection(&(mtx->mHandle.cs));
-  }
-  else
-  {
-    switch (WaitForSingleObject(mtx->mHandle.mut, INFINITE))
-    {
-      case WAIT_OBJECT_0:
-        break;
-      case WAIT_ABANDONED:
-      default:
-        return thrd_error;
-    }
-  }
-
-  if (!mtx->mRecursive)
-  {
-    while(mtx->mAlreadyLocked) Sleep(1); /* Simulate deadlock... */
-    mtx->mAlreadyLocked = TRUE;
-  }
-  return thrd_success;
-#else
-  return pthread_mutex_lock(mtx) == 0 ? thrd_success : thrd_error;
-#endif
-}
-
-int mtx_timedlock(mtx_t *mtx, const struct timespec *ts)
-{
-#if defined(_TTHREAD_WIN32_)
-  struct timespec current_ts;
-  DWORD timeoutMs;
-
-  if (!mtx->mTimed)
-  {
-    return thrd_error;
-  }
-
-  timespec_get(&current_ts, TIME_UTC);
-
-  if ((current_ts.tv_sec > ts->tv_sec) || ((current_ts.tv_sec == ts->tv_sec) && (current_ts.tv_nsec >= ts->tv_nsec)))
-  {
-    timeoutMs = 0;
-  }
-  else
-  {
-    timeoutMs  = (DWORD)(ts->tv_sec  - current_ts.tv_sec)  * 1000;
-    timeoutMs += (ts->tv_nsec - current_ts.tv_nsec) / 1000000;
-    timeoutMs += 1;
-  }
-
-  /* TODO: the timeout for WaitForSingleObject doesn't include time
-     while the computer is asleep. */
-  switch (WaitForSingleObject(mtx->mHandle.mut, timeoutMs))
-  {
-    case WAIT_OBJECT_0:
-      break;
-    case WAIT_TIMEOUT:
-      return thrd_timedout;
-    case WAIT_ABANDONED:
-    default:
-      return thrd_error;
-  }
-
-  if (!mtx->mRecursive)
-  {
-    while(mtx->mAlreadyLocked) Sleep(1); /* Simulate deadlock... */
-    mtx->mAlreadyLocked = TRUE;
-  }
-
-  return thrd_success;
-#elif defined(_POSIX_TIMEOUTS) && (_POSIX_TIMEOUTS >= 200112L) && defined(_POSIX_THREADS) && (_POSIX_THREADS >= 200112L)
-  switch (pthread_mutex_timedlock(mtx, ts)) {
-    case 0:
-      return thrd_success;
-    case ETIMEDOUT:
-      return thrd_timedout;
-    default:
-      return thrd_error;
-  }
-#else
-  int rc;
-  struct timespec cur, dur;
-
-  /* Try to acquire the lock and, if we fail, sleep for 5ms. */
-  while ((rc = pthread_mutex_trylock (mtx)) == EBUSY) {
-    timespec_get(&cur, TIME_UTC);
-
-    if ((cur.tv_sec > ts->tv_sec) || ((cur.tv_sec == ts->tv_sec) && (cur.tv_nsec >= ts->tv_nsec)))
-    {
-      break;
-    }
-
-    dur.tv_sec = ts->tv_sec - cur.tv_sec;
-    dur.tv_nsec = ts->tv_nsec - cur.tv_nsec;
-    if (dur.tv_nsec < 0)
-    {
-      dur.tv_sec--;
-      dur.tv_nsec += 1000000000;
-    }
-
-    if ((dur.tv_sec != 0) || (dur.tv_nsec > 5000000))
-    {
-      dur.tv_sec = 0;
-      dur.tv_nsec = 5000000;
-    }
-
-    nanosleep(&dur, NULL);
-  }
-
-  switch (rc) {
-    case 0:
-      return thrd_success;
-    case ETIMEDOUT:
-    case EBUSY:
-      return thrd_timedout;
-    default:
-      return thrd_error;
-  }
-#endif
-}
-
-int mtx_trylock(mtx_t *mtx)
-{
-#if defined(_TTHREAD_WIN32_)
-  int ret;
-
-  if (!mtx->mTimed)
-  {
-    ret = TryEnterCriticalSection(&(mtx->mHandle.cs)) ? thrd_success : thrd_busy;
-  }
-  else
-  {
-    ret = (WaitForSingleObject(mtx->mHandle.mut, 0) == WAIT_OBJECT_0) ? thrd_success : thrd_busy;
-  }
-
-  if ((!mtx->mRecursive) && (ret == thrd_success))
-  {
-    if (mtx->mAlreadyLocked)
-    {
-      LeaveCriticalSection(&(mtx->mHandle.cs));
-      ret = thrd_busy;
-    }
-    else
-    {
-      mtx->mAlreadyLocked = TRUE;
-    }
-  }
-  return ret;
-#else
-  return (pthread_mutex_trylock(mtx) == 0) ? thrd_success : thrd_busy;
-#endif
-}
-
-int mtx_unlock(mtx_t *mtx)
-{
-#if defined(_TTHREAD_WIN32_)
-  mtx->mAlreadyLocked = FALSE;
-  if (!mtx->mTimed)
-  {
-    LeaveCriticalSection(&(mtx->mHandle.cs));
-  }
-  else
-  {
-    if (!ReleaseMutex(mtx->mHandle.mut))
-    {
-      return thrd_error;
-    }
-  }
-  return thrd_success;
-#else
-  return pthread_mutex_unlock(mtx) == 0 ? thrd_success : thrd_error;
-#endif
-}
-
-#if defined(_TTHREAD_WIN32_)
-#define _CONDITION_EVENT_ONE 0
-#define _CONDITION_EVENT_ALL 1
-#endif
-
-int cnd_init(cnd_t *cond)
-{
-#if defined(_TTHREAD_WIN32_)
-  cond->mWaitersCount = 0;
-
-  /* Init critical section */
-  InitializeCriticalSection(&cond->mWaitersCountLock);
-
-  /* Init events */
-  cond->mEvents[_CONDITION_EVENT_ONE] = CreateEvent(NULL, FALSE, FALSE, NULL);
-  if (cond->mEvents[_CONDITION_EVENT_ONE] == NULL)
-  {
-    cond->mEvents[_CONDITION_EVENT_ALL] = NULL;
-    return thrd_error;
-  }
-  cond->mEvents[_CONDITION_EVENT_ALL] = CreateEvent(NULL, TRUE, FALSE, NULL);
-  if (cond->mEvents[_CONDITION_EVENT_ALL] == NULL)
-  {
-    CloseHandle(cond->mEvents[_CONDITION_EVENT_ONE]);
-    cond->mEvents[_CONDITION_EVENT_ONE] = NULL;
-    return thrd_error;
-  }
-
-  return thrd_success;
-#else
-  return pthread_cond_init(cond, NULL) == 0 ? thrd_success : thrd_error;
-#endif
-}
-
-void cnd_destroy(cnd_t *cond)
-{
-#if defined(_TTHREAD_WIN32_)
-  if (cond->mEvents[_CONDITION_EVENT_ONE] != NULL)
-  {
-    CloseHandle(cond->mEvents[_CONDITION_EVENT_ONE]);
-  }
-  if (cond->mEvents[_CONDITION_EVENT_ALL] != NULL)
-  {
-    CloseHandle(cond->mEvents[_CONDITION_EVENT_ALL]);
-  }
-  DeleteCriticalSection(&cond->mWaitersCountLock);
-#else
-  pthread_cond_destroy(cond);
-#endif
-}
-
-int cnd_signal(cnd_t *cond)
-{
-#if defined(_TTHREAD_WIN32_)
-  int haveWaiters;
-
-  /* Are there any waiters? */
-  EnterCriticalSection(&cond->mWaitersCountLock);
-  haveWaiters = (cond->mWaitersCount > 0);
-  LeaveCriticalSection(&cond->mWaitersCountLock);
-
-  /* If we have any waiting threads, send them a signal */
-  if(haveWaiters)
-  {
-    if (SetEvent(cond->mEvents[_CONDITION_EVENT_ONE]) == 0)
-    {
-      return thrd_error;
-    }
-  }
-
-  return thrd_success;
-#else
-  return pthread_cond_signal(cond) == 0 ? thrd_success : thrd_error;
-#endif
-}
-
-int cnd_broadcast(cnd_t *cond)
-{
-#if defined(_TTHREAD_WIN32_)
-  int haveWaiters;
-
-  /* Are there any waiters? */
-  EnterCriticalSection(&cond->mWaitersCountLock);
-  haveWaiters = (cond->mWaitersCount > 0);
-  LeaveCriticalSection(&cond->mWaitersCountLock);
-
-  /* If we have any waiting threads, send them a signal */
-  if(haveWaiters)
-  {
-    if (SetEvent(cond->mEvents[_CONDITION_EVENT_ALL]) == 0)
-    {
-      return thrd_error;
-    }
-  }
-
-  return thrd_success;
-#else
-  return pthread_cond_broadcast(cond) == 0 ? thrd_success : thrd_error;
-#endif
-}
-
-#if defined(_TTHREAD_WIN32_)
-static int _cnd_timedwait_win32(cnd_t *cond, mtx_t *mtx, DWORD timeout)
-{
-  int result, lastWaiter;
-
-  /* Increment number of waiters */
-  EnterCriticalSection(&cond->mWaitersCountLock);
-  ++ cond->mWaitersCount;
-  LeaveCriticalSection(&cond->mWaitersCountLock);
-
-  /* Release the mutex while waiting for the condition (will decrease
-     the number of waiters when done)... */
-  mtx_unlock(mtx);
-
-  /* Wait for either event to become signaled due to cnd_signal() or
-     cnd_broadcast() being called */
-  result = WaitForMultipleObjects(2, cond->mEvents, FALSE, timeout);
-  if (result == WAIT_TIMEOUT)
-  {
-    /* The mutex is locked again before the function returns, even if an error occurred */
-    mtx_lock(mtx);
-    return thrd_timedout;
-  }
-  else if (result == (int)WAIT_FAILED)
-  {
-    /* The mutex is locked again before the function returns, even if an error occurred */
-    mtx_lock(mtx);
-    return thrd_error;
-  }
-
-  /* Check if we are the last waiter */
-  EnterCriticalSection(&cond->mWaitersCountLock);
-  -- cond->mWaitersCount;
-  lastWaiter = (result == (WAIT_OBJECT_0 + _CONDITION_EVENT_ALL)) &&
-               (cond->mWaitersCount == 0);
-  LeaveCriticalSection(&cond->mWaitersCountLock);
-
-  /* If we are the last waiter to be notified to stop waiting, reset the event */
-  if (lastWaiter)
-  {
-    if (ResetEvent(cond->mEvents[_CONDITION_EVENT_ALL]) == 0)
-    {
-      /* The mutex is locked again before the function returns, even if an error occurred */
-      mtx_lock(mtx);
-      return thrd_error;
-    }
-  }
-
-  /* Re-acquire the mutex */
-  mtx_lock(mtx);
-
-  return thrd_success;
-}
-#endif
-
-int cnd_wait(cnd_t *cond, mtx_t *mtx)
-{
-#if defined(_TTHREAD_WIN32_)
-  return _cnd_timedwait_win32(cond, mtx, INFINITE);
-#else
-  return pthread_cond_wait(cond, mtx) == 0 ? thrd_success : thrd_error;
-#endif
-}
-
-int cnd_timedwait(cnd_t *cond, mtx_t *mtx, const struct timespec *ts)
-{
-#if defined(_TTHREAD_WIN32_)
-  struct timespec now;
-  if (timespec_get(&now, TIME_UTC) == TIME_UTC)
-  {
-    unsigned long long nowInMilliseconds = now.tv_sec * 1000 + now.tv_nsec / 1000000;
-    unsigned long long tsInMilliseconds  = ts->tv_sec * 1000 + ts->tv_nsec / 1000000;
-    DWORD delta = (tsInMilliseconds > nowInMilliseconds) ?
-      (DWORD)(tsInMilliseconds - nowInMilliseconds) : 0;
-    return _cnd_timedwait_win32(cond, mtx, delta);
-  }
-  else
-    return thrd_error;
-#else
-  int ret;
-  ret = pthread_cond_timedwait(cond, mtx, ts);
-  if (ret == ETIMEDOUT)
-  {
-    return thrd_timedout;
-  }
-  return ret == 0 ? thrd_success : thrd_error;
-#endif
-}
-
-
-int cnd_timedwait_ms(cnd_t *cnd, mtx_t *mtx, int timeout_ms) {
-  if (timeout_ms == -1 /* INFINITE*/)
-    return cnd_wait(cnd, mtx);
-#if defined(_TTHREAD_WIN32_)
-	return _cnd_timedwait_win32(cnd, mtx, (DWORD)timeout_ms);
-#else
-  int ret;
-	struct timeval tv;
-	struct timespec ts;
-
-	gettimeofday(&tv, NULL);
-  ts.tv_sec = tv.tv_sec;
-  ts.tv_nsec = tv.tv_usec * 1000;
-
-	ts.tv_sec  += timeout_ms / 1000;
-	ts.tv_nsec += (timeout_ms % 1000) * 1000000;
-
-	if (ts.tv_nsec >= 1000000000) {
-		ts.tv_sec++;
-		ts.tv_nsec -= 1000000000;
-	}
-
-  ret = pthread_cond_timedwait(cnd, mtx, &ts);
-  if (ret == ETIMEDOUT)
-  {
-    return thrd_timedout;
-  }
-  return ret == 0 ? thrd_success : thrd_error;
-#endif
-}
-
-int cnd_timedwait_msp (cnd_t *cnd, mtx_t *mtx, int *timeout_msp) {
-        rd_ts_t pre = rd_clock();
-        int r;
-        r = cnd_timedwait_ms(cnd, mtx, *timeout_msp);
-        if (r != thrd_timedout) {
-                /* Subtract spent time */
-                (*timeout_msp) -= (int)(rd_clock()-pre) / 1000;
-        }
-        return r;
-}
-
-#if defined(_TTHREAD_WIN32_)
-struct TinyCThreadTSSData {
-  void* value;
-  tss_t key;
-  struct TinyCThreadTSSData* next;
-};
-
-static tss_dtor_t _tinycthread_tss_dtors[1088] = { NULL, };
-
-static _Thread_local struct TinyCThreadTSSData* _tinycthread_tss_head = NULL;
-static _Thread_local struct TinyCThreadTSSData* _tinycthread_tss_tail = NULL;
-
-static void _tinycthread_tss_cleanup (void);
-
-static void _tinycthread_tss_cleanup (void) {
-  struct TinyCThreadTSSData* data;
-  int iteration;
-  unsigned int again = 1;
-  void* value;
-
-  for (iteration = 0 ; iteration < TSS_DTOR_ITERATIONS && again > 0 ; iteration++)
-  {
-    again = 0;
-    for (data = _tinycthread_tss_head ; data != NULL ; data = data->next)
-    {
-      if (data->value != NULL)
-      {
-        value = data->value;
-        data->value = NULL;
-
-        if (_tinycthread_tss_dtors[data->key] != NULL)
-        {
-          again = 1;
-          _tinycthread_tss_dtors[data->key](value);
-        }
-      }
-    }
-  }
-
-  while (_tinycthread_tss_head != NULL) {
-    data = _tinycthread_tss_head->next;
-    free (_tinycthread_tss_head);
-    _tinycthread_tss_head = data;
-  }
-  _tinycthread_tss_head = NULL;
-  _tinycthread_tss_tail = NULL;
-}
-
-static void NTAPI _tinycthread_tss_callback(PVOID h, DWORD dwReason, PVOID pv)
-{
-  (void)h;
-  (void)pv;
-
-  if (_tinycthread_tss_head != NULL && (dwReason == DLL_THREAD_DETACH || dwReason == DLL_PROCESS_DETACH))
-  {
-    _tinycthread_tss_cleanup();
-  }
-}
-
-#if defined(_MSC_VER)
-  #ifdef _M_X64
-    #pragma const_seg(".CRT$XLB")
-  #else
-    #pragma data_seg(".CRT$XLB")
-  #endif
-  PIMAGE_TLS_CALLBACK p_thread_callback = _tinycthread_tss_callback;
-  #ifdef _M_X64
-    #pragma data_seg()
-  #else
-    #pragma const_seg()
-  #endif
-#else
-  PIMAGE_TLS_CALLBACK p_thread_callback __attribute__((section(".CRT$XLB"))) = _tinycthread_tss_callback;
-#endif
-
-#endif /* defined(_TTHREAD_WIN32_) */
-
-/** Information to pass to the new thread (what to run). */
-typedef struct {
-  thrd_start_t mFunction; /**< Pointer to the function to be executed. */
-  void * mArg;            /**< Function argument for the thread function. */
-} _thread_start_info;
-
-/* Thread wrapper function. */
-#if defined(_TTHREAD_WIN32_)
-static DWORD WINAPI _thrd_wrapper_function(LPVOID aArg)
-#elif defined(_TTHREAD_POSIX_)
-static void * _thrd_wrapper_function(void * aArg)
-#endif
-{
-  thrd_start_t fun;
-  void *arg;
-  int  res;
-
-  /* Get thread startup information */
-  _thread_start_info *ti = (_thread_start_info *) aArg;
-  fun = ti->mFunction;
-  arg = ti->mArg;
-
-  /* The thread is responsible for freeing the startup information */
-  free((void *)ti);
-
-  /* Call the actual client thread function */
-  res = fun(arg);
-
-#if defined(_TTHREAD_WIN32_)
-  if (_tinycthread_tss_head != NULL)
-  {
-    _tinycthread_tss_cleanup();
-  }
-
-  return (DWORD)res;
-#else
-  return (void*)(intptr_t)res;
-#endif
-}
-
-int thrd_create(thrd_t *thr, thrd_start_t func, void *arg)
-{
-  /* Fill out the thread startup information (passed to the thread wrapper,
-     which will eventually free it) */
-  _thread_start_info* ti = (_thread_start_info*)malloc(sizeof(_thread_start_info));
-  if (ti == NULL)
-  {
-    return thrd_nomem;
-  }
-  ti->mFunction = func;
-  ti->mArg = arg;
-
-  /* Create the thread */
-#if defined(_TTHREAD_WIN32_)
-  *thr = CreateThread(NULL, 0, _thrd_wrapper_function, (LPVOID) ti, 0, NULL);
-#elif defined(_TTHREAD_POSIX_)
-  {
-          int err;
-          if((err = pthread_create(thr, NULL, _thrd_wrapper_function,
-                                   (void *)ti)) != 0) {
-                  errno = err;
-                  *thr = 0;
-          }
-  }
-#endif
-
-  /* Did we fail to create the thread? */
-  if(!*thr)
-  {
-    free(ti);
-    return thrd_error;
-  }
-
-  return thrd_success;
-}
-
-thrd_t thrd_current(void)
-{
-#if defined(_TTHREAD_WIN32_)
-  return GetCurrentThread();
-#else
-  return pthread_self();
-#endif
-}
-
-int thrd_is_current(thrd_t thr) {
-#if defined(_TTHREAD_WIN32_)
-	return GetThreadId(thr) == GetCurrentThreadId();
-#else	
-	return (pthread_self() == thr);
-#endif
-}
-
-
-int thrd_detach(thrd_t thr)
-{
-  thrd_is_detached = 1;
-#if defined(_TTHREAD_WIN32_)
-  /* https://stackoverflow.com/questions/12744324/how-to-detach-a-thread-on-windows-c#answer-12746081 */
-  return CloseHandle(thr) != 0 ? thrd_success : thrd_error;
-#else
-  return pthread_detach(thr) == 0 ? thrd_success : thrd_error;
-#endif
-}
-
-int thrd_equal(thrd_t thr0, thrd_t thr1)
-{
-#if defined(_TTHREAD_WIN32_)
-  return thr0 == thr1;
-#else
-  return pthread_equal(thr0, thr1);
-#endif
-}
-
-void thrd_exit(int res)
-{
-#if defined(_TTHREAD_WIN32_)
-  if (_tinycthread_tss_head != NULL)
-  {
-    _tinycthread_tss_cleanup();
-  }
-
-  ExitThread(res);
-#else
-  pthread_exit((void*)(intptr_t)res);
-#endif
-}
-
-int thrd_join(thrd_t thr, int *res)
-{
-#if defined(_TTHREAD_WIN32_)
-  DWORD dwRes;
-
-  if (WaitForSingleObject(thr, INFINITE) == WAIT_FAILED)
-  {
-    return thrd_error;
-  }
-  if (res != NULL)
-  {
-    if (GetExitCodeThread(thr, &dwRes) != 0)
-    {
-      *res = dwRes;
-    }
-    else
-    {
-      return thrd_error;
-    }
-  }
-  CloseHandle(thr);
-#elif defined(_TTHREAD_POSIX_)
-  void *pres;
-  if (pthread_join(thr, &pres) != 0)
-  {
-    return thrd_error;
-  }
-  if (res != NULL)
-  {
-    *res = (int)(intptr_t)pres;
-  }
-#endif
-  return thrd_success;
-}
-
-int thrd_sleep(const struct timespec *duration, struct timespec *remaining)
-{
-#if !defined(_TTHREAD_WIN32_)
-  return nanosleep(duration, remaining);
-#else
-  struct timespec start;
-  DWORD t;
-
-  timespec_get(&start, TIME_UTC);
-
-  t = SleepEx((DWORD)(duration->tv_sec * 1000 +
-              duration->tv_nsec / 1000000 +
-              (((duration->tv_nsec % 1000000) == 0) ? 0 : 1)),
-              TRUE);
-
-  if (t == 0) {
-    return 0;
-  } else if (remaining != NULL) {
-    timespec_get(remaining, TIME_UTC);
-    remaining->tv_sec -= start.tv_sec;
-    remaining->tv_nsec -= start.tv_nsec;
-    if (remaining->tv_nsec < 0)
-    {
-      remaining->tv_nsec += 1000000000;
-      remaining->tv_sec -= 1;
-    }
-  } else {
-    return -1;
-  }
-
-  return 0;
-#endif
-}
-
-void thrd_yield(void)
-{
-#if defined(_TTHREAD_WIN32_)
-  Sleep(0);
-#else
-  sched_yield();
-#endif
-}
-
-int tss_create(tss_t *key, tss_dtor_t dtor)
-{
-#if defined(_TTHREAD_WIN32_)
-  *key = TlsAlloc();
-  if (*key == TLS_OUT_OF_INDEXES)
-  {
-    return thrd_error;
-  }
-  _tinycthread_tss_dtors[*key] = dtor;
-#else
-  if (pthread_key_create(key, dtor) != 0)
-  {
-    return thrd_error;
-  }
-#endif
-  return thrd_success;
-}
-
-void tss_delete(tss_t key)
-{
-#if defined(_TTHREAD_WIN32_)
-  struct TinyCThreadTSSData* data = (struct TinyCThreadTSSData*) TlsGetValue (key);
-  struct TinyCThreadTSSData* prev = NULL;
-  if (data != NULL)
-  {
-    if (data == _tinycthread_tss_head)
-    {
-      _tinycthread_tss_head = data->next;
-    }
-    else
-    {
-      prev = _tinycthread_tss_head;
-      if (prev != NULL)
-      {
-        while (prev->next != data)
-        {
-          prev = prev->next;
-        }
-      }
-    }
-
-    if (data == _tinycthread_tss_tail)
-    {
-      _tinycthread_tss_tail = prev;
-    }
-
-    free (data);
-  }
-  _tinycthread_tss_dtors[key] = NULL;
-  TlsFree(key);
-#else
-  pthread_key_delete(key);
-#endif
-}
-
-void *tss_get(tss_t key)
-{
-#if defined(_TTHREAD_WIN32_)
-  struct TinyCThreadTSSData* data = (struct TinyCThreadTSSData*)TlsGetValue(key);
-  if (data == NULL)
-  {
-    return NULL;
-  }
-  return data->value;
-#else
-  return pthread_getspecific(key);
-#endif
-}
-
-int tss_set(tss_t key, void *val)
-{
-#if defined(_TTHREAD_WIN32_)
-  struct TinyCThreadTSSData* data = (struct TinyCThreadTSSData*)TlsGetValue(key);
-  if (data == NULL)
-  {
-    data = (struct TinyCThreadTSSData*)malloc(sizeof(struct TinyCThreadTSSData));
-    if (data == NULL)
-    {
-      return thrd_error;
-	}
-
-    data->value = NULL;
-    data->key = key;
-    data->next = NULL;
-
-    if (_tinycthread_tss_tail != NULL)
-    {
-      _tinycthread_tss_tail->next = data;
-    }
-    else
-    {
-      _tinycthread_tss_tail = data;
-    }
-
-    if (_tinycthread_tss_head == NULL)
-    {
-      _tinycthread_tss_head = data;
-    }
-
-    if (!TlsSetValue(key, data))
-    {
-      free (data);
-	  return thrd_error;
-    }
-  }
-  data->value = val;
-#else
-  if (pthread_setspecific(key, val) != 0)
-  {
-    return thrd_error;
-  }
-#endif
-  return thrd_success;
-}
-
-#if defined(_TTHREAD_EMULATE_TIMESPEC_GET_)
-int _tthread_timespec_get(struct timespec *ts, int base)
-{
-#if defined(_TTHREAD_WIN32_)
-  struct _timeb tb;
-#elif !defined(CLOCK_REALTIME)
-  struct timeval tv;
-#endif
-
-  if (base != TIME_UTC)
-  {
-    return 0;
-  }
-
-#if defined(_TTHREAD_WIN32_)
-  _ftime_s(&tb);
-  ts->tv_sec = (time_t)tb.time;
-  ts->tv_nsec = 1000000L * (long)tb.millitm;
-#elif defined(CLOCK_REALTIME)
-  base = (clock_gettime(CLOCK_REALTIME, ts) == 0) ? base : 0;
-#else
-  gettimeofday(&tv, NULL);
-  ts->tv_sec = (time_t)tv.tv_sec;
-  ts->tv_nsec = 1000L * (long)tv.tv_usec;
-#endif
-
-  return base;
-}
-#endif /* _TTHREAD_EMULATE_TIMESPEC_GET_ */
-
-#if defined(_TTHREAD_WIN32_)
-void call_once(once_flag *flag, void (*func)(void))
-{
-  /* The idea here is that we use a spin lock (via the
-     InterlockedCompareExchange function) to restrict access to the
-     critical section until we have initialized it, then we use the
-     critical section to block until the callback has completed
-     execution. */
-  while (flag->status < 3)
-  {
-    switch (flag->status)
-    {
-      case 0:
-        if (InterlockedCompareExchange (&(flag->status), 1, 0) == 0) {
-          InitializeCriticalSection(&(flag->lock));
-          EnterCriticalSection(&(flag->lock));
-          flag->status = 2;
-          func();
-          flag->status = 3;
-          LeaveCriticalSection(&(flag->lock));
-          return;
-        }
-        break;
-      case 1:
-        break;
-      case 2:
-        EnterCriticalSection(&(flag->lock));
-        LeaveCriticalSection(&(flag->lock));
-        break;
-    }
-  }
-}
-#endif /* defined(_TTHREAD_WIN32_) */
-
-
-#if !defined(_TTHREAD_WIN32_)
-int rwlock_init (rwlock_t *rwl) {
-        int r = pthread_rwlock_init(rwl, NULL);
-        if (r) {
-                errno = r;
-                return thrd_error;
-        }
-        return thrd_success;
-}
-
-int rwlock_destroy (rwlock_t *rwl) {
-        int r = pthread_rwlock_destroy(rwl);
-        if (r) {
-                errno = r;
-                return thrd_error;
-        }
-        return thrd_success;
-}
-
-int rwlock_rdlock (rwlock_t *rwl) {
-        int r = pthread_rwlock_rdlock(rwl);
-        assert(r == 0);
-        return thrd_success;
-}
-
-int rwlock_wrlock (rwlock_t *rwl) {
-        int r = pthread_rwlock_wrlock(rwl);
-        assert(r == 0);
-        return thrd_success;
-}
-
-int rwlock_rdunlock (rwlock_t *rwl) {
-        int r = pthread_rwlock_unlock(rwl);
-        assert(r == 0);
-        return thrd_success;
-}
-
-int rwlock_wrunlock (rwlock_t *rwl) {
-        int r = pthread_rwlock_unlock(rwl);
-        assert(r == 0);
-        return thrd_success;
-}
-
-#endif /* !defined(_TTHREAD_WIN32_) */
-
-#ifdef __cplusplus
-}
-#endif
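
tinycthread mirrors the C11 <threads.h> interface (thrd_*, mtx_*, cnd_*, tss_*) on top of Win32 or pthreads, and librdkafka extends it with helpers such as cnd_timedwait_ms(). The sketch below is a generic, hypothetical usage example of that interface, not one of librdkafka's own call sites; it assumes tinycthread.h is on the include path.

  #include <stdio.h>
  #include "tinycthread.h"

  static mtx_t lock;
  static cnd_t ready;
  static int   done = 0;

  static int worker (void *arg) {
      (void)arg;
      mtx_lock(&lock);
      done = 1;                 /* publish the result under the lock */
      cnd_signal(&ready);
      mtx_unlock(&lock);
      return 0;
  }

  int main (void) {
      thrd_t thr;
      int res;

      mtx_init(&lock, mtx_plain);
      cnd_init(&ready);
      thrd_create(&thr, worker, NULL);

      mtx_lock(&lock);
      while (!done)             /* loop guards against spurious wake-ups */
          cnd_timedwait_ms(&ready, &lock, 1000);
      mtx_unlock(&lock);

      thrd_join(thr, &res);
      printf("worker returned %d\n", res);

      cnd_destroy(&ready);
      mtx_destroy(&lock);
      return 0;
  }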

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/tinycthread.h
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/tinycthread.h b/thirdparty/librdkafka-0.11.1/src/tinycthread.h
deleted file mode 100644
index 61010eb..0000000
--- a/thirdparty/librdkafka-0.11.1/src/tinycthread.h
+++ /dev/null
@@ -1,520 +0,0 @@
-/* -*- mode: c; tab-width: 2; indent-tabs-mode: nil; -*-
-Copyright (c) 2012 Marcus Geelnard
-Copyright (c) 2013-2014 Evan Nemerson
-
-This software is provided 'as-is', without any express or implied
-warranty. In no event will the authors be held liable for any damages
-arising from the use of this software.
-
-Permission is granted to anyone to use this software for any purpose,
-including commercial applications, and to alter it and redistribute it
-freely, subject to the following restrictions:
-
-    1. The origin of this software must not be misrepresented; you must not
-    claim that you wrote the original software. If you use this software
-    in a product, an acknowledgment in the product documentation would be
-    appreciated but is not required.
-
-    2. Altered source versions must be plainly marked as such, and must not be
-    misrepresented as being the original software.
-
-    3. This notice may not be removed or altered from any source
-    distribution.
-*/
-
-#ifndef _TINYCTHREAD_H_
-#define _TINYCTHREAD_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/**
-* @file
-* @mainpage TinyCThread API Reference
-*
-* @section intro_sec Introduction
-* TinyCThread is a minimal, portable implementation of basic threading
-* classes for C.
-*
-* They closely mimic the functionality and naming of the C11 standard, and
-* should be easily replaceable with the corresponding standard variants.
-*
-* @section port_sec Portability
-* The Win32 variant uses the native Win32 API for implementing the thread
-* classes, while for other systems, the POSIX threads API (pthread) is used.
-*
-* @section misc_sec Miscellaneous
-* The following special keywords are available: #_Thread_local.
-*
-* For more detailed information, browse the different sections of this
-* documentation. A good place to start is:
-* tinycthread.h.
-*/
-
-/* Which platform are we on? */
-#if !defined(_TTHREAD_PLATFORM_DEFINED_)
-  #if defined(_WIN32) || defined(__WIN32__) || defined(__WINDOWS__)
-    #define _TTHREAD_WIN32_
-  #else
-    #define _TTHREAD_POSIX_
-  #endif
-  #define _TTHREAD_PLATFORM_DEFINED_
-#endif
-
-/* Activate some POSIX functionality (e.g. clock_gettime and recursive mutexes) */
-#if defined(_TTHREAD_POSIX_)
-  #undef _FEATURES_H
-  #if !defined(_GNU_SOURCE)
-    #define _GNU_SOURCE
-  #endif
-  #if !defined(_POSIX_C_SOURCE) || ((_POSIX_C_SOURCE - 0) < 199309L)
-    #undef _POSIX_C_SOURCE
-    #define _POSIX_C_SOURCE 199309L
-  #endif
-  #if !defined(_XOPEN_SOURCE) || ((_XOPEN_SOURCE - 0) < 500)
-    #undef _XOPEN_SOURCE
-    #define _XOPEN_SOURCE 500
-  #endif
-#endif
-
-/* Generic includes */
-#include <time.h>
-
-/* Platform specific includes */
-#if defined(_TTHREAD_POSIX_)
-  #include <pthread.h>
-#elif defined(_TTHREAD_WIN32_)
-  #ifndef WIN32_LEAN_AND_MEAN
-    #define WIN32_LEAN_AND_MEAN
-    #define __UNDEF_LEAN_AND_MEAN
-  #endif
-  #include <windows.h>
-  #ifdef __UNDEF_LEAN_AND_MEAN
-    #undef WIN32_LEAN_AND_MEAN
-    #undef __UNDEF_LEAN_AND_MEAN
-  #endif
-#endif
-
-/* Compiler-specific information */
-#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L
-  #define TTHREAD_NORETURN _Noreturn
-#elif defined(__GNUC__)
-  #define TTHREAD_NORETURN __attribute__((__noreturn__))
-#else
-  #define TTHREAD_NORETURN
-#endif
-
-/* If TIME_UTC is missing, provide it and provide a wrapper for
-   timespec_get. */
-#ifndef TIME_UTC
-#define TIME_UTC 1
-#define _TTHREAD_EMULATE_TIMESPEC_GET_
-
-#if defined(_TTHREAD_WIN32_)
-struct _tthread_timespec {
-  time_t tv_sec;
-  long   tv_nsec;
-};
-#define timespec _tthread_timespec
-#endif
-
-int _tthread_timespec_get(struct timespec *ts, int base);
-#define timespec_get _tthread_timespec_get
-#endif
-
-/** TinyCThread version (major number). */
-#define TINYCTHREAD_VERSION_MAJOR 1
-/** TinyCThread version (minor number). */
-#define TINYCTHREAD_VERSION_MINOR 2
-/** TinyCThread version (full version). */
-#define TINYCTHREAD_VERSION (TINYCTHREAD_VERSION_MAJOR * 100 + TINYCTHREAD_VERSION_MINOR)
-
-/**
-* @def _Thread_local
-* Thread local storage keyword.
-* A variable that is declared with the @c _Thread_local keyword makes the
-* value of the variable local to each thread (known as thread-local storage,
-* or TLS). Example usage:
-* @code
-* // This variable is local to each thread.
-* _Thread_local int variable;
-* @endcode
-* @note The @c _Thread_local keyword is a macro that maps to the corresponding
-* compiler directive (e.g. @c __declspec(thread)).
-* @note This directive is currently not supported on Mac OS X (it will give
-* a compiler error), since compile-time TLS is not supported in the Mac OS X
-* executable format. Also, some older versions of MinGW (before GCC 4.x) do
-* not support this directive, nor does the Tiny C Compiler.
-* @hideinitializer
-*/
-
-#if !(defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201102L)) && !defined(_Thread_local)
- #if defined(__GNUC__) || defined(__INTEL_COMPILER) || defined(__SUNPRO_CC) || defined(__IBMCPP__)
-  #define _Thread_local __thread
- #else
-  #define _Thread_local __declspec(thread)
- #endif
-#elif defined(__GNUC__) && defined(__GNUC_MINOR__) && (((__GNUC__ << 8) | __GNUC_MINOR__) < ((4 << 8) | 9))
- #define _Thread_local __thread
-#endif
-
-/* Macros */
-#if defined(_TTHREAD_WIN32_)
-#define TSS_DTOR_ITERATIONS (4)
-#else
-#define TSS_DTOR_ITERATIONS PTHREAD_DESTRUCTOR_ITERATIONS
-#endif
-
-/* Function return values */
-#define thrd_error    0 /**< The requested operation failed */
-#define thrd_success  1 /**< The requested operation succeeded */
-#define thrd_timedout 2 /**< The time specified in the call was reached without acquiring the requested resource */
-#define thrd_busy     3 /**< The requested operation failed because a resource requested by a test and return function is already in use */
-#define thrd_nomem    4 /**< The requested operation failed because it was unable to allocate memory */
-
-/* Mutex types */
-#define mtx_plain     0
-#define mtx_timed     1
-#define mtx_recursive 2
-
-/* Mutex */
-#if defined(_TTHREAD_WIN32_)
-typedef struct {
-  union {
-    CRITICAL_SECTION cs;      /* Critical section handle (used for non-timed mutexes) */
-    HANDLE mut;               /* Mutex handle (used for timed mutex) */
-  } mHandle;                  /* Mutex handle */
-  int mAlreadyLocked;         /* TRUE if the mutex is already locked */
-  int mRecursive;             /* TRUE if the mutex is recursive */
-  int mTimed;                 /* TRUE if the mutex is timed */
-} mtx_t;
-#else
-typedef pthread_mutex_t mtx_t;
-#endif
-
-/** Create a mutex object.
-* @param mtx A mutex object.
-* @param type Bit-mask that must have one of the following four values:
-*   @li @c mtx_plain for a simple non-recursive mutex
-*   @li @c mtx_timed for a non-recursive mutex that supports timeout
-*   @li @c mtx_plain | @c mtx_recursive (same as @c mtx_plain, but recursive)
-*   @li @c mtx_timed | @c mtx_recursive (same as @c mtx_timed, but recursive)
-* @return @ref thrd_success on success, or @ref thrd_error if the request could
-* not be honored.
-*/
-int mtx_init(mtx_t *mtx, int type);
-
-/** Release any resources used by the given mutex.
-* @param mtx A mutex object.
-*/
-void mtx_destroy(mtx_t *mtx);
-
-/** Lock the given mutex.
-* Blocks until the given mutex can be locked. If the mutex is non-recursive, and
-* the calling thread already has a lock on the mutex, this call will block
-* forever.
-* @param mtx A mutex object.
-* @return @ref thrd_success on success, or @ref thrd_error if the request could
-* not be honored.
-*/
-int mtx_lock(mtx_t *mtx);
-
-/** NOT YET IMPLEMENTED.
-*/
-int mtx_timedlock(mtx_t *mtx, const struct timespec *ts);
-
-/** Try to lock the given mutex.
-* The specified mutex shall support either test and return or timeout. If the
-* mutex is already locked, the function returns without blocking.
-* @param mtx A mutex object.
-* @return @ref thrd_success on success, or @ref thrd_busy if the resource
-* requested is already in use, or @ref thrd_error if the request could not be
-* honored.
-*/
-int mtx_trylock(mtx_t *mtx);
-
-/** Unlock the given mutex.
-* @param mtx A mutex object.
-* @return @ref thrd_success on success, or @ref thrd_error if the request could
-* not be honored.
-*/
-int mtx_unlock(mtx_t *mtx);
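A short usage sketch of the mutex calls documented above; the guarded counter and helper names are illustrative:

    #include "tinycthread.h"

    static mtx_t hits_mtx;
    static int hits;

    static int hits_init (void) {
            return mtx_init(&hits_mtx, mtx_plain);  /* plain non-recursive mutex */
    }

    static void hits_increment (void) {
            if (mtx_lock(&hits_mtx) == thrd_success) {
                    hits++;
                    mtx_unlock(&hits_mtx);
            }
    }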
-
-/* Condition variable */
-#if defined(_TTHREAD_WIN32_)
-typedef struct {
-  HANDLE mEvents[2];                  /* Signal and broadcast event HANDLEs. */
-  unsigned int mWaitersCount;         /* Count of the number of waiters. */
-  CRITICAL_SECTION mWaitersCountLock; /* Serialize access to mWaitersCount. */
-} cnd_t;
-#else
-typedef pthread_cond_t cnd_t;
-#endif
-
-/** Create a condition variable object.
-* @param cond A condition variable object.
-* @return @ref thrd_success on success, or @ref thrd_error if the request could
-* not be honored.
-*/
-int cnd_init(cnd_t *cond);
-
-/** Release any resources used by the given condition variable.
-* @param cond A condition variable object.
-*/
-void cnd_destroy(cnd_t *cond);
-
-/** Signal a condition variable.
-* Unblocks one of the threads that are blocked on the given condition variable
-* at the time of the call. If no threads are blocked on the condition variable
-* at the time of the call, the function does nothing and returns success.
-* @param cond A condition variable object.
-* @return @ref thrd_success on success, or @ref thrd_error if the request could
-* not be honored.
-*/
-int cnd_signal(cnd_t *cond);
-
-/** Broadcast a condition variable.
-* Unblocks all of the threads that are blocked on the given condition variable
-* at the time of the call. If no threads are blocked on the condition variable
-* at the time of the call, the function does nothing and returns success.
-* @param cond A condition variable object.
-* @return @ref thrd_success on success, or @ref thrd_error if the request could
-* not be honored.
-*/
-int cnd_broadcast(cnd_t *cond);
-
-/** Wait for a condition variable to become signaled.
-* The function atomically unlocks the given mutex and endeavors to block until
-* the given condition variable is signaled by a call to cnd_signal or to
-* cnd_broadcast. When the calling thread becomes unblocked it locks the mutex
-* before it returns.
-* @param cond A condition variable object.
-* @param mtx A mutex object.
-* @return @ref thrd_success on success, or @ref thrd_error if the request could
-* not be honored.
-*/
-int cnd_wait(cnd_t *cond, mtx_t *mtx);
-
-/** Wait for a condition variable to become signaled.
-* The function atomically unlocks the given mutex and endeavors to block until
-* the given condition variable is signaled by a call to cnd_signal or to
-* cnd_broadcast, or until after the specified time. When the calling thread
-* becomes unblocked it locks the mutex before it returns.
-* @param cond A condition variable object.
-* @param mtx A mutex object.
-* @param xt A point in time at which the request will time out (absolute time).
-* @return @ref thrd_success upon success, or @ref thrd_timedout if the time
-* specified in the call was reached without acquiring the requested resource, or
-* @ref thrd_error if the request could not be honored.
-*/
-int cnd_timedwait(cnd_t *cond, mtx_t *mtx, const struct timespec *ts);
-
-/** Same as cnd_timedwait() but takes a relative timeout in milliseconds.
- */
-int cnd_timedwait_ms(cnd_t *cnd, mtx_t *mtx, int timeout_ms);
-
-/** Same as cnd_timedwait_ms() but updates the remaining time. */
-int cnd_timedwait_msp (cnd_t *cnd, mtx_t *mtx, int *timeout_msp);
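The condition-variable functions above are used with the usual wait-in-a-loop pattern; a minimal sketch, assuming the mutex and condition variable have already been initialized with mtx_init()/cnd_init() (the ready flag is illustrative):

    #include "tinycthread.h"

    static mtx_t ready_mtx;
    static cnd_t ready_cnd;
    static int ready;

    static void wait_until_ready (void) {
            mtx_lock(&ready_mtx);
            while (!ready)                             /* loop: wakeups may be spurious */
                    cnd_wait(&ready_cnd, &ready_mtx);  /* unlocks mutex while waiting */
            mtx_unlock(&ready_mtx);
    }

    static void announce_ready (void) {
            mtx_lock(&ready_mtx);
            ready = 1;
            mtx_unlock(&ready_mtx);
            cnd_signal(&ready_cnd);                    /* wake one waiting thread */
    }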
-
-/* Thread */
-#if defined(_TTHREAD_WIN32_)
-typedef HANDLE thrd_t;
-#else
-typedef pthread_t thrd_t;
-#endif
-
-/** Thread start function.
-* Any thread that is started with the @ref thrd_create() function must be
-* started through a function of this type.
-* @param arg The thread argument (the @c arg argument of the corresponding
-*        @ref thrd_create() call).
-* @return The thread return value, which can be obtained by another thread
-* by using the @ref thrd_join() function.
-*/
-typedef int (*thrd_start_t)(void *arg);
-
-/** Create a new thread.
-* @param thr Identifier of the newly created thread.
-* @param func A function pointer to the function that will be executed in
-*        the new thread.
-* @param arg An argument to the thread function.
-* @return @ref thrd_success on success, or @ref thrd_nomem if no memory could
-* be allocated for the thread requested, or @ref thrd_error if the request
-* could not be honored.
-* @note A thread’s identifier may be reused for a different thread once the
-* original thread has exited and either been detached or joined to another
-* thread.
-*/
-int thrd_create(thrd_t *thr, thrd_start_t func, void *arg);
-
-/** Identify the calling thread.
-* @return The identifier of the calling thread.
-*/
-thrd_t thrd_current(void);
-
-
-/** Checks if passed thread is the current thread.
- * @return non-zero if same thread, else 0.
- */
-int thrd_is_current(thrd_t thr);
-
-
-/** Dispose of any resources allocated to the thread when that thread exits.
- * @return thrd_success, or thrd_error on error
-*/
-int thrd_detach(thrd_t thr);
-
-/** Compare two thread identifiers.
-* The function determines if two thread identifiers refer to the same thread.
-* @return Zero if the two thread identifiers refer to different threads.
-* Otherwise a nonzero value is returned.
-*/
-int thrd_equal(thrd_t thr0, thrd_t thr1);
-
-/** Terminate execution of the calling thread.
-* @param res Result code of the calling thread.
-*/
-TTHREAD_NORETURN void thrd_exit(int res);
-
-/** Wait for a thread to terminate.
-* The function joins the given thread with the current thread by blocking
-* until the other thread has terminated.
-* @param thr The thread to join with.
-* @param res If this pointer is not NULL, the function will store the result
-*        code of the given thread in the integer pointed to by @c res.
-* @return @ref thrd_success on success, or @ref thrd_error if the request could
-* not be honored.
-*/
-int thrd_join(thrd_t thr, int *res);
-
-/** Put the calling thread to sleep.
-* Suspend execution of the calling thread.
-* @param duration  Interval to sleep for
-* @param remaining If non-NULL, this parameter will hold the remaining
-*                  time of the requested @c duration upon return. This will
-*                  typically be zero, but if the thread was woken up
-*                  by a signal that is not ignored before @c duration was
-*                  reached, @c remaining will hold a positive time.
-* @return 0 (zero) on successful sleep, -1 if an interrupt occurred,
-*         or a negative value if the operation fails.
-*/
-int thrd_sleep(const struct timespec *duration, struct timespec *remaining);
-
-/** Yield execution to another thread.
-* Permit other threads to run, even if the current thread would ordinarily
-* continue to run.
-*/
-void thrd_yield(void);
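Tying the thread functions above together, a minimal create/join sketch; the worker payload is illustrative:

    #include <stdio.h>
    #include "tinycthread.h"

    static int worker (void *arg) {
            int n = *(int *)arg;
            return n * 2;                    /* picked up by thrd_join() below */
    }

    int main (void) {
            thrd_t thr;
            int arg = 21, res = 0;

            if (thrd_create(&thr, worker, &arg) != thrd_success)
                    return 1;
            thrd_join(thr, &res);            /* blocks until worker() returns */
            printf("worker returned %d\n", res);
            return 0;
    }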
-
-/* Thread local storage */
-#if defined(_TTHREAD_WIN32_)
-typedef DWORD tss_t;
-#else
-typedef pthread_key_t tss_t;
-#endif
-
-/** Destructor function for a thread-specific storage.
-* @param val The value of the destructed thread-specific storage.
-*/
-typedef void (*tss_dtor_t)(void *val);
-
-/** Create a thread-specific storage.
-* @param key The unique key identifier that will be set if the function is
-*        successful.
-* @param dtor Destructor function. This can be NULL.
-* @return @ref thrd_success on success, or @ref thrd_error if the request could
-* not be honored.
-* @note On Windows, the @c dtor will definitely be called when
-* appropriate for threads created with @ref thrd_create.  It will be
-* called for other threads in most cases, the possible exception being
-* for DLLs loaded with LoadLibraryEx.  In order to be certain, you
-* should use @ref thrd_create whenever possible.
-*/
-int tss_create(tss_t *key, tss_dtor_t dtor);
-
-/** Delete a thread-specific storage.
-* The function releases any resources used by the given thread-specific
-* storage.
-* @param key The key that shall be deleted.
-*/
-void tss_delete(tss_t key);
-
-/** Get the value for a thread-specific storage.
-* @param key The thread-specific storage identifier.
-* @return The value for the current thread held in the given thread-specific
-* storage.
-*/
-void *tss_get(tss_t key);
-
-/** Set the value for a thread-specific storage.
-* @param key The thread-specific storage identifier.
-* @param val The value of the thread-specific storage to set for the current
-*        thread.
-* @return @ref thrd_success on success, or @ref thrd_error if the request could
-* not be honored.
-*/
-int tss_set(tss_t key, void *val);
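And a short sketch of the thread-specific storage calls above, giving each thread its own lazily allocated buffer; the key and helper names are illustrative:

    #include <stdlib.h>
    #include "tinycthread.h"

    static tss_t buf_key;

    static int buf_key_init (void) {
            /* free() is invoked for each thread's value when that thread exits */
            return tss_create(&buf_key, free);
    }

    static char *thread_buffer (void) {
            char *buf = tss_get(buf_key);
            if (!buf) {
                    buf = calloc(1, 256);    /* first use in this thread */
                    tss_set(buf_key, buf);
            }
            return buf;
    }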
-
-#if defined(_TTHREAD_WIN32_)
-  typedef struct {
-    LONG volatile status;
-    CRITICAL_SECTION lock;
-  } once_flag;
-  #define ONCE_FLAG_INIT {0,}
-#else
-  #define once_flag pthread_once_t
-  #define ONCE_FLAG_INIT PTHREAD_ONCE_INIT
-#endif
-
-/** Invoke a callback exactly once
- * @param flag Flag used to ensure the callback is invoked exactly
- *        once.
- * @param func Callback to invoke.
- */
-#if defined(_TTHREAD_WIN32_)
-  void call_once(once_flag *flag, void (*func)(void));
-#else
-  #define call_once(flag,func) pthread_once(flag,func)
-#endif
-
-
-
-
-/**
-* FIXME: description */
-#if defined(_TTHREAD_WIN32_)
-typedef struct rwlock_t {
-	SRWLOCK  lock;
-	int       rcnt;
-	int       wcnt;
-} rwlock_t;
-#define rwlock_init(rwl)    do { (rwl)->rcnt = (rwl)->wcnt = 0; InitializeSRWLock(&(rwl)->lock); } while (0)
-#define rwlock_destroy(rwl)
-#define rwlock_rdlock(rwl)   do { if (0) printf("Thr %i: at %i:   RDLOCK %p   %s (%i, %i)\n", GetCurrentThreadId(), __LINE__, rwl, __FUNCTION__, (rwl)->rcnt, (rwl)->wcnt); assert((rwl)->rcnt >= 0 && (rwl)->wcnt >= 0); AcquireSRWLockShared(&(rwl)->lock); InterlockedIncrement(&(rwl)->rcnt); } while (0)
-#define rwlock_wrlock(rwl)   do { if (0) printf("Thr %i: at %i:   WRLOCK %p   %s (%i, %i)\n", GetCurrentThreadId(), __LINE__, rwl, __FUNCTION__, (rwl)->rcnt, (rwl)->wcnt); assert((rwl)->rcnt >= 0 && (rwl)->wcnt >= 0); AcquireSRWLockExclusive(&(rwl)->lock); InterlockedIncrement(&(rwl)->wcnt); } while (0)
-#define rwlock_rdunlock(rwl) do { if (0) printf("Thr %i: at %i: RDUNLOCK %p   %s (%i, %i)\n", GetCurrentThreadId(), __LINE__, rwl, __FUNCTION__, (rwl)->rcnt, (rwl)->wcnt); assert((rwl)->rcnt > 0 && (rwl)->wcnt >= 0); ReleaseSRWLockShared(&(rwl)->lock); InterlockedDecrement(&(rwl)->rcnt); } while (0)  
-#define rwlock_wrunlock(rwl) do { if (0) printf("Thr %i: at %i: RWUNLOCK %p   %s (%i, %i)\n", GetCurrentThreadId(), __LINE__, rwl, __FUNCTION__, (rwl)->rcnt, (rwl)->wcnt); assert((rwl)->rcnt >= 0 && (rwl)->wcnt > 0); ReleaseSRWLockExclusive(&(rwl)->lock); InterlockedDecrement(&(rwl)->wcnt); } while (0)  
-
-#define rwlock_rdlock_d(rwl)   do { if (1) printf("Thr %i: at %i:   RDLOCK %p   %s (%i, %i)\n", GetCurrentThreadId(), __LINE__, rwl, __FUNCTION__, (rwl)->rcnt, (rwl)->wcnt); assert((rwl)->rcnt >= 0 && (rwl)->wcnt >= 0); AcquireSRWLockShared(&(rwl)->lock); InterlockedIncrement(&(rwl)->rcnt); } while (0)
-#define rwlock_wrlock_d(rwl)   do { if (1) printf("Thr %i: at %i:   WRLOCK %p   %s (%i, %i)\n", GetCurrentThreadId(), __LINE__, rwl, __FUNCTION__, (rwl)->rcnt, (rwl)->wcnt); assert((rwl)->rcnt >= 0 && (rwl)->wcnt >= 0); AcquireSRWLockExclusive(&(rwl)->lock); InterlockedIncrement(&(rwl)->wcnt); } while (0)
-#define rwlock_rdunlock_d(rwl) do { if (1) printf("Thr %i: at %i: RDUNLOCK %p   %s (%i, %i)\n", GetCurrentThreadId(), __LINE__, rwl, __FUNCTION__, (rwl)->rcnt, (rwl)->wcnt); assert((rwl)->rcnt > 0 && (rwl)->wcnt >= 0); ReleaseSRWLockShared(&(rwl)->lock); InterlockedDecrement(&(rwl)->rcnt); } while (0)  
-#define rwlock_wrunlock_d(rwl) do { if (1) printf("Thr %i: at %i: RWUNLOCK %p   %s (%i, %i)\n", GetCurrentThreadId(), __LINE__, rwl, __FUNCTION__, (rwl)->rcnt, (rwl)->wcnt); assert((rwl)->rcnt >= 0 && (rwl)->wcnt > 0); ReleaseSRWLockExclusive(&(rwl)->lock); InterlockedDecrement(&(rwl)->wcnt); } while (0)  
-
-
-#else
-typedef pthread_rwlock_t rwlock_t;
-
-int rwlock_init (rwlock_t *rwl);
-int rwlock_destroy (rwlock_t *rwl);
-int rwlock_rdlock (rwlock_t *rwl);
-int rwlock_wrlock (rwlock_t *rwl);
-int rwlock_rdunlock (rwlock_t *rwl);
-int rwlock_wrunlock (rwlock_t *rwl);
-
-#endif
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _TINYCTHREAD_H_ */

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/win32_config.h
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/win32_config.h b/thirdparty/librdkafka-0.11.1/src/win32_config.h
deleted file mode 100644
index d759aab..0000000
--- a/thirdparty/librdkafka-0.11.1/src/win32_config.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
-* librdkafka - Apache Kafka C library
-*
-* Copyright (c) 2012-2015 Magnus Edenhill
-* All rights reserved.
-*
-* Redistribution and use in source and binary forms, with or without
-* modification, are permitted provided that the following conditions are met:
-*
-* 1. Redistributions of source code must retain the above copyright notice,
-*    this list of conditions and the following disclaimer.
-* 2. Redistributions in binary form must reproduce the above copyright notice,
-*    this list of conditions and the following disclaimer in the documentation
-*    and/or other materials provided with the distribution.
-*
-* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-* POSSIBILITY OF SUCH DAMAGE.
-*/
-
-/**
- * Hand-crafted config header file for Win32 builds.
- */
-#pragma once
-
-#define WITH_SSL 1
-#define WITH_ZLIB 1
-#define WITH_SNAPPY 1
-#define WITH_SASL_SCRAM 1
-#define ENABLE_DEVEL 0
-#define WITH_PLUGINS 1
-#define SOLIB_EXT ".dll"
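These flags are consumed via ordinary preprocessor checks when the Win32 sources are built; purely as an illustration (the function below is hypothetical, not part of librdkafka):

    #include "win32_config.h"

    static const char *ssl_status (void) {
    #if WITH_SSL
            return "built with SSL support";
    #else
            return "built without SSL support";
    #endif
    }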

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/xxhash.c
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/xxhash.c b/thirdparty/librdkafka-0.11.1/src/xxhash.c
deleted file mode 100644
index e9ff2d4..0000000
--- a/thirdparty/librdkafka-0.11.1/src/xxhash.c
+++ /dev/null
@@ -1,889 +0,0 @@
-/*
-*  xxHash - Fast Hash algorithm
-*  Copyright (C) 2012-2016, Yann Collet
-*
-*  BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-*
-*  Redistribution and use in source and binary forms, with or without
-*  modification, are permitted provided that the following conditions are
-*  met:
-*
-*  * Redistributions of source code must retain the above copyright
-*  notice, this list of conditions and the following disclaimer.
-*  * Redistributions in binary form must reproduce the above
-*  copyright notice, this list of conditions and the following disclaimer
-*  in the documentation and/or other materials provided with the
-*  distribution.
-*
-*  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-*  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-*  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-*  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-*  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-*  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-*  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-*  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-*  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-*  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-*  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*
-*  You can contact the author at :
-*  - xxHash homepage: http://www.xxhash.com
-*  - xxHash source repository : https://github.com/Cyan4973/xxHash
-*/
-
-
-/* *************************************
-*  Tuning parameters
-***************************************/
-/*!XXH_FORCE_MEMORY_ACCESS :
- * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
- * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
- * The switch below allows selecting a different access method for improved performance.
- * Method 0 (default) : use `memcpy()`. Safe and portable.
- * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable).
- *            This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
- * Method 2 : direct access. This method doesn't depend on the compiler but violates the C standard.
- *            It can generate buggy code on targets which do not support unaligned memory accesses.
- *            But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6)
- * See http://stackoverflow.com/a/32095106/646947 for details.
- * Prefer these methods in priority order (0 > 1 > 2)
- */
-#ifndef XXH_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */
-#  if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
-#    define XXH_FORCE_MEMORY_ACCESS 2
-#  elif defined(__INTEL_COMPILER) || \
-  (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) ))
-#    define XXH_FORCE_MEMORY_ACCESS 1
-#  endif
-#endif
-
-/*!XXH_ACCEPT_NULL_INPUT_POINTER :
- * If the input pointer is a null pointer, xxHash default behavior is to trigger a memory access error, since it is a bad pointer.
- * When this option is enabled, xxHash output for null input pointers will be the same as a null-length input.
- * By default, this option is disabled. To enable it, uncomment below define :
- */
-/* #define XXH_ACCEPT_NULL_INPUT_POINTER 1 */
-
-/*!XXH_FORCE_NATIVE_FORMAT :
- * By default, xxHash library provides endian-independent Hash values, based on little-endian convention.
- * Results are therefore identical for little-endian and big-endian CPUs.
- * This comes at a performance cost for big-endian CPUs, since some swapping is required to emulate little-endian format.
- * Should endian-independence be of no importance for your application, you may set the #define below to 1,
- * to improve speed for big-endian CPUs.
- * This option has no impact on little-endian CPUs.
- */
-#ifndef XXH_FORCE_NATIVE_FORMAT   /* can be defined externally */
-#  define XXH_FORCE_NATIVE_FORMAT 0
-#endif
-
-/*!XXH_FORCE_ALIGN_CHECK :
- * This is a minor performance trick, only useful with lots of very small keys.
- * It means : check for aligned/unaligned input.
- * The check costs one initial branch per hash; set to 0 when the input data
- * is guaranteed to be aligned.
- */
-#ifndef XXH_FORCE_ALIGN_CHECK /* can be defined externally */
-#  if defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64)
-#    define XXH_FORCE_ALIGN_CHECK 0
-#  else
-#    define XXH_FORCE_ALIGN_CHECK 1
-#  endif
-#endif
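All three tuning macros above are read when xxhash.c itself is compiled, so they are normally supplied by the build system rather than edited in place. A hedged sketch of pinning them from a wrapper translation unit; the chosen values are illustrative, not a recommendation:

    /* Equivalent to compiling with
     *   cc -DXXH_FORCE_MEMORY_ACCESS=1 -DXXH_FORCE_ALIGN_CHECK=0 -c xxhash.c
     * The defines must be visible before xxhash.c is processed. */
    #define XXH_FORCE_MEMORY_ACCESS 1
    #define XXH_FORCE_ALIGN_CHECK 0
    #include "xxhash.c"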
-
-
-/* *************************************
-*  Includes & Memory related functions
-***************************************/
-/*! Modify the local functions below should you wish to use some other memory routines
-*   for malloc(), free() */
-#include <stdlib.h>
-static void* XXH_malloc(size_t s) { return malloc(s); }
-static void  XXH_free  (void* p)  { free(p); }
-/*! and for memcpy() */
-#include <string.h>
-static void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcpy(dest,src,size); }
-
-#define XXH_STATIC_LINKING_ONLY
-#include "xxhash.h"
-
-
-/* *************************************
-*  Compiler Specific Options
-***************************************/
-#ifdef _MSC_VER    /* Visual Studio */
-#  pragma warning(disable : 4127)      /* disable: C4127: conditional expression is constant */
-#  define FORCE_INLINE static __forceinline
-#else
-#  if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */
-#    ifdef __GNUC__
-#      define FORCE_INLINE static inline __attribute__((always_inline))
-#    else
-#      define FORCE_INLINE static inline
-#    endif
-#  else
-#    define FORCE_INLINE static
-#  endif /* __STDC_VERSION__ */
-#endif
-
-
-/* *************************************
-*  Basic Types
-***************************************/
-#ifndef MEM_MODULE
-# if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
-#   include <stdint.h>
-    typedef uint8_t  BYTE;
-    typedef uint16_t U16;
-    typedef uint32_t U32;
-    typedef  int32_t S32;
-# else
-    typedef unsigned char      BYTE;
-    typedef unsigned short     U16;
-    typedef unsigned int       U32;
-    typedef   signed int       S32;
-# endif
-#endif
-
-#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))
-
-/* Force direct memory access. Only works on CPU which support unaligned memory access in hardware */
-static U32 XXH_read32(const void* memPtr) { return *(const U32*) memPtr; }
-
-#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))
-
-/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
-/* currently only defined for gcc and icc */
-typedef union { U32 u32; } __attribute__((packed)) unalign;
-static U32 XXH_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }
-
-#else
-
-/* portable and safe solution. Generally efficient.
- * see : http://stackoverflow.com/a/32095106/646947
- */
-static U32 XXH_read32(const void* memPtr)
-{
-    U32 val;
-    memcpy(&val, memPtr, sizeof(val));
-    return val;
-}
-
-#endif   /* XXH_FORCE_MEMORY_ACCESS */
-
-
-/* ****************************************
-*  Compiler-specific Functions and Macros
-******************************************/
-#define XXH_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
-
-/* Note : although _rotl exists for MinGW (GCC under Windows), performance seems poor */
-#if defined(_MSC_VER)
-#  define XXH_rotl32(x,r) _rotl(x,r)
-#  define XXH_rotl64(x,r) _rotl64(x,r)
-#else
-#  define XXH_rotl32(x,r) ((x << r) | (x >> (32 - r)))
-#  define XXH_rotl64(x,r) ((x << r) | (x >> (64 - r)))
-#endif
-
-#if defined(_MSC_VER)     /* Visual Studio */
-#  define XXH_swap32 _byteswap_ulong
-#elif XXH_GCC_VERSION >= 403
-#  define XXH_swap32 __builtin_bswap32
-#else
-static U32 XXH_swap32 (U32 x)
-{
-    return  ((x << 24) & 0xff000000 ) |
-            ((x <<  8) & 0x00ff0000 ) |
-            ((x >>  8) & 0x0000ff00 ) |
-            ((x >> 24) & 0x000000ff );
-}
-#endif
-
-
-/* *************************************
-*  Architecture Macros
-***************************************/
-typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess;
-
-/* XXH_CPU_LITTLE_ENDIAN can be defined externally, for example on the compiler command line */
-#ifndef XXH_CPU_LITTLE_ENDIAN
-    static const int g_one = 1;
-#   define XXH_CPU_LITTLE_ENDIAN   (*(const char*)(&g_one))
-#endif
-
-
-/* ***************************
-*  Memory reads
-*****************************/
-typedef enum { XXH_aligned, XXH_unaligned } XXH_alignment;
-
-FORCE_INLINE U32 XXH_readLE32_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
-{
-    if (align==XXH_unaligned)
-        return endian==XXH_littleEndian ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr));
-    else
-        return endian==XXH_littleEndian ? *(const U32*)ptr : XXH_swap32(*(const U32*)ptr);
-}
-
-FORCE_INLINE U32 XXH_readLE32(const void* ptr, XXH_endianess endian)
-{
-    return XXH_readLE32_align(ptr, endian, XXH_unaligned);
-}
-
-static U32 XXH_readBE32(const void* ptr)
-{
-    return XXH_CPU_LITTLE_ENDIAN ? XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr);
-}
-
-
-/* *************************************
-*  Macros
-***************************************/
-#define XXH_STATIC_ASSERT(c)   { enum { XXH_static_assert = 1/(int)(!!(c)) }; }    /* use only *after* variable declarations */
-XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; }
-
-
-/* *******************************************************************
-*  32-bits hash functions
-*********************************************************************/
-static const U32 PRIME32_1 = 2654435761U;
-static const U32 PRIME32_2 = 2246822519U;
-static const U32 PRIME32_3 = 3266489917U;
-static const U32 PRIME32_4 =  668265263U;
-static const U32 PRIME32_5 =  374761393U;
-
-static U32 XXH32_round(U32 seed, U32 input)
-{
-    seed += input * PRIME32_2;
-    seed  = XXH_rotl32(seed, 13);
-    seed *= PRIME32_1;
-    return seed;
-}
-
-FORCE_INLINE U32 XXH32_endian_align(const void* input, size_t len, U32 seed, XXH_endianess endian, XXH_alignment align)
-{
-    const BYTE* p = (const BYTE*)input;
-    const BYTE* bEnd = p + len;
-    U32 h32;
-#define XXH_get32bits(p) XXH_readLE32_align(p, endian, align)
-
-#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
-    if (p==NULL) {
-        len=0;
-        bEnd=p=(const BYTE*)(size_t)16;
-    }
-#endif
-
-    if (len>=16) {
-        const BYTE* const limit = bEnd - 16;
-        U32 v1 = seed + PRIME32_1 + PRIME32_2;
-        U32 v2 = seed + PRIME32_2;
-        U32 v3 = seed + 0;
-        U32 v4 = seed - PRIME32_1;
-
-        do {
-            v1 = XXH32_round(v1, XXH_get32bits(p)); p+=4;
-            v2 = XXH32_round(v2, XXH_get32bits(p)); p+=4;
-            v3 = XXH32_round(v3, XXH_get32bits(p)); p+=4;
-            v4 = XXH32_round(v4, XXH_get32bits(p)); p+=4;
-        } while (p<=limit);
-
-        h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7) + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18);
-    } else {
-        h32  = seed + PRIME32_5;
-    }
-
-    h32 += (U32) len;
-
-    while (p+4<=bEnd) {
-        h32 += XXH_get32bits(p) * PRIME32_3;
-        h32  = XXH_rotl32(h32, 17) * PRIME32_4 ;
-        p+=4;
-    }
-
-    while (p<bEnd) {
-        h32 += (*p) * PRIME32_5;
-        h32 = XXH_rotl32(h32, 11) * PRIME32_1 ;
-        p++;
-    }
-
-    h32 ^= h32 >> 15;
-    h32 *= PRIME32_2;
-    h32 ^= h32 >> 13;
-    h32 *= PRIME32_3;
-    h32 ^= h32 >> 16;
-
-    return h32;
-}
-
-
-XXH_PUBLIC_API unsigned int XXH32 (const void* input, size_t len, unsigned int seed)
-{
-#if 0
-    /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
-    XXH32_state_t state;
-    XXH32_reset(&state, seed);
-    XXH32_update(&state, input, len);
-    return XXH32_digest(&state);
-#else
-    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
-
-    if (XXH_FORCE_ALIGN_CHECK) {
-        if ((((size_t)input) & 3) == 0) {   /* Input is 4-bytes aligned, leverage the speed benefit */
-            if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
-                return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned);
-            else
-                return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned);
-    }   }
-
-    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
-        return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned);
-    else
-        return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned);
-#endif
-}
-
-
-
-/*======   Hash streaming   ======*/
-
-XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void)
-{
-    return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t));
-}
-XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr)
-{
-    XXH_free(statePtr);
-    return XXH_OK;
-}
-
-XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dstState, const XXH32_state_t* srcState)
-{
-    memcpy(dstState, srcState, sizeof(*dstState));
-}
-
-XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, unsigned int seed)
-{
-    XXH32_state_t state;   /* using a local state to memcpy() in order to avoid strict-aliasing warnings */
-    memset(&state, 0, sizeof(state)-4);   /* do not write into reserved, for future removal */
-    state.v1 = seed + PRIME32_1 + PRIME32_2;
-    state.v2 = seed + PRIME32_2;
-    state.v3 = seed + 0;
-    state.v4 = seed - PRIME32_1;
-    memcpy(statePtr, &state, sizeof(state));
-    return XXH_OK;
-}
-
-
-FORCE_INLINE XXH_errorcode XXH32_update_endian (XXH32_state_t* state, const void* input, size_t len, XXH_endianess endian)
-{
-    const BYTE* p = (const BYTE*)input;
-    const BYTE* const bEnd = p + len;
-
-#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
-    if (input==NULL) return XXH_ERROR;
-#endif
-
-    state->total_len_32 += (unsigned)len;
-    state->large_len |= (len>=16) | (state->total_len_32>=16);
-
-    if (state->memsize + len < 16)  {   /* fill in tmp buffer */
-        XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, len);
-        state->memsize += (unsigned)len;
-        return XXH_OK;
-    }
-
-    if (state->memsize) {   /* some data left from previous update */
-        XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, 16-state->memsize);
-        {   const U32* p32 = state->mem32;
-            state->v1 = XXH32_round(state->v1, XXH_readLE32(p32, endian)); p32++;
-            state->v2 = XXH32_round(state->v2, XXH_readLE32(p32, endian)); p32++;
-            state->v3 = XXH32_round(state->v3, XXH_readLE32(p32, endian)); p32++;
-            state->v4 = XXH32_round(state->v4, XXH_readLE32(p32, endian)); p32++;
-        }
-        p += 16-state->memsize;
-        state->memsize = 0;
-    }
-
-    if (p <= bEnd-16) {
-        const BYTE* const limit = bEnd - 16;
-        U32 v1 = state->v1;
-        U32 v2 = state->v2;
-        U32 v3 = state->v3;
-        U32 v4 = state->v4;
-
-        do {
-            v1 = XXH32_round(v1, XXH_readLE32(p, endian)); p+=4;
-            v2 = XXH32_round(v2, XXH_readLE32(p, endian)); p+=4;
-            v3 = XXH32_round(v3, XXH_readLE32(p, endian)); p+=4;
-            v4 = XXH32_round(v4, XXH_readLE32(p, endian)); p+=4;
-        } while (p<=limit);
-
-        state->v1 = v1;
-        state->v2 = v2;
-        state->v3 = v3;
-        state->v4 = v4;
-    }
-
-    if (p < bEnd) {
-        XXH_memcpy(state->mem32, p, (size_t)(bEnd-p));
-        state->memsize = (unsigned)(bEnd-p);
-    }
-
-    return XXH_OK;
-}
-
-XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* state_in, const void* input, size_t len)
-{
-    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
-
-    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
-        return XXH32_update_endian(state_in, input, len, XXH_littleEndian);
-    else
-        return XXH32_update_endian(state_in, input, len, XXH_bigEndian);
-}
-
-
-
-FORCE_INLINE U32 XXH32_digest_endian (const XXH32_state_t* state, XXH_endianess endian)
-{
-    const BYTE * p = (const BYTE*)state->mem32;
-    const BYTE* const bEnd = (const BYTE*)(state->mem32) + state->memsize;
-    U32 h32;
-
-    if (state->large_len) {
-        h32 = XXH_rotl32(state->v1, 1) + XXH_rotl32(state->v2, 7) + XXH_rotl32(state->v3, 12) + XXH_rotl32(state->v4, 18);
-    } else {
-        h32 = state->v3 /* == seed */ + PRIME32_5;
-    }
-
-    h32 += state->total_len_32;
-
-    while (p+4<=bEnd) {
-        h32 += XXH_readLE32(p, endian) * PRIME32_3;
-        h32  = XXH_rotl32(h32, 17) * PRIME32_4;
-        p+=4;
-    }
-
-    while (p<bEnd) {
-        h32 += (*p) * PRIME32_5;
-        h32  = XXH_rotl32(h32, 11) * PRIME32_1;
-        p++;
-    }
-
-    h32 ^= h32 >> 15;
-    h32 *= PRIME32_2;
-    h32 ^= h32 >> 13;
-    h32 *= PRIME32_3;
-    h32 ^= h32 >> 16;
-
-    return h32;
-}
-
-
-XXH_PUBLIC_API unsigned int XXH32_digest (const XXH32_state_t* state_in)
-{
-    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
-
-    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
-        return XXH32_digest_endian(state_in, XXH_littleEndian);
-    else
-        return XXH32_digest_endian(state_in, XXH_bigEndian);
-}
-
-
-/*======   Canonical representation   ======*/
-
-/*! Default XXH result types are basic unsigned 32 and 64 bits.
-*   The canonical representation follows human-readable write convention, aka big-endian (large digits first).
-*   These functions allow transformation of hash result into and from its canonical format.
-*   This way, hash values can be written into a file or buffer, and remain comparable across different systems and programs.
-*/
-
-XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash)
-{
-    XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t));
-    if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash);
-    memcpy(dst, &hash, sizeof(*dst));
-}
-
-XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src)
-{
-    return XXH_readBE32(src);
-}
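The canonical-representation helpers above give a fixed big-endian byte order for storing or transmitting hashes; a minimal round-trip sketch (buffer handling is illustrative; the digest member name follows xxhash.h in this tree):

    #include <string.h>
    #include "xxhash.h"

    /* Serialize a 32-bit hash in canonical (big-endian) form, then read it back. */
    static XXH32_hash_t roundtrip32 (XXH32_hash_t h, unsigned char out[4]) {
            XXH32_canonical_t canonical;
            XXH32_canonicalFromHash(&canonical, h);
            memcpy(out, canonical.digest, sizeof(canonical.digest));
            return XXH32_hashFromCanonical(&canonical);
    }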
-
-
-#ifndef XXH_NO_LONG_LONG
-
-/* *******************************************************************
-*  64-bits hash functions
-*********************************************************************/
-
-/*======   Memory access   ======*/
-
-#ifndef MEM_MODULE
-# define MEM_MODULE
-# if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
-#   include <stdint.h>
-    typedef uint64_t U64;
-# else
-    typedef unsigned long long U64;   /* if your compiler doesn't support unsigned long long, replace by another 64-bit type here. Note that xxhash.h will also need to be updated. */
-# endif
-#endif
-
-
-#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))
-
-/* Force direct memory access. Only works on CPU which support unaligned memory access in hardware */
-static U64 XXH_read64(const void* memPtr) { return *(const U64*) memPtr; }
-
-#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))
-
-/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
-/* currently only defined for gcc and icc */
-typedef union { U32 u32; U64 u64; } __attribute__((packed)) unalign64;
-static U64 XXH_read64(const void* ptr) { return ((const unalign64*)ptr)->u64; }
-
-#else
-
-/* portable and safe solution. Generally efficient.
- * see : http://stackoverflow.com/a/32095106/646947
- */
-
-static U64 XXH_read64(const void* memPtr)
-{
-    U64 val;
-    memcpy(&val, memPtr, sizeof(val));
-    return val;
-}
-
-#endif   /* XXH_FORCE_MEMORY_ACCESS */
-
-#if defined(_MSC_VER)     /* Visual Studio */
-#  define XXH_swap64 _byteswap_uint64
-#elif XXH_GCC_VERSION >= 403
-#  define XXH_swap64 __builtin_bswap64
-#else
-static U64 XXH_swap64 (U64 x)
-{
-    return  ((x << 56) & 0xff00000000000000ULL) |
-            ((x << 40) & 0x00ff000000000000ULL) |
-            ((x << 24) & 0x0000ff0000000000ULL) |
-            ((x << 8)  & 0x000000ff00000000ULL) |
-            ((x >> 8)  & 0x00000000ff000000ULL) |
-            ((x >> 24) & 0x0000000000ff0000ULL) |
-            ((x >> 40) & 0x000000000000ff00ULL) |
-            ((x >> 56) & 0x00000000000000ffULL);
-}
-#endif
-
-FORCE_INLINE U64 XXH_readLE64_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
-{
-    if (align==XXH_unaligned)
-        return endian==XXH_littleEndian ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr));
-    else
-        return endian==XXH_littleEndian ? *(const U64*)ptr : XXH_swap64(*(const U64*)ptr);
-}
-
-FORCE_INLINE U64 XXH_readLE64(const void* ptr, XXH_endianess endian)
-{
-    return XXH_readLE64_align(ptr, endian, XXH_unaligned);
-}
-
-static U64 XXH_readBE64(const void* ptr)
-{
-    return XXH_CPU_LITTLE_ENDIAN ? XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr);
-}
-
-
-/*======   xxh64   ======*/
-
-static const U64 PRIME64_1 = 11400714785074694791ULL;
-static const U64 PRIME64_2 = 14029467366897019727ULL;
-static const U64 PRIME64_3 =  1609587929392839161ULL;
-static const U64 PRIME64_4 =  9650029242287828579ULL;
-static const U64 PRIME64_5 =  2870177450012600261ULL;
-
-static U64 XXH64_round(U64 acc, U64 input)
-{
-    acc += input * PRIME64_2;
-    acc  = XXH_rotl64(acc, 31);
-    acc *= PRIME64_1;
-    return acc;
-}
-
-static U64 XXH64_mergeRound(U64 acc, U64 val)
-{
-    val  = XXH64_round(0, val);
-    acc ^= val;
-    acc  = acc * PRIME64_1 + PRIME64_4;
-    return acc;
-}
-
-FORCE_INLINE U64 XXH64_endian_align(const void* input, size_t len, U64 seed, XXH_endianess endian, XXH_alignment align)
-{
-    const BYTE* p = (const BYTE*)input;
-    const BYTE* const bEnd = p + len;
-    U64 h64;
-#define XXH_get64bits(p) XXH_readLE64_align(p, endian, align)
-
-#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
-    if (p==NULL) {
-        len=0;
-        bEnd=p=(const BYTE*)(size_t)32;
-    }
-#endif
-
-    if (len>=32) {
-        const BYTE* const limit = bEnd - 32;
-        U64 v1 = seed + PRIME64_1 + PRIME64_2;
-        U64 v2 = seed + PRIME64_2;
-        U64 v3 = seed + 0;
-        U64 v4 = seed - PRIME64_1;
-
-        do {
-            v1 = XXH64_round(v1, XXH_get64bits(p)); p+=8;
-            v2 = XXH64_round(v2, XXH_get64bits(p)); p+=8;
-            v3 = XXH64_round(v3, XXH_get64bits(p)); p+=8;
-            v4 = XXH64_round(v4, XXH_get64bits(p)); p+=8;
-        } while (p<=limit);
-
-        h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
-        h64 = XXH64_mergeRound(h64, v1);
-        h64 = XXH64_mergeRound(h64, v2);
-        h64 = XXH64_mergeRound(h64, v3);
-        h64 = XXH64_mergeRound(h64, v4);
-
-    } else {
-        h64  = seed + PRIME64_5;
-    }
-
-    h64 += (U64) len;
-
-    while (p+8<=bEnd) {
-        U64 const k1 = XXH64_round(0, XXH_get64bits(p));
-        h64 ^= k1;
-        h64  = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4;
-        p+=8;
-    }
-
-    if (p+4<=bEnd) {
-        h64 ^= (U64)(XXH_get32bits(p)) * PRIME64_1;
-        h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
-        p+=4;
-    }
-
-    while (p<bEnd) {
-        h64 ^= (*p) * PRIME64_5;
-        h64 = XXH_rotl64(h64, 11) * PRIME64_1;
-        p++;
-    }
-
-    h64 ^= h64 >> 33;
-    h64 *= PRIME64_2;
-    h64 ^= h64 >> 29;
-    h64 *= PRIME64_3;
-    h64 ^= h64 >> 32;
-
-    return h64;
-}
-
-
-XXH_PUBLIC_API unsigned long long XXH64 (const void* input, size_t len, unsigned long long seed)
-{
-#if 0
-    /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
-    XXH64_state_t state;
-    XXH64_reset(&state, seed);
-    XXH64_update(&state, input, len);
-    return XXH64_digest(&state);
-#else
-    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
-
-    if (XXH_FORCE_ALIGN_CHECK) {
-        if ((((size_t)input) & 7)==0) {  /* Input is aligned, let's leverage the speed advantage */
-            if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
-                return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned);
-            else
-                return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned);
-    }   }
-
-    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
-        return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned);
-    else
-        return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned);
-#endif
-}
-
-/*======   Hash Streaming   ======*/
-
-XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void)
-{
-    return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t));
-}
-XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr)
-{
-    XXH_free(statePtr);
-    return XXH_OK;
-}
-
-XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* dstState, const XXH64_state_t* srcState)
-{
-    memcpy(dstState, srcState, sizeof(*dstState));
-}
-
-XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, unsigned long long seed)
-{
-    XXH64_state_t state;   /* using a local state to memcpy() in order to avoid strict-aliasing warnings */
-    memset(&state, 0, sizeof(state)-8);   /* do not write into reserved, for future removal */
-    state.v1 = seed + PRIME64_1 + PRIME64_2;
-    state.v2 = seed + PRIME64_2;
-    state.v3 = seed + 0;
-    state.v4 = seed - PRIME64_1;
-    memcpy(statePtr, &state, sizeof(state));
-    return XXH_OK;
-}
-
-FORCE_INLINE XXH_errorcode XXH64_update_endian (XXH64_state_t* state, const void* input, size_t len, XXH_endianess endian)
-{
-    const BYTE* p = (const BYTE*)input;
-    const BYTE* const bEnd = p + len;
-
-#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
-    if (input==NULL) return XXH_ERROR;
-#endif
-
-    state->total_len += len;
-
-    if (state->memsize + len < 32) {  /* fill in tmp buffer */
-        XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, len);
-        state->memsize += (U32)len;
-        return XXH_OK;
-    }
-
-    if (state->memsize) {   /* tmp buffer is full */
-        XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, 32-state->memsize);
-        state->v1 = XXH64_round(state->v1, XXH_readLE64(state->mem64+0, endian));
-        state->v2 = XXH64_round(state->v2, XXH_readLE64(state->mem64+1, endian));
-        state->v3 = XXH64_round(state->v3, XXH_readLE64(state->mem64+2, endian));
-        state->v4 = XXH64_round(state->v4, XXH_readLE64(state->mem64+3, endian));
-        p += 32-state->memsize;
-        state->memsize = 0;
-    }
-
-    if (p+32 <= bEnd) {
-        const BYTE* const limit = bEnd - 32;
-        U64 v1 = state->v1;
-        U64 v2 = state->v2;
-        U64 v3 = state->v3;
-        U64 v4 = state->v4;
-
-        do {
-            v1 = XXH64_round(v1, XXH_readLE64(p, endian)); p+=8;
-            v2 = XXH64_round(v2, XXH_readLE64(p, endian)); p+=8;
-            v3 = XXH64_round(v3, XXH_readLE64(p, endian)); p+=8;
-            v4 = XXH64_round(v4, XXH_readLE64(p, endian)); p+=8;
-        } while (p<=limit);
-
-        state->v1 = v1;
-        state->v2 = v2;
-        state->v3 = v3;
-        state->v4 = v4;
-    }
-
-    if (p < bEnd) {
-        XXH_memcpy(state->mem64, p, (size_t)(bEnd-p));
-        state->memsize = (unsigned)(bEnd-p);
-    }
-
-    return XXH_OK;
-}
-
-XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* state_in, const void* input, size_t len)
-{
-    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
-
-    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
-        return XXH64_update_endian(state_in, input, len, XXH_littleEndian);
-    else
-        return XXH64_update_endian(state_in, input, len, XXH_bigEndian);
-}
-
-FORCE_INLINE U64 XXH64_digest_endian (const XXH64_state_t* state, XXH_endianess endian)
-{
-    const BYTE * p = (const BYTE*)state->mem64;
-    const BYTE* const bEnd = (const BYTE*)state->mem64 + state->memsize;
-    U64 h64;
-
-    if (state->total_len >= 32) {
-        U64 const v1 = state->v1;
-        U64 const v2 = state->v2;
-        U64 const v3 = state->v3;
-        U64 const v4 = state->v4;
-
-        h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
-        h64 = XXH64_mergeRound(h64, v1);
-        h64 = XXH64_mergeRound(h64, v2);
-        h64 = XXH64_mergeRound(h64, v3);
-        h64 = XXH64_mergeRound(h64, v4);
-    } else {
-        h64  = state->v3 + PRIME64_5;
-    }
-
-    h64 += (U64) state->total_len;
-
-    while (p+8<=bEnd) {
-        U64 const k1 = XXH64_round(0, XXH_readLE64(p, endian));
-        h64 ^= k1;
-        h64  = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4;
-        p+=8;
-    }
-
-    if (p+4<=bEnd) {
-        h64 ^= (U64)(XXH_readLE32(p, endian)) * PRIME64_1;
-        h64  = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
-        p+=4;
-    }
-
-    while (p<bEnd) {
-        h64 ^= (*p) * PRIME64_5;
-        h64  = XXH_rotl64(h64, 11) * PRIME64_1;
-        p++;
-    }
-
-    h64 ^= h64 >> 33;
-    h64 *= PRIME64_2;
-    h64 ^= h64 >> 29;
-    h64 *= PRIME64_3;
-    h64 ^= h64 >> 32;
-
-    return h64;
-}
-
-XXH_PUBLIC_API unsigned long long XXH64_digest (const XXH64_state_t* state_in)
-{
-    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
-
-    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
-        return XXH64_digest_endian(state_in, XXH_littleEndian);
-    else
-        return XXH64_digest_endian(state_in, XXH_bigEndian);
-}
-
-
-/*====== Canonical representation   ======*/
-
-XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash)
-{
-    XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t));
-    if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash);
-    memcpy(dst, &hash, sizeof(*dst));
-}
-
-XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src)
-{
-    return XXH_readBE64(src);
-}
-
-#endif  /* XXH_NO_LONG_LONG */
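For reference, the one-shot and streaming entry points removed above are used along these lines; the input data and seed below are illustrative:

    #include <stdio.h>
    #include <string.h>
    #include "xxhash.h"

    int main (void) {
            const char *data = "hello, kafka";
            unsigned int seed = 0;

            /* One-shot hashing */
            unsigned int h32 = XXH32(data, strlen(data), seed);
            unsigned long long h64 = XXH64(data, strlen(data), seed);

            /* Streaming: same result, fed in chunks */
            XXH32_state_t *st = XXH32_createState();
            XXH32_reset(st, seed);
            XXH32_update(st, data, 5);
            XXH32_update(st, data + 5, strlen(data) - 5);
            unsigned int h32_streamed = XXH32_digest(st);
            XXH32_freeState(st);

            printf("%08x %016llx %s\n", h32, h64,
                   h32 == h32_streamed ? "match" : "mismatch");
            return 0;
    }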


[50/51] [partial] nifi-minifi-cpp git commit: MINIFICPP-512 - upgrade to librdkafka 0.11.4

Posted by ph...@apache.org.
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/Doxyfile
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/Doxyfile b/thirdparty/librdkafka-0.11.1/Doxyfile
deleted file mode 100644
index 8e94e12..0000000
--- a/thirdparty/librdkafka-0.11.1/Doxyfile
+++ /dev/null
@@ -1,2385 +0,0 @@
-# Doxyfile 1.8.9.1
-
-# This file describes the settings to be used by the documentation system
-# doxygen (www.doxygen.org) for a project.
-#
-# All text after a double hash (##) is considered a comment and is placed in
-# front of the TAG it is preceding.
-#
-# All text after a single hash (#) is considered a comment and will be ignored.
-# The format is:
-# TAG = value [value, ...]
-# For lists, items can also be appended using:
-# TAG += value [value, ...]
-# Values that contain spaces should be placed between quotes (\" \").
-
-#---------------------------------------------------------------------------
-# Project related configuration options
-#---------------------------------------------------------------------------
-
-# This tag specifies the encoding used for all characters in the config file
-# that follow. The default is UTF-8 which is also the encoding used for all text
-# before the first occurrence of this tag. Doxygen uses libiconv (or the iconv
-# built into libc) for the transcoding. See http://www.gnu.org/software/libiconv
-# for the list of possible encodings.
-# The default value is: UTF-8.
-
-DOXYFILE_ENCODING      = UTF-8
-
-# The PROJECT_NAME tag is a single word (or a sequence of words surrounded by
-# double-quotes, unless you are using Doxywizard) that should identify the
-# project for which the documentation is generated. This name is used in the
-# title of most generated pages and in a few other places.
-# The default value is: My Project.
-
-PROJECT_NAME           = "librdkafka"
-
-# The PROJECT_NUMBER tag can be used to enter a project or revision number. This
-# could be handy for archiving the generated documentation or if some version
-# control system is used.
-
-PROJECT_NUMBER         =
-
-# Using the PROJECT_BRIEF tag one can provide an optional one line description
-# for a project that appears at the top of each page and should give viewer a
-# quick idea about the purpose of the project. Keep the description short.
-
-PROJECT_BRIEF          = "The Apache Kafka C/C++ client library"
-
-# With the PROJECT_LOGO tag one can specify a logo or an icon that is included
-# in the documentation. The maximum height of the logo should not exceed 55
-# pixels and the maximum width should not exceed 200 pixels. Doxygen will copy
-# the logo to the output directory.
-
-#PROJECT_LOGO           = kafka_logo.png
-
-# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path
-# into which the generated documentation will be written. If a relative path is
-# entered, it will be relative to the location where doxygen was started. If
-# left blank the current directory will be used.
-
-OUTPUT_DIRECTORY       = staging-docs
-
-# If the CREATE_SUBDIRS tag is set to YES then doxygen will create 4096 sub-
-# directories (in 2 levels) under the output directory of each output format and
-# will distribute the generated files over these directories. Enabling this
-# option can be useful when feeding doxygen a huge amount of source files, where
-# putting all generated files in the same directory would otherwise cause
-# performance problems for the file system.
-# The default value is: NO.
-
-CREATE_SUBDIRS         = NO
-
-# If the ALLOW_UNICODE_NAMES tag is set to YES, doxygen will allow non-ASCII
-# characters to appear in the names of generated files. If set to NO, non-ASCII
-# characters will be escaped, for example _xE3_x81_x84 will be used for Unicode
-# U+3044.
-# The default value is: NO.
-
-ALLOW_UNICODE_NAMES    = NO
-
-# The OUTPUT_LANGUAGE tag is used to specify the language in which all
-# documentation generated by doxygen is written. Doxygen will use this
-# information to generate all constant output in the proper language.
-# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Catalan, Chinese,
-# Chinese-Traditional, Croatian, Czech, Danish, Dutch, English (United States),
-# Esperanto, Farsi (Persian), Finnish, French, German, Greek, Hungarian,
-# Indonesian, Italian, Japanese, Japanese-en (Japanese with English messages),
-# Korean, Korean-en (Korean with English messages), Latvian, Lithuanian,
-# Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, Romanian, Russian,
-# Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, Swedish, Turkish,
-# Ukrainian and Vietnamese.
-# The default value is: English.
-
-OUTPUT_LANGUAGE        = English
-
-# If the BRIEF_MEMBER_DESC tag is set to YES, doxygen will include brief member
-# descriptions after the members that are listed in the file and class
-# documentation (similar to Javadoc). Set to NO to disable this.
-# The default value is: YES.
-
-BRIEF_MEMBER_DESC      = YES
-
-# If the REPEAT_BRIEF tag is set to YES, doxygen will prepend the brief
-# description of a member or function before the detailed description
-#
-# Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
-# brief descriptions will be completely suppressed.
-# The default value is: YES.
-
-REPEAT_BRIEF           = YES
-
-# This tag implements a quasi-intelligent brief description abbreviator that is
-# used to form the text in various listings. Each string in this list, if found
-# as the leading text of the brief description, will be stripped from the text
-# and the result, after processing the whole list, is used as the annotated
-# text. Otherwise, the brief description is used as-is. If left blank, the
-# following values are used ($name is automatically replaced with the name of
-# the entity): The $name class, The $name widget, The $name file, is, provides,
-# specifies, contains, represents, a, an and the.
-
-ABBREVIATE_BRIEF       =
-
-# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
-# doxygen will generate a detailed section even if there is only a brief
-# description.
-# The default value is: NO.
-
-ALWAYS_DETAILED_SEC    = NO
-
-# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all
-# inherited members of a class in the documentation of that class as if those
-# members were ordinary class members. Constructors, destructors and assignment
-# operators of the base classes will not be shown.
-# The default value is: NO.
-
-INLINE_INHERITED_MEMB  = NO
-
-# If the FULL_PATH_NAMES tag is set to YES, doxygen will prepend the full path
-# before file names in the file list and in the header files. If set to NO, the
-# shortest path that makes the file name unique will be used.
-# The default value is: YES.
-
-FULL_PATH_NAMES        = YES
-
-# The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path.
-# Stripping is only done if one of the specified strings matches the left-hand
-# part of the path. The tag can be used to show relative paths in the file list.
-# If left blank the directory from which doxygen is run is used as the path to
-# strip.
-#
-# Note that you can specify absolute paths here, but also relative paths, which
-# will be relative from the directory where doxygen is started.
-# This tag requires that the tag FULL_PATH_NAMES is set to YES.
-
-STRIP_FROM_PATH        =
-
-# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the
-# path mentioned in the documentation of a class, which tells the reader which
-# header file to include in order to use a class. If left blank only the name of
-# the header file containing the class definition is used. Otherwise one should
-# specify the list of include paths that are normally passed to the compiler
-# using the -I flag.
-
-STRIP_FROM_INC_PATH    =
-
-# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but
-# less readable) file names. This can be useful if your file system doesn't
-# support long names, as on DOS, Mac, or CD-ROM.
-# The default value is: NO.
-
-SHORT_NAMES            = NO
-
-# If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the
-# first line (until the first dot) of a Javadoc-style comment as the brief
-# description. If set to NO, the Javadoc-style will behave just like regular Qt-
-# style comments (thus requiring an explicit @brief command for a brief
-# description.)
-# The default value is: NO.
-
-JAVADOC_AUTOBRIEF      = NO
-
-# If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first
-# line (until the first dot) of a Qt-style comment as the brief description. If
-# set to NO, the Qt-style will behave just like regular Qt-style comments (thus
-# requiring an explicit \brief command for a brief description.)
-# The default value is: NO.
-
-QT_AUTOBRIEF           = NO
-
-# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a
-# multi-line C++ special comment block (i.e. a block of //! or /// comments) as
-# a brief description. This used to be the default behavior. The new default is
-# to treat a multi-line C++ comment block as a detailed description. Set this
-# tag to YES if you prefer the old behavior instead.
-#
-# Note that setting this tag to YES also means that Rational Rose comments are
-# not recognized any more.
-# The default value is: NO.
-
-MULTILINE_CPP_IS_BRIEF = NO
-
-# If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the
-# documentation from any documented member that it re-implements.
-# The default value is: YES.
-
-INHERIT_DOCS           = YES
-
-# If the SEPARATE_MEMBER_PAGES tag is set to YES then doxygen will produce a new
-# page for each member. If set to NO, the documentation of a member will be part
-# of the file/class/namespace that contains it.
-# The default value is: NO.
-
-SEPARATE_MEMBER_PAGES  = NO
-
-# The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen
-# uses this value to replace tabs by spaces in code fragments.
-# Minimum value: 1, maximum value: 16, default value: 4.
-
-TAB_SIZE               = 4
-
-# This tag can be used to specify a number of aliases that act as commands in
-# the documentation. An alias has the form:
-# name=value
-# For example adding
-# "sideeffect=@par Side Effects:\n"
-# will allow you to put the command \sideeffect (or @sideeffect) in the
-# documentation, which will result in a user-defined paragraph with heading
-# "Side Effects:". You can put \n's in the value part of an alias to insert
-# newlines.
-
-ALIASES                = "locality=@par Thread restriction:"
-ALIASES               += "locks=@par Lock restriction:"
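A hedged illustration of how these aliases are meant to be used in the sources (the function below is hypothetical, invented for the example, and is not a real librdkafka API): a documentation comment can write @locality and @locks, which Doxygen expands into paragraphs headed "Thread restriction:" and "Lock restriction:".

/**
 * @brief Hypothetical example function (for illustration only).
 *
 * @locality Any application thread
 * @locks No locks may be held by the caller
 */
void example_function (void);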
-
-# This tag can be used to specify a number of word-keyword mappings (TCL only).
-# A mapping has the form "name=value". For example adding "class=itcl::class"
-# will allow you to use the command class in the itcl::class meaning.
-
-TCL_SUBST              =
-
-# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources
-# only. Doxygen will then generate output that is more tailored for C. For
-# instance, some of the names that are used will be different. The list of all
-# members will be omitted, etc.
-# The default value is: NO.
-
-OPTIMIZE_OUTPUT_FOR_C  = YES
-
-# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or
-# Python sources only. Doxygen will then generate output that is more tailored
-# for that language. For instance, namespaces will be presented as packages,
-# qualified scopes will look different, etc.
-# The default value is: NO.
-
-OPTIMIZE_OUTPUT_JAVA   = NO
-
-# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran
-# sources. Doxygen will then generate output that is tailored for Fortran.
-# The default value is: NO.
-
-OPTIMIZE_FOR_FORTRAN   = NO
-
-# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL
-# sources. Doxygen will then generate output that is tailored for VHDL.
-# The default value is: NO.
-
-OPTIMIZE_OUTPUT_VHDL   = NO
-
-# Doxygen selects the parser to use depending on the extension of the files it
-# parses. With this tag you can assign which parser to use for a given
-# extension. Doxygen has a built-in mapping, but you can override or extend it
-# using this tag. The format is ext=language, where ext is a file extension, and
-# language is one of the parsers supported by doxygen: IDL, Java, Javascript,
-# C#, C, C++, D, PHP, Objective-C, Python, Fortran (fixed format Fortran:
-# FortranFixed, free formatted Fortran: FortranFree, unknown formatted Fortran:
-# Fortran. In the latter case the parser tries to guess whether the code is fixed
-# or free formatted code, this is the default for Fortran type files), VHDL. For
-# instance to make doxygen treat .inc files as Fortran files (default is PHP),
-# and .f files as C (default is Fortran), use: inc=Fortran f=C.
-#
-# Note: For files without extension you can use no_extension as a placeholder.
-#
-# Note that for custom extensions you also need to set FILE_PATTERNS otherwise
-# the files are not read by doxygen.
-
-EXTENSION_MAPPING      =
-
-# If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments
-# according to the Markdown format, which allows for more readable
-# documentation. See http://daringfireball.net/projects/markdown/ for details.
-# The output of markdown processing is further processed by doxygen, so you can
-# mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in
-# case of backward compatibility issues.
-# The default value is: YES.
-
-MARKDOWN_SUPPORT       = YES
-
-# When enabled doxygen tries to link words that correspond to documented
-# classes, or namespaces to their corresponding documentation. Such a link can
-# be prevented in individual cases by putting a % sign in front of the word or
-# globally by setting AUTOLINK_SUPPORT to NO.
-# The default value is: YES.
-
-AUTOLINK_SUPPORT       = YES
-
-# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
-# to include (a tag file for) the STL sources as input, then you should set this
-# tag to YES in order to let doxygen match functions declarations and
-# definitions whose arguments contain STL classes (e.g. func(std::string);
-# versus func(std::string) {}). This also makes the inheritance and collaboration
-# diagrams that involve STL classes more complete and accurate.
-# The default value is: NO.
-
-BUILTIN_STL_SUPPORT    = NO
-
-# If you use Microsoft's C++/CLI language, you should set this option to YES to
-# enable parsing support.
-# The default value is: NO.
-
-CPP_CLI_SUPPORT        = NO
-
-# Set the SIP_SUPPORT tag to YES if your project consists of sip (see:
-# http://www.riverbankcomputing.co.uk/software/sip/intro) sources only. Doxygen
-# will parse them like normal C++ but will assume all classes use public instead
-# of private inheritance when no explicit protection keyword is present.
-# The default value is: NO.
-
-SIP_SUPPORT            = NO
-
-# For Microsoft's IDL there are propget and propput attributes to indicate
-# getter and setter methods for a property. Setting this option to YES will make
-# doxygen replace the get and set methods by a property in the documentation.
-# This will only work if the methods are indeed getting or setting a simple
-# type. If this is not the case, or you want to show the methods anyway, you
-# should set this option to NO.
-# The default value is: YES.
-
-IDL_PROPERTY_SUPPORT   = YES
-
-# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
-# tag is set to YES then doxygen will reuse the documentation of the first
-# member in the group (if any) for the other members of the group. By default
-# all members of a group must be documented explicitly.
-# The default value is: NO.
-
-DISTRIBUTE_GROUP_DOC   = NO
-
-# Set the SUBGROUPING tag to YES to allow class member groups of the same type
-# (for instance a group of public functions) to be put as a subgroup of that
-# type (e.g. under the Public Functions section). Set it to NO to prevent
-# subgrouping. Alternatively, this can be done per class using the
-# \nosubgrouping command.
-# The default value is: YES.
-
-SUBGROUPING            = YES
-
-# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions
-# are shown inside the group in which they are included (e.g. using \ingroup)
-# instead of on a separate page (for HTML and Man pages) or section (for LaTeX
-# and RTF).
-#
-# Note that this feature does not work in combination with
-# SEPARATE_MEMBER_PAGES.
-# The default value is: NO.
-
-INLINE_GROUPED_CLASSES = NO
-
-# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions
-# with only public data fields or simple typedef fields will be shown inline in
-# the documentation of the scope in which they are defined (i.e. file,
-# namespace, or group documentation), provided this scope is documented. If set
-# to NO, structs, classes, and unions are shown on a separate page (for HTML and
-# Man pages) or section (for LaTeX and RTF).
-# The default value is: NO.
-
-INLINE_SIMPLE_STRUCTS  = NO
-
-# When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or
-# enum is documented as struct, union, or enum with the name of the typedef. So
-# typedef struct TypeS {} TypeT, will appear in the documentation as a struct
-# with name TypeT. When disabled the typedef will appear as a member of a file,
-# namespace, or class. And the struct will be named TypeS. This can typically be
-# useful for C code in case the coding convention dictates that all compound
-# types are typedef'ed and only the typedef is referenced, never the tag name.
-# The default value is: NO.
-
-TYPEDEF_HIDES_STRUCT   = YES
-
-# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This
-# cache is used to resolve symbols given their name and scope. Since this can be
-# an expensive process and often the same symbol appears multiple times in the
-# code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small
-# doxygen will become slower. If the cache is too large, memory is wasted. The
-# cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range
-# is 0..9, the default is 0, corresponding to a cache size of 2^16=65536
-# symbols. At the end of a run doxygen will report the cache usage and suggest
-# the optimal cache size from a speed point of view.
-# Minimum value: 0, maximum value: 9, default value: 0.
-
-LOOKUP_CACHE_SIZE      = 0
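To make the cache-size formula concrete: a value of 2 would give a cache of 2^(16+2) = 262144 symbols, while the value 0 used here keeps the default of 2^16 = 65536 entries.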
-
-#---------------------------------------------------------------------------
-# Build related configuration options
-#---------------------------------------------------------------------------
-
-# If the EXTRACT_ALL tag is set to YES, doxygen will assume all entities in
-# documentation are documented, even if no documentation was available. Private
-# class members and static file members will be hidden unless the
-# EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES.
-# Note: This will also disable the warnings about undocumented members that are
-# normally produced when WARNINGS is set to YES.
-# The default value is: NO.
-
-EXTRACT_ALL            = NO
-
-# If the EXTRACT_PRIVATE tag is set to YES, all private members of a class will
-# be included in the documentation.
-# The default value is: NO.
-
-EXTRACT_PRIVATE        = NO
-
-# If the EXTRACT_PACKAGE tag is set to YES, all members with package or internal
-# scope will be included in the documentation.
-# The default value is: NO.
-
-EXTRACT_PACKAGE        = NO
-
-# If the EXTRACT_STATIC tag is set to YES, all static members of a file will be
-# included in the documentation.
-# The default value is: NO.
-
-EXTRACT_STATIC         = YES
-
-# If the EXTRACT_LOCAL_CLASSES tag is set to YES, classes (and structs) defined
-# locally in source files will be included in the documentation. If set to NO,
-# only classes defined in header files are included. Does not have any effect
-# for Java sources.
-# The default value is: YES.
-
-EXTRACT_LOCAL_CLASSES  = YES
-
-# This flag is only useful for Objective-C code. If set to YES, local methods,
-# which are defined in the implementation section but not in the interface are
-# included in the documentation. If set to NO, only methods in the interface are
-# included.
-# The default value is: NO.
-
-EXTRACT_LOCAL_METHODS  = NO
-
-# If this flag is set to YES, the members of anonymous namespaces will be
-# extracted and appear in the documentation as a namespace called
-# 'anonymous_namespace{file}', where file will be replaced with the base name of
-# the file that contains the anonymous namespace. By default anonymous namespaces
-# are hidden.
-# The default value is: NO.
-
-EXTRACT_ANON_NSPACES   = NO
-
-# If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all
-# undocumented members inside documented classes or files. If set to NO these
-# members will be included in the various overviews, but no documentation
-# section is generated. This option has no effect if EXTRACT_ALL is enabled.
-# The default value is: NO.
-
-HIDE_UNDOC_MEMBERS     = NO
-
-# If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all
-# undocumented classes that are normally visible in the class hierarchy. If set
-# to NO, these classes will be included in the various overviews. This option
-# has no effect if EXTRACT_ALL is enabled.
-# The default value is: NO.
-
-HIDE_UNDOC_CLASSES     = NO
-
-# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend
-# (class|struct|union) declarations. If set to NO, these declarations will be
-# included in the documentation.
-# The default value is: NO.
-
-HIDE_FRIEND_COMPOUNDS  = NO
-
-# If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any
-# documentation blocks found inside the body of a function. If set to NO, these
-# blocks will be appended to the function's detailed documentation block.
-# The default value is: NO.
-
-HIDE_IN_BODY_DOCS      = NO
-
-# The INTERNAL_DOCS tag determines if documentation that is typed after a
-# \internal command is included. If the tag is set to NO then the documentation
-# will be excluded. Set it to YES to include the internal documentation.
-# The default value is: NO.
-
-INTERNAL_DOCS          = NO
-
-# If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file
-# names in lower-case letters. If set to YES, upper-case letters are also
-# allowed. This is useful if you have classes or files whose names only differ
-# in case and if your file system supports case sensitive file names. Windows
-# and Mac users are advised to set this option to NO.
-# The default value is: system dependent.
-
-CASE_SENSE_NAMES       = YES
-
-# If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with
-# their full class and namespace scopes in the documentation. If set to YES, the
-# scope will be hidden.
-# The default value is: NO.
-
-HIDE_SCOPE_NAMES       = NO
-
-# If the HIDE_COMPOUND_REFERENCE tag is set to NO (default) then doxygen will
-# append additional text to a page's title, such as Class Reference. If set to
-# YES the compound reference will be hidden.
-# The default value is: NO.
-
-HIDE_COMPOUND_REFERENCE= NO
-
-# If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of
-# the files that are included by a file in the documentation of that file.
-# The default value is: YES.
-
-SHOW_INCLUDE_FILES     = YES
-
-# If the SHOW_GROUPED_MEMB_INC tag is set to YES then Doxygen will add for each
-# grouped member an include statement to the documentation, telling the reader
-# which file to include in order to use the member.
-# The default value is: NO.
-
-SHOW_GROUPED_MEMB_INC  = NO
-
-# If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include
-# files with double quotes in the documentation rather than with sharp brackets.
-# The default value is: NO.
-
-FORCE_LOCAL_INCLUDES   = NO
-
-# If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the
-# documentation for inline members.
-# The default value is: YES.
-
-INLINE_INFO            = YES
-
-# If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the
-# (detailed) documentation of file and class members alphabetically by member
-# name. If set to NO, the members will appear in declaration order.
-# The default value is: YES.
-
-SORT_MEMBER_DOCS       = NO
-
-# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief
-# descriptions of file, namespace and class members alphabetically by member
-# name. If set to NO, the members will appear in declaration order. Note that
-# this will also influence the order of the classes in the class list.
-# The default value is: NO.
-
-SORT_BRIEF_DOCS        = NO
-
-# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the
-# (brief and detailed) documentation of class members so that constructors and
-# destructors are listed first. If set to NO the constructors will appear in the
-# respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS.
-# Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief
-# member documentation.
-# Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting
-# detailed member documentation.
-# The default value is: NO.
-
-SORT_MEMBERS_CTORS_1ST = NO
-
-# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy
-# of group names into alphabetical order. If set to NO the group names will
-# appear in their defined order.
-# The default value is: NO.
-
-SORT_GROUP_NAMES       = NO
-
-# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by
-# fully-qualified names, including namespaces. If set to NO, the class list will
-# be sorted only by class name, not including the namespace part.
-# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
-# Note: This option applies only to the class list, not to the alphabetical
-# list.
-# The default value is: NO.
-
-SORT_BY_SCOPE_NAME     = NO
-
-# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper
-# type resolution of all parameters of a function it will reject a match between
-# the prototype and the implementation of a member function even if there is
-# only one candidate or it is obvious which candidate to choose by doing a
-# simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still
-# accept a match between prototype and implementation in such cases.
-# The default value is: NO.
-
-STRICT_PROTO_MATCHING  = NO
-
-# The GENERATE_TODOLIST tag can be used to enable (YES) or disable (NO) the todo
-# list. This list is created by putting \todo commands in the documentation.
-# The default value is: YES.
-
-GENERATE_TODOLIST      = YES
-
-# The GENERATE_TESTLIST tag can be used to enable (YES) or disable (NO) the test
-# list. This list is created by putting \test commands in the documentation.
-# The default value is: YES.
-
-GENERATE_TESTLIST      = YES
-
-# The GENERATE_BUGLIST tag can be used to enable (YES) or disable (NO) the bug
-# list. This list is created by putting \bug commands in the documentation.
-# The default value is: YES.
-
-GENERATE_BUGLIST       = YES
-
-# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or disable (NO)
-# the deprecated list. This list is created by putting \deprecated commands in
-# the documentation.
-# The default value is: YES.
-
-GENERATE_DEPRECATEDLIST= YES
-
-# The ENABLED_SECTIONS tag can be used to enable conditional documentation
-# sections, marked by \if <section_label> ... \endif and \cond <section_label>
-# ... \endcond blocks.
-
-ENABLED_SECTIONS       =
-
-# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the
-# initial value of a variable or macro / define can have for it to appear in the
-# documentation. If the initializer consists of more lines than specified here
-# it will be hidden. Use a value of 0 to hide initializers completely. The
-# appearance of the value of individual variables and macros / defines can be
-# controlled using \showinitializer or \hideinitializer command in the
-# documentation regardless of this setting.
-# Minimum value: 0, maximum value: 10000, default value: 30.
-
-MAX_INITIALIZER_LINES  = 30
-
-# Set the SHOW_USED_FILES tag to NO to disable the list of files generated at
-# the bottom of the documentation of classes and structs. If set to YES, the
-# list will mention the files that were used to generate the documentation.
-# The default value is: YES.
-
-SHOW_USED_FILES        = YES
-
-# Set the SHOW_FILES tag to NO to disable the generation of the Files page. This
-# will remove the Files entry from the Quick Index and from the Folder Tree View
-# (if specified).
-# The default value is: YES.
-
-SHOW_FILES             = YES
-
-# Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces
-# page. This will remove the Namespaces entry from the Quick Index and from the
-# Folder Tree View (if specified).
-# The default value is: YES.
-
-SHOW_NAMESPACES        = NO
-
-# The FILE_VERSION_FILTER tag can be used to specify a program or script that
-# doxygen should invoke to get the current version for each file (typically from
-# the version control system). Doxygen will invoke the program by executing (via
-# popen()) the command <command> <input-file>, where <command> is the value of the
-# FILE_VERSION_FILTER tag, and <input-file> is the name of an input file provided
-# by doxygen. Whatever the program writes to standard output is used as the file
-# version. For an example see the documentation.
-
-FILE_VERSION_FILTER    =
-
-# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed
-# by doxygen. The layout file controls the global structure of the generated
-# output files in an output format independent way. To create the layout file
-# that represents doxygen's defaults, run doxygen with the -l option. You can
-# optionally specify a file name after the option, if omitted DoxygenLayout.xml
-# will be used as the name of the layout file.
-#
-# Note that if you run doxygen from a directory containing a file called
-# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE
-# tag is left empty.
-
-LAYOUT_FILE            =
-
-# The CITE_BIB_FILES tag can be used to specify one or more bib files containing
-# the reference definitions. This must be a list of .bib files. The .bib
-# extension is automatically appended if omitted. This requires the bibtex tool
-# to be installed. See also http://en.wikipedia.org/wiki/BibTeX for more info.
-# For LaTeX the style of the bibliography can be controlled using
-# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the
-# search path. See also \cite for info on how to create references.
-
-CITE_BIB_FILES         =
-
-#---------------------------------------------------------------------------
-# Configuration options related to warning and progress messages
-#---------------------------------------------------------------------------
-
-# The QUIET tag can be used to turn on/off the messages that are generated to
-# standard output by doxygen. If QUIET is set to YES this implies that the
-# messages are off.
-# The default value is: NO.
-
-QUIET                  = NO
-
-# The WARNINGS tag can be used to turn on/off the warning messages that are
-# generated to standard error (stderr) by doxygen. If WARNINGS is set to YES
-# this implies that the warnings are on.
-#
-# Tip: Turn warnings on while writing the documentation.
-# The default value is: YES.
-
-WARNINGS               = YES
-
-# If the WARN_IF_UNDOCUMENTED tag is set to YES then doxygen will generate
-# warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag
-# will automatically be disabled.
-# The default value is: YES.
-
-WARN_IF_UNDOCUMENTED   = YES
-
-# If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for
-# potential errors in the documentation, such as not documenting some parameters
-# in a documented function, or documenting parameters that don't exist or using
-# markup commands wrongly.
-# The default value is: YES.
-
-WARN_IF_DOC_ERROR      = YES
-
-# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that
-# are documented, but have no documentation for their parameters or return
-# value. If set to NO, doxygen will only warn about wrong or incomplete
-# parameter documentation, but not about the absence of documentation.
-# The default value is: NO.
-
-WARN_NO_PARAMDOC       = NO
-
-# The WARN_FORMAT tag determines the format of the warning messages that doxygen
-# can produce. The string should contain the $file, $line, and $text tags, which
-# will be replaced by the file and line number from which the warning originated
-# and the warning text. Optionally the format may contain $version, which will
-# be replaced by the version of the file (if it could be obtained via
-# FILE_VERSION_FILTER)
-# The default value is: $file:$line: $text.
-
-WARN_FORMAT            = "$file:$line: $text"
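As a made-up illustration of this format: a warning originating at line 1234 of rdkafka.h would be printed roughly as "rdkafka.h:1234: <warning text>", with $file, $line and $text substituted accordingly (the file name and line number are invented for the example).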
-
-# The WARN_LOGFILE tag can be used to specify a file to which warning and error
-# messages should be written. If left blank the output is written to standard
-# error (stderr).
-
-WARN_LOGFILE           =
-
-#---------------------------------------------------------------------------
-# Configuration options related to the input files
-#---------------------------------------------------------------------------
-
-# The INPUT tag is used to specify the files and/or directories that contain
-# documented source files. You may enter file names like myfile.cpp or
-# directories like /usr/src/myproject. Separate the files or directories with
-# spaces.
-# Note: If this tag is empty the current directory is searched.
-
-INPUT                  = mainpage.doxy INTRODUCTION.md CONFIGURATION.md src/rdkafka.h src-cpp/rdkafkacpp.h
-
-# This tag can be used to specify the character encoding of the source files
-# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses
-# libiconv (or the iconv built into libc) for the transcoding. See the libiconv
-# documentation (see: http://www.gnu.org/software/libiconv) for the list of
-# possible encodings.
-# The default value is: UTF-8.
-
-INPUT_ENCODING         = UTF-8
-
-# If the value of the INPUT tag contains directories, you can use the
-# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and
-# *.h) to filter out the source-files in the directories. If left blank the
-# following patterns are tested: *.c, *.cc, *.cxx, *.cpp, *.c++, *.java, *.ii,
-# *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, *.hh, *.hxx, *.hpp,
-# *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, *.m, *.markdown,
-# *.md, *.mm, *.dox, *.py, *.f90, *.f, *.for, *.tcl, *.vhd, *.vhdl, *.ucf,
-# *.qsf, *.as and *.js.
-
-FILE_PATTERNS          =
-
-# The RECURSIVE tag can be used to specify whether or not subdirectories should
-# be searched for input files as well.
-# The default value is: NO.
-
-RECURSIVE              = NO
-
-# The EXCLUDE tag can be used to specify files and/or directories that should be
-# excluded from the INPUT source files. This way you can easily exclude a
-# subdirectory from a directory tree whose root is specified with the INPUT tag.
-#
-# Note that relative paths are relative to the directory from which doxygen is
-# run.
-
-EXCLUDE                =
-
-# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
-# directories that are symbolic links (a Unix file system feature) are excluded
-# from the input.
-# The default value is: NO.
-
-EXCLUDE_SYMLINKS       = NO
-
-# If the value of the INPUT tag contains directories, you can use the
-# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
-# certain files from those directories.
-#
-# Note that the wildcards are matched against the file with absolute path, so to
-# exclude all test directories for example use the pattern */test/*
-
-EXCLUDE_PATTERNS       =
-
-# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
-# (namespaces, classes, functions, etc.) that should be excluded from the
-# output. The symbol name can be a fully qualified name, a word, or if the
-# wildcard * is used, a substring. Examples: ANamespace, AClass,
-# AClass::ANamespace, ANamespace::*Test
-#
-# Note that the wildcards are matched against the file with absolute path, so to
-# exclude all test directories use the pattern */test/*
-
-EXCLUDE_SYMBOLS        =
-
-# The EXAMPLE_PATH tag can be used to specify one or more files or directories
-# that contain example code fragments that are included (see the \include
-# command).
-
-EXAMPLE_PATH           =
-
-# If the value of the EXAMPLE_PATH tag contains directories, you can use the
-# EXAMPLE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and
-# *.h) to filter out the source-files in the directories. If left blank all
-# files are included.
-
-EXAMPLE_PATTERNS       =
-
-# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
-# searched for input files to be used with the \include or \dontinclude commands
-# irrespective of the value of the RECURSIVE tag.
-# The default value is: NO.
-
-EXAMPLE_RECURSIVE      = NO
-
-# The IMAGE_PATH tag can be used to specify one or more files or directories
-# that contain images that are to be included in the documentation (see the
-# \image command).
-
-IMAGE_PATH             =
-
-# The INPUT_FILTER tag can be used to specify a program that doxygen should
-# invoke to filter for each input file. Doxygen will invoke the filter program
-# by executing (via popen()) the command:
-#
-# <filter> <input-file>
-#
-# where <filter> is the value of the INPUT_FILTER tag, and <input-file> is the
-# name of an input file. Doxygen will then use the output that the filter
-# program writes to standard output. If FILTER_PATTERNS is specified, this tag
-# will be ignored.
-#
-# Note that the filter must not add or remove lines; it is applied before the
-# code is scanned, but not when the output code is generated. If lines are added
-# or removed, the anchors will not be placed correctly.
-
-INPUT_FILTER           =
-
-# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
-# basis. Doxygen will compare the file name with each pattern and apply the
-# filter if there is a match. The filters are a list of the form: pattern=filter
-# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how
-# filters are used. If the FILTER_PATTERNS tag is empty or if none of the
-# patterns match the file name, INPUT_FILTER is applied.
-
-FILTER_PATTERNS        =
-
-# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
-# INPUT_FILTER) will also be used to filter the input files that are used for
-# producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES).
-# The default value is: NO.
-
-FILTER_SOURCE_FILES    = NO
-
-# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file
-# pattern. A pattern will override the setting for FILTER_PATTERN (if any) and
-# it is also possible to disable source filtering for a specific pattern using
-# *.ext= (so without naming a filter).
-# This tag requires that the tag FILTER_SOURCE_FILES is set to YES.
-
-FILTER_SOURCE_PATTERNS =
-
-# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that
-# is part of the input, its contents will be placed on the main page
-# (index.html). This can be useful if you have a project on for instance GitHub
-# and want to reuse the introduction page also for the doxygen output.
-
-USE_MDFILE_AS_MAINPAGE =
-
-#---------------------------------------------------------------------------
-# Configuration options related to source browsing
-#---------------------------------------------------------------------------
-
-# If the SOURCE_BROWSER tag is set to YES then a list of source files will be
-# generated. Documented entities will be cross-referenced with these sources.
-#
-# Note: To get rid of all source code in the generated output, make sure that
-# also VERBATIM_HEADERS is set to NO.
-# The default value is: NO.
-
-SOURCE_BROWSER         = NO
-
-# Setting the INLINE_SOURCES tag to YES will include the body of functions,
-# classes and enums directly into the documentation.
-# The default value is: NO.
-
-INLINE_SOURCES         = NO
-
-# Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any
-# special comment blocks from generated source code fragments. Normal C, C++ and
-# Fortran comments will always remain visible.
-# The default value is: YES.
-
-STRIP_CODE_COMMENTS    = YES
-
-# If the REFERENCED_BY_RELATION tag is set to YES then for each documented
-# function all documented functions referencing it will be listed.
-# The default value is: NO.
-
-REFERENCED_BY_RELATION = NO
-
-# If the REFERENCES_RELATION tag is set to YES then for each documented function
-# all documented entities called/used by that function will be listed.
-# The default value is: NO.
-
-REFERENCES_RELATION    = NO
-
-# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set
-# to YES then the hyperlinks from functions in REFERENCES_RELATION and
-# REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will
-# link to the documentation.
-# The default value is: YES.
-
-REFERENCES_LINK_SOURCE = YES
-
-# If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the
-# source code will show a tooltip with additional information such as prototype,
-# brief description and links to the definition and documentation. Since this
-# will make the HTML file larger and loading of large files a bit slower, you
-# can opt to disable this feature.
-# The default value is: YES.
-# This tag requires that the tag SOURCE_BROWSER is set to YES.
-
-SOURCE_TOOLTIPS        = YES
-
-# If the USE_HTAGS tag is set to YES then the references to source code will
-# point to the HTML generated by the htags(1) tool instead of doxygen built-in
-# source browser. The htags tool is part of GNU's global source tagging system
-# (see http://www.gnu.org/software/global/global.html). You will need version
-# 4.8.6 or higher.
-#
-# To use it do the following:
-# - Install the latest version of global
-# - Enable SOURCE_BROWSER and USE_HTAGS in the config file
-# - Make sure the INPUT points to the root of the source tree
-# - Run doxygen as normal
-#
-# Doxygen will invoke htags (and that will in turn invoke gtags), so these
-# tools must be available from the command line (i.e. in the search path).
-#
-# The result: instead of the source browser generated by doxygen, the links to
-# source code will now point to the output of htags.
-# The default value is: NO.
-# This tag requires that the tag SOURCE_BROWSER is set to YES.
-
-USE_HTAGS              = NO
-
-# If the VERBATIM_HEADERS tag is set to YES then doxygen will generate a
-# verbatim copy of the header file for each class for which an include is
-# specified. Set to NO to disable this.
-# See also: Section \class.
-# The default value is: YES.
-
-VERBATIM_HEADERS       = YES
-
-# If the CLANG_ASSISTED_PARSING tag is set to YES then doxygen will use the
-# clang parser (see: http://clang.llvm.org/) for more accurate parsing at the
-# cost of reduced performance. This can be particularly helpful with template
-# rich C++ code for which doxygen's built-in parser lacks the necessary type
-# information.
-# Note: The availability of this option depends on whether or not doxygen was
-# compiled with the --with-libclang option.
-# The default value is: NO.
-
-CLANG_ASSISTED_PARSING = NO
-
-# If clang assisted parsing is enabled you can provide the compiler with command
-# line options that you would normally use when invoking the compiler. Note that
-# the include paths will already be set by doxygen for the files and directories
-# specified with INPUT and INCLUDE_PATH.
-# This tag requires that the tag CLANG_ASSISTED_PARSING is set to YES.
-
-CLANG_OPTIONS          =
-
-#---------------------------------------------------------------------------
-# Configuration options related to the alphabetical class index
-#---------------------------------------------------------------------------
-
-# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all
-# compounds will be generated. Enable this if the project contains a lot of
-# classes, structs, unions or interfaces.
-# The default value is: YES.
-
-ALPHABETICAL_INDEX     = YES
-
-# The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in
-# which the alphabetical index list will be split.
-# Minimum value: 1, maximum value: 20, default value: 5.
-# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
-
-COLS_IN_ALPHA_INDEX    = 5
-
-# In case all classes in a project start with a common prefix, all classes will
-# be put under the same header in the alphabetical index. The IGNORE_PREFIX tag
-# can be used to specify a prefix (or a list of prefixes) that should be ignored
-# while generating the index headers.
-# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
-
-IGNORE_PREFIX          =
-
-#---------------------------------------------------------------------------
-# Configuration options related to the HTML output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_HTML tag is set to YES, doxygen will generate HTML output
-# The default value is: YES.
-
-GENERATE_HTML          = YES
-
-# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a
-# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
-# it.
-# The default directory is: html.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_OUTPUT            = html
-
-# The HTML_FILE_EXTENSION tag can be used to specify the file extension for each
-# generated HTML page (for example: .htm, .php, .asp).
-# The default value is: .html.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_FILE_EXTENSION    = .html
-
-# The HTML_HEADER tag can be used to specify a user-defined HTML header file for
-# each generated HTML page. If the tag is left blank doxygen will generate a
-# standard header.
-#
-# To get valid HTML, the header file must include any scripts and style sheets
-# that doxygen needs, which depend on the configuration options used (e.g.
-# the setting GENERATE_TREEVIEW). It is highly recommended to start with a
-# default header using
-# doxygen -w html new_header.html new_footer.html new_stylesheet.css
-# YourConfigFile
-# and then modify the file new_header.html. See also section "Doxygen usage"
-# for information on how to generate the default header that doxygen normally
-# uses.
-# Note: The header is subject to change so you typically have to regenerate the
-# default header when upgrading to a newer version of doxygen. For a description
-# of the possible markers and block names see the documentation.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_HEADER            =
-
-# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each
-# generated HTML page. If the tag is left blank doxygen will generate a standard
-# footer. See HTML_HEADER for more information on how to generate a default
-# footer and what special commands can be used inside the footer. See also
-# section "Doxygen usage" for information on how to generate the default footer
-# that doxygen normally uses.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_FOOTER            =
-
-# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style
-# sheet that is used by each HTML page. It can be used to fine-tune the look of
-# the HTML output. If left blank doxygen will generate a default style sheet.
-# See also section "Doxygen usage" for information on how to generate the style
-# sheet that doxygen normally uses.
-# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as
-# it is more robust and this tag (HTML_STYLESHEET) will in the future become
-# obsolete.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_STYLESHEET        =
-
-# The HTML_EXTRA_STYLESHEET tag can be used to specify additional user-defined
-# cascading style sheets that are included after the standard style sheets
-# created by doxygen. Using this option one can overrule certain style aspects.
-# This is preferred over using HTML_STYLESHEET since it does not replace the
-# standard style sheet and is therefore more robust against future updates.
-# Doxygen will copy the style sheet files to the output directory.
-# Note: The order of the extra style sheet files is of importance (e.g. the last
-# style sheet in the list overrules the setting of the previous ones in the
-# list). For an example see the documentation.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_EXTRA_STYLESHEET  =
-
-# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or
-# other source files which should be copied to the HTML output directory. Note
-# that these files will be copied to the base HTML output directory. Use the
-# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these
-# files. In the HTML_STYLESHEET file, use the file name only. Also note that the
-# files will be copied as-is; there are no commands or markers available.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_EXTRA_FILES       =
-
-# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen
-# will adjust the colors in the style sheet and background images according to
-# this color. Hue is specified as an angle on a colorwheel, see
-# http://en.wikipedia.org/wiki/Hue for more information. For instance the value
-# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300
-# is purple, and 360 is red again.
-# Minimum value: 0, maximum value: 359, default value: 220.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_COLORSTYLE_HUE    = 220
-
-# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors
-# in the HTML output. For a value of 0 the output will use grayscales only. A
-# value of 255 will produce the most vivid colors.
-# Minimum value: 0, maximum value: 255, default value: 100.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_COLORSTYLE_SAT    = 100
-
-# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the
-# luminance component of the colors in the HTML output. Values below 100
-# gradually make the output lighter, whereas values above 100 make the output
-# darker. The value divided by 100 is the actual gamma applied, so 80 represents
-# a gamma of 0.8. The value 220 represents a gamma of 2.2, and 100 does not
-# change the gamma.
-# Minimum value: 40, maximum value: 240, default value: 80.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_COLORSTYLE_GAMMA  = 80
-
-# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML
-# page will contain the date and time when the page was generated. Setting this
-# to YES can help to show when doxygen was last run and thus if the
-# documentation is up to date.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_TIMESTAMP         = YES
-
-# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
-# documentation will contain sections that can be hidden and shown after the
-# page has loaded.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_DYNAMIC_SECTIONS  = NO
-
-# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries
-# shown in the various tree structured indices initially; the user can expand
-# and collapse entries dynamically later on. Doxygen will expand the tree to
-# such a level that at most the specified number of entries are visible (unless
-# a fully collapsed tree already exceeds this amount). So setting the number of
-# entries to 1 will produce a fully collapsed tree by default. 0 is a special value
-# representing an infinite number of entries and will result in a fully expanded
-# tree by default.
-# Minimum value: 0, maximum value: 9999, default value: 100.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_INDEX_NUM_ENTRIES = 100
-
-# If the GENERATE_DOCSET tag is set to YES, additional index files will be
-# generated that can be used as input for Apple's Xcode 3 integrated development
-# environment (see: http://developer.apple.com/tools/xcode/), introduced with
-# OSX 10.5 (Leopard). To create a documentation set, doxygen will generate a
-# Makefile in the HTML output directory. Running make will produce the docset in
-# that directory and running make install will install the docset in
-# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at
-# startup. See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html
-# for more information.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-GENERATE_DOCSET        = NO
-
-# This tag determines the name of the docset feed. A documentation feed provides
-# an umbrella under which multiple documentation sets from a single provider
-# (such as a company or product suite) can be grouped.
-# The default value is: Doxygen generated docs.
-# This tag requires that the tag GENERATE_DOCSET is set to YES.
-
-DOCSET_FEEDNAME        = "librdkafka documentation"
-
-# This tag specifies a string that should uniquely identify the documentation
-# set bundle. This should be a reverse domain-name style string, e.g.
-# com.mycompany.MyDocSet. Doxygen will append .docset to the name.
-# The default value is: org.doxygen.Project.
-# This tag requires that the tag GENERATE_DOCSET is set to YES.
-
-DOCSET_BUNDLE_ID       = se.edenhill.librdkafka
-
-# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify
-# the documentation publisher. This should be a reverse domain-name style
-# string, e.g. com.mycompany.MyDocSet.documentation.
-# The default value is: org.doxygen.Publisher.
-# This tag requires that the tag GENERATE_DOCSET is set to YES.
-
-DOCSET_PUBLISHER_ID    = se.edenhill
-
-# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher.
-# The default value is: Publisher.
-# This tag requires that the tag GENERATE_DOCSET is set to YES.
-
-DOCSET_PUBLISHER_NAME  = Magnus Edenhill
-
-# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three
-# additional HTML index files: index.hhp, index.hhc, and index.hhk. The
-# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop
-# (see: http://www.microsoft.com/en-us/download/details.aspx?id=21138) on
-# Windows.
-#
-# The HTML Help Workshop contains a compiler that can convert all HTML output
-# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML
-# files are now used as the Windows 98 help format, and will replace the old
-# Windows help format (.hlp) on all Windows platforms in the future. Compressed
-# HTML files also contain an index, a table of contents, and you can search for
-# words in the documentation. The HTML workshop also contains a viewer for
-# compressed HTML files.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-GENERATE_HTMLHELP      = NO
-
-# The CHM_FILE tag can be used to specify the file name of the resulting .chm
-# file. You can add a path in front of the file if the result should not be
-# written to the html output directory.
-# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
-
-CHM_FILE               =
-
-# The HHC_LOCATION tag can be used to specify the location (absolute path
-# including file name) of the HTML help compiler (hhc.exe). If non-empty,
-# doxygen will try to run the HTML help compiler on the generated index.hhp.
-# The file has to be specified with full path.
-# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
-
-HHC_LOCATION           =
-
-# The GENERATE_CHI flag controls whether a separate .chi index file is generated
-# (YES) or whether it should be included in the master .chm file (NO).
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
-
-GENERATE_CHI           = NO
-
-# The CHM_INDEX_ENCODING is used to encode HtmlHelp index (hhk), content (hhc)
-# and project file content.
-# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
-
-CHM_INDEX_ENCODING     =
-
-# The BINARY_TOC flag controls whether a binary table of contents is generated
-# (YES) or a normal table of contents (NO) in the .chm file. Furthermore it
-# enables the Previous and Next buttons.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
-
-BINARY_TOC             = NO
-
-# The TOC_EXPAND flag can be set to YES to add extra items for group members to
-# the table of contents of the HTML help documentation and to the tree view.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
-
-TOC_EXPAND             = NO
-
-# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and
-# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that
-# can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help
-# (.qch) of the generated HTML documentation.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-GENERATE_QHP           = NO
-
-# If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify
-# the file name of the resulting .qch file. The path specified is relative to
-# the HTML output folder.
-# This tag requires that the tag GENERATE_QHP is set to YES.
-
-QCH_FILE               =
-
-# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help
-# Project output. For more information please see Qt Help Project / Namespace
-# (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#namespace).
-# The default value is: org.doxygen.Project.
-# This tag requires that the tag GENERATE_QHP is set to YES.
-
-QHP_NAMESPACE          = se.edenhill.librdkafka
-
-# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt
-# Help Project output. For more information please see Qt Help Project / Virtual
-# Folders (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#virtual-
-# folders).
-# The default value is: doc.
-# This tag requires that the tag GENERATE_QHP is set to YES.
-
-QHP_VIRTUAL_FOLDER     = doc
-
-# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom
-# filter to add. For more information please see Qt Help Project / Custom
-# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom-
-# filters).
-# This tag requires that the tag GENERATE_QHP is set to YES.
-
-QHP_CUST_FILTER_NAME   =
-
-# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the
-# custom filter to add. For more information please see Qt Help Project / Custom
-# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom-
-# filters).
-# This tag requires that the tag GENERATE_QHP is set to YES.
-
-QHP_CUST_FILTER_ATTRS  =
-
-# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this
-# project's filter section matches. Qt Help Project / Filter Attributes (see:
-# http://qt-project.org/doc/qt-4.8/qthelpproject.html#filter-attributes).
-# This tag requires that the tag GENERATE_QHP is set to YES.
-
-QHP_SECT_FILTER_ATTRS  =
-
-# The QHG_LOCATION tag can be used to specify the location of Qt's
-# qhelpgenerator. If non-empty doxygen will try to run qhelpgenerator on the
-# generated .qhp file.
-# This tag requires that the tag GENERATE_QHP is set to YES.
-
-QHG_LOCATION           =
-
-# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be
-# generated that, together with the HTML files, form an Eclipse help plugin. To
-# install this plugin and make it available under the help contents menu in
-# Eclipse, the contents of the directory containing the HTML and XML files need
-# to be copied into the plugins directory of Eclipse. The name of the directory
-# within the plugins directory should be the same as the ECLIPSE_DOC_ID value.
-# After copying, Eclipse needs to be restarted before the help appears.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-GENERATE_ECLIPSEHELP   = NO
-
-# A unique identifier for the Eclipse help plugin. When installing the plugin
-# the directory name containing the HTML and XML files should also have this
-# name. Each documentation set should have its own identifier.
-# The default value is: org.doxygen.Project.
-# This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES.
-
-ECLIPSE_DOC_ID         = se.edenhill.librdkafka
-
-# If you want full control over the layout of the generated HTML pages it might
-# be necessary to disable the index and replace it with your own. The
-# DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top
-# of each HTML page. A value of NO enables the index and the value YES disables
-# it. Since the tabs in the index contain the same information as the navigation
-# tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-DISABLE_INDEX          = NO
-
-# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
-# structure should be generated to display hierarchical information. If the tag
-# value is set to YES, a side panel will be generated containing a tree-like
-# index structure (just like the one that is generated for HTML Help). For this
-# to work a browser that supports JavaScript, DHTML, CSS and frames is required
-# (i.e. any modern browser). Windows users are probably better off using the
-# HTML help feature. Via custom style sheets (see HTML_EXTRA_STYLESHEET) one can
-# further fine-tune the look of the index. As an example, the default style
-# sheet generated by doxygen has an example that shows how to put an image at
-# the root of the tree instead of the PROJECT_NAME. Since the tree basically has
-# the same information as the tab index, you could consider setting
-# DISABLE_INDEX to YES when enabling this option.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-GENERATE_TREEVIEW      = YES
-
-# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that
-# doxygen will group on one line in the generated HTML documentation.
-#
-# Note that a value of 0 will completely suppress the enum values from appearing
-# in the overview section.
-# Minimum value: 0, maximum value: 20, default value: 4.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-ENUM_VALUES_PER_LINE   = 1
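For illustration only (this enum is hypothetical, not taken from librdkafka): with ENUM_VALUES_PER_LINE = 1 as above, doxygen lists each documented enumerator of a C enum such as the following on its own line in the HTML overview, instead of packing four per line.

    /** @brief Hypothetical status enum, for illustration only. */
    typedef enum {
        EXAMPLE_OK    = 0,  /**< operation succeeded      */
        EXAMPLE_RETRY = 1,  /**< transient failure, retry */
        EXAMPLE_FATAL = 2   /**< permanent failure        */
    } example_status_t;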
-
-# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used
-# to set the initial width (in pixels) of the frame in which the tree is shown.
-# Minimum value: 0, maximum value: 1500, default value: 250.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-TREEVIEW_WIDTH         = 250
-
-# If the EXT_LINKS_IN_WINDOW option is set to YES, doxygen will open links to
-# external symbols imported via tag files in a separate window.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-EXT_LINKS_IN_WINDOW    = NO
-
-# Use this tag to change the font size of LaTeX formulas included as images in
-# the HTML documentation. When you change the font size after a successful
-# doxygen run you need to manually remove any form_*.png images from the HTML
-# output directory to force them to be regenerated.
-# Minimum value: 8, maximum value: 50, default value: 10.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-FORMULA_FONTSIZE       = 10
-
-# Use the FORMULA_TRANSPARENT tag to determine whether or not the images
-# generated for formulas are transparent PNGs. Transparent PNGs are not
-# supported properly for IE 6.0, but are supported on all modern browsers.
-#
-# Note that when changing this option you need to delete any form_*.png files in
-# the HTML output directory before the changes have effect.
-# The default value is: YES.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-FORMULA_TRANSPARENT    = YES
-
-# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see
-# http://www.mathjax.org) which uses client side Javascript for the rendering
-# instead of using pre-rendered bitmaps. Use this if you do not have LaTeX
-# installed or if you want the formulas to look prettier in the HTML output. When
-# enabled you may also need to install MathJax separately and configure the path
-# to it using the MATHJAX_RELPATH option.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-USE_MATHJAX            = NO
-
-# When MathJax is enabled you can set the default output format to be used for
-# the MathJax output. See the MathJax site (see:
-# http://docs.mathjax.org/en/latest/output.html) for more details.
-# Possible values are: HTML-CSS (which is slower, but has the best
-# compatibility), NativeMML (i.e. MathML) and SVG.
-# The default value is: HTML-CSS.
-# This tag requires that the tag USE_MATHJAX is set to YES.
-
-MATHJAX_FORMAT         = HTML-CSS
-
-# When MathJax is enabled you need to specify the location relative to the HTML
-# output directory using the MATHJAX_RELPATH option. The destination directory
-# should contain the MathJax.js script. For instance, if the mathjax directory
-# is located at the same level as the HTML output directory, then
-# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax
-# Content Delivery Network so you can quickly see the result without installing
-# MathJax. However, it is strongly recommended to install a local copy of
-# MathJax from http://www.mathjax.org before deployment.
-# The default value is: http://cdn.mathjax.org/mathjax/latest.
-# This tag requires that the tag USE_MATHJAX is set to YES.
-
-MATHJAX_RELPATH        = http://cdn.mathjax.org/mathjax/latest
-
-# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax
-# extension names that should be enabled during MathJax rendering. For example
-# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols
-# This tag requires that the tag USE_MATHJAX is set to YES.
-
-MATHJAX_EXTENSIONS     =
-
-# The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces
-# of code that will be used on startup of the MathJax code. See the MathJax site
-# (see: http://docs.mathjax.org/en/latest/output.html) for more details. For an
-# example see the documentation.
-# This tag requires that the tag USE_MATHJAX is set to YES.
-
-MATHJAX_CODEFILE       =
-
-# When the SEARCHENGINE tag is enabled doxygen will generate a search box for
-# the HTML output. The underlying search engine uses javascript and DHTML and
-# should work on any modern browser. Note that when using HTML help
-# (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET)
-# there is already a search function so this one should typically be disabled.
-# For large projects the JavaScript-based search engine can be slow; in that case
-# enabling SERVER_BASED_SEARCH may provide a better solution. It is possible to
-# search using the keyboard; to jump to the search box use <access key> + S
-# (what the <access key> is depends on the OS and browser, but it is typically
-# <CTRL>, <ALT>/<option>, or both). Inside the search box use the <cursor down
-# key> to jump into the search results window, the results can be navigated
-# using the <cursor keys>. Press <Enter> to select an item or <escape> to cancel
-# the search. The filter options can be selected when the cursor is inside the
-# search box by pressing <Shift>+<cursor down>. Also here use the <cursor keys>
-# to select a filter and <Enter> or <escape> to activate or cancel the filter
-# option.
-# The default value is: YES.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-SEARCHENGINE           = YES
-
-# When the SERVER_BASED_SEARCH tag is enabled the search engine will be
-# implemented using a web server instead of a web client using Javascript. There
-# are two flavors of web server based searching depending on the EXTERNAL_SEARCH
-# setting. When disabled, doxygen will generate a PHP script for searching and
-# an index file used by the script. When EXTERNAL_SEARCH is enabled the indexing
-# and searching needs to be provided by external tools. See the section
-# "External Indexing and Searching" for details.
-# The default value is: NO.
-# This tag requires that the tag SEARCHENGINE is set to YES.
-
-SERVER_BASED_SEARCH    = NO
-
-# When the EXTERNAL_SEARCH tag is enabled doxygen will no longer generate the PHP
-# script for searching. Instead the search results are written to an XML file
-# which needs to be processed by an external indexer. Doxygen will invoke an
-# external search engine pointed to by the SEARCHENGINE_URL option to obtain the
-# search results.
-#
-# Doxygen ships with an example indexer (doxyindexer) and search engine
-# (doxysearch.cgi) which are based on the open source search engine library
-# Xapian (see: http://xapian.org/).
-#
-# See the section "External Indexing and Searching" for details.
-# The default value is: NO.
-# This tag requires that the tag SEARCHENGINE is set to YES.
-
-EXTERNAL_SEARCH        = NO
-
-# The SEARCHENGINE_URL should point to a search engine hosted by a web server
-# which will return the search results when EXTERNAL_SEARCH is enabled.
-#
-# Doxygen ships with an example indexer (doxyindexer) and search engine
-# (doxysearch.cgi) which are based on the open source search engine library
-# Xapian (see: http://xapian.org/). See the section "External Indexing and
-# Searching" for details.
-# This tag requires that the tag SEARCHENGINE is set to YES.
-
-SEARCHENGINE_URL       =
-
-# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the unindexed
-# search data is written to a file for indexing by an external tool. With the
-# SEARCHDATA_FILE tag the name of this file can be specified.
-# The default file is: searchdata.xml.
-# This tag requires that the tag SEARCHENGINE is set to YES.
-
-SEARCHDATA_FILE        = searchdata.xml
-
-# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the
-# EXTERNAL_SEARCH_ID tag can be used as an identifier for the project. This is
-# useful in combination with EXTRA_SEARCH_MAPPINGS to search through multiple
-# projects and redirect the results back to the right project.
-# This tag requires that the tag SEARCHENGINE is set to YES.
-
-EXTERNAL_SEARCH_ID     =
-
-# The EXTRA_SEARCH_MAPPINGS tag can be used to enable searching through doxygen
-# projects other than the one defined by this configuration file, but that are
-# all added to the same external search index. Each project needs to have a
-# unique id set via EXTERNAL_SEARCH_ID. The search mapping then maps the id
-# to a relative location where the documentation can be found. The format is:
-# EXTRA_SEARCH_MAPPINGS = tagname1=loc1 tagname2=loc2 ...
-# This tag requires that the tag SEARCHENGINE is set to YES.
-
-EXTRA_SEARCH_MAPPINGS  =
-
-#---------------------------------------------------------------------------
-# Configuration options related to the LaTeX output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_LATEX tag is set to YES, doxygen will generate LaTeX output.
-# The default value is: YES.
-
-GENERATE_LATEX         = YES
-
-# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. If a
-# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
-# it.
-# The default directory is: latex.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-LATEX_OUTPUT           = latex
-
-# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
-# invoked.
-#
-# Note that when enabling USE_PDFLATEX this option is only used for generating
-# bitmaps for formulas in the HTML output, but not in the Makefile that is
-# written to the output directory.
-# The default file is: latex.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-LATEX_CMD_NAME         = latex
-
-# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to generate
-# index for LaTeX.
-# The default file is: makeindex.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-MAKEINDEX_CMD_NAME     = makeindex
-
-# If the COMPACT_LATEX tag is set to YES, doxygen generates more compact LaTeX
-# documents. This may be useful for small projects and may help to save some
-# trees in general.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-COMPACT_LATEX          = NO
-
-# The PAPER_TYPE tag can be used to set the paper type that is used by the
-# printer.
-# Possible values are: a4 (210 x 297 mm), letter (8.5 x 11 inches), legal (8.5 x
-# 14 inches) and executive (7.25 x 10.5 inches).
-# The default value is: a4.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-PAPER_TYPE             = a4
-
-# The EXTRA_PACKAGES tag can be used to specify one or more LaTeX package names
-# that should be included in the LaTeX output. To get the times font for
-# instance you can specify
-# EXTRA_PACKAGES=times
-# If left blank no extra packages will be included.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-EXTRA_PACKAGES         =
-
-# The LATEX_HEADER tag can be used to specify a personal LaTeX header for the
-# generated LaTeX document. The header should contain everything until the first
-# chapter. If it is left blank doxygen will generate a standard header. See
-# section "Doxygen usage" for information on how to let doxygen write the
-# default header to a separate file.
-#
-# Note: Only use a user-defined header if you know what you are doing! The
-# following commands have a special meaning inside the header: $title,
-# $datetime, $date, $doxygenversion, $projectname, $projectnumber,
-# $projectbrief, $projectlogo. Doxygen will replace $title with the empty
-# string, for the replacement values of the other commands the user is referred
-# to HTML_HEADER.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-LATEX_HEADER           =
-
-# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for the
-# generated LaTeX document. The footer should contain everything after the last
-# chapter. If it is left blank doxygen will generate a standard footer. See
-# LATEX_HEADER for more information on how to generate a default footer and what
-# special commands can be used inside the footer.
-#
-# Note: Only use a user-defined footer if you know what you are doing!
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-LATEX_FOOTER           =
-
-# The LATEX_EXTRA_STYLESHEET tag can be used to specify additional user-defined
-# LaTeX style sheets that are included after the standard style sheets created
-# by doxygen. Using this option one can overrule certain style aspects. Doxygen
-# will copy the style sheet files to the output directory.
-# Note: The order of the extra style sheet files is of importance (e.g. the last
-# style sheet in the list overrules the setting of the previous ones in the
-# list).
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-LATEX_EXTRA_STYLESHEET =
-
-# The LATEX_EXTRA_FILES tag can be used to specify one or more extra images or
-# other source files which should be copied to the LATEX_OUTPUT output
-# directory. Note that the files will be copied as-is; there are no commands or
-# markers available.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-LATEX_EXTRA_FILES      =
-
-# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated is
-# prepared for conversion to PDF (using ps2pdf or pdflatex). The PDF file will
-# contain links (just like the HTML output) instead of page references. This
-# makes the output suitable for online browsing using a PDF viewer.
-# The default value is: YES.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-PDF_HYPERLINKS         = YES
-
-# If the USE_PDFLATEX tag is set to YES, doxygen will use pdflatex to generate
-# the PDF file directly from the LaTeX files. Set this option to YES to get
-# higher quality PDF documentation.
-# The default value is: YES.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-USE_PDFLATEX           = YES
-
-# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \batchmode
-# command to the generated LaTeX files. This will instruct LaTeX to keep running
-# if errors occur, instead of asking the user for help. This option is also used
-# when generating formulas in HTML.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-LATEX_BATCHMODE        = NO
-
-# If the LATEX_HIDE_INDICES tag is set to YES then doxygen will not include the
-# index chapters (such as File Index, Compound Index, etc.) in the output.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-LATEX_HIDE_INDICES     = NO
-
-# If the LATEX_SOURCE_CODE tag is set to YES then doxygen will include source
-# code with syntax highlighting in the LaTeX output.
-#
-# Note that which sources are shown also depends on other settings such as
-# SOURCE_BROWSER.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-LATEX_SOURCE_CODE      = NO
-
-# The LATEX_BIB_STYLE tag can be used to specify the style to use for the
-# bibliography, e.g. plainnat, or ieeetr. See
-# http://en.wikipedia.org/wiki/BibTeX and \cite for more info.
-# The default value is: plain.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-LATEX_BIB_STYLE        = plain
-
-#---------------------------------------------------------------------------
-# Configuration options related to the RTF output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_RTF tag is set to YES, doxygen will generate RTF output. The
-# RTF output is optimized for Word 97 and may not look too pretty with other RTF
-# readers/editors.
-# The default value is: NO.
-
-GENERATE_RTF           = NO
-
-# The RTF_OUTPUT tag is used to specify where the RTF docs will be put. If a
-# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
-# it.
-# The default directory is: rtf.
-# This tag requires that the tag GENERATE_RTF is set to YES.
-
-RTF_OUTPUT             = rtf
-
-# If the COMPACT_RTF tag is set to YES, doxygen generates more compact RTF
-# documents. This may be useful for small projects and may help to save some
-# trees in general.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_RTF is set to YES.
-
-COMPACT_RTF            = NO
-
-# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated will
-# contain hyperlink fields. The RTF file will contain links (just like the HTML
-# output) instead of page references. This makes the output suitable for online
-# browsing using Word or some other Word compatible readers that support those
-# fields.
-#
-# Note: WordPad (write) and others do not support links.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_RTF is set to YES.
-
-RTF_HYPERLINKS         = NO
-
-# Load stylesheet definitions from file. Syntax is similar to doxygen's config
-# file, i.e. a series of assignments. You only have to provide replacements,
-# missing definitions are set to their default value.
-#
-# See also section "Doxygen usage" for information on how to generate the
-# default style sheet that doxygen normally uses.
-# This tag requires that the tag GENERATE_RTF is set to YES.
-
-RTF_STYLESHEET_FILE    =
-
-# Set optional variables used in the generation of an RTF document. Syntax is
-# similar to doxygen's config file. A template extensions file can be generated
-# using doxygen -e rtf extensionFile.
-# This tag requires that the tag GENERATE_RTF is set to YES.
-
-RTF_EXTENSIONS_FILE    =
-
-# If the RTF_SOURCE_CODE tag is set to YES then doxygen will include source code
-# with syntax highlighting in the RTF output.
-#
-# Note that which sources are shown also depends on other settings such as
-# SOURCE_BROWSER.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_RTF is set to YES.
-
-RTF_SOURCE_CODE        = NO
-
-#---------------------------------------------------------------------------
-# Configuration options related to the man page output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_MAN tag is set to YES, doxygen will generate man pages for
-# classes and files.
-# The default value is: NO.
-
-GENERATE_MAN           = NO
-
-# The MAN_OUTPUT tag is used to specify where the man pages will be put. If a
-# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
-# it. A directory man3 will be created inside the directory specified by
-# MAN_OUTPUT.
-# The default directory is: man.
-# This tag requires that the tag GENERATE_MAN is set to YES.
-
-MAN_OUTPUT             = man
-
-# The MAN_EXTENSION tag determines the extension that is added to the generated
-# man pages. In case the manual section does not start with a number, the number
-# 3 is prepended. The dot (.) at the beginning of the MAN_EXTENSION tag is
-# optional.
-# The default value is: .3.
-# This tag requires that the tag GENERATE_MAN is set to YES.
-
-MAN_EXTENSION          = .3
-
-# The MAN_SUBDIR tag determines the name of the directory created within
-# MAN_OUTPUT in which the man pages are placed. It defaults to man followed by
-# MAN_EXTENSION with the initial . removed.
-# This tag requires that the tag GENERATE_MAN is set to YES.
-
-MAN_SUBDIR             =
-
-# If the MAN_LINKS tag is set to YES and doxygen generates man output, then it
-# will generate one additional man file for each entity documented in the real
-# man page(s). These additional files only source the real man page, but without
-# them the man command would be unable to find the correct page.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_MAN is set to YES.
-
-MAN_LINKS              = NO
-
-#---------------------------------------------------------------------------
-# Configuration options related to the XML output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_XML tag is set to YES, doxygen will generate an XML file that
-# captures the structure of the code including all documentation.
-# The default value is: NO.
-
-GENERATE_XML           = NO
-
-# The XML_OUTPUT tag is used to specify where the XML pages will be put. If a
-# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
-# it.
-# The default directory is: xml.
-# This tag requires that the tag GENERATE_XML is set to YES.
-
-XML_OUTPUT             = xml
-
-# If the XML_PROGRAMLISTING tag is set to YES, doxygen will dump the program
-# listings (including syntax highlighting and cross-referencing information) to
-# the XML output. Note that enabling this will significantly increase the size
-# of the XML output.
-# The default value is: YES.
-# This tag requires that the tag GENERATE_XML is set to YES.
-
-XML_PROGRAMLISTING     = YES
-
-#---------------------------------------------------------------------------
-# Configuration options related to the DOCBOOK output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_DOCBOOK tag is set to YES, doxygen will generate Docbook files
-# that can be used to generate PDF.
-# The default value is: NO.
-
-GENERATE_DOCBOOK       = NO
-
-# The DOCBOOK_OUTPUT tag is used to specify where the Docbook pages will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be put in
-# front of it.
-# The default directory is: docbook.
-# This tag requires that the tag GENERATE_DOCBOOK is set to YES.
-
-DOCBOOK_OUTPUT         = docbook
-
-# If the DOCBOOK_PROGRAMLISTING tag is set to YES, doxygen will include the
-# program listings (including syntax highlighting and cross-referencing
-# information) in the DOCBOOK output. Note that enabling this will significantly
-# increase the size of the DOCBOOK output.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_DOCBOOK is set to YES.
-
-DOCBOOK_PROGRAMLISTING = NO
-
-#---------------------------------------------------------------------------
-# Configuration options for the AutoGen Definitions output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_AUTOGEN_DEF tag is set to YES, doxygen will generate an
-# AutoGen Definitions (see http://autogen.sf.net) file that captures the
-# structure of the code including all documentation. Note that this feature is
-# still experimental and incomplete at the moment.
-# The default value is: NO.
-
-GENERATE_AUTOGEN_DEF   = NO
-
-#---------------------------------------------------------------------------
-# Configuration options related to the Perl module output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_PERLMOD tag is set to YES, doxygen will generate a Perl module
-# file that captures the structure of the code including all documentation.
-#
-# Note that this feature is still experimental and incomplete at the moment.
-# The default value is: NO.
-
-GENERATE_PERLMOD       = NO
-
-# If the PERLMOD_LATEX tag is set to YES, doxygen will generate the necessary
-# Makefile rules, Perl scripts and LaTeX code to be able to generate PDF and DVI
-# output from the Perl module output.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_PERLMOD is set to YES.
-
-PERLMOD_LATEX          = NO
-
-# If the PERLMOD_PRETTY tag is set to YES, the Perl module output will be nicely
-# formatted so it can be parsed by a human reader. This is useful if you want to
-# understand what is going on. On the other hand, if this tag is set to NO, the
-# size of the Perl module output will be much smaller and Perl will parse it
-# just the same.
-# The default value is: YES.
-# This tag requires that the tag GENERATE_PERLMOD is set to YES.
-
-PERLMOD_PRETTY         = YES
-
-# The names of the make variables in the generated doxyrules.make file are
-# prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. This is useful
-# so different doxyrules.make files included by the same Makefile don't
-# overwrite each other's variables.
-# This tag requires that the tag GENERATE_PERLMOD is set to YES.
-
-PERLMOD_MAKEVAR_PREFIX =
-
-#---------------------------------------------------------------------------
-# Configuration options related to the preprocessor
-#---------------------------------------------------------------------------
-
-# If the ENABLE_PREPROCESSING tag is set to YES, doxygen will evaluate all
-# C-preprocessor directives found in the sources and include files.
-# The default value is: YES.
-
-ENABLE_PREPROCESSING   = YES
-
-# If the MACRO_EXPANSION tag is set to YES, doxygen will expand all macro names
-# in the source code. If set to NO, only conditional compilation will be
-# performed. Macro expansion can be done in a controlled way by setting
-# EXPAND_ONLY_PREDEF to YES.
-# The default value is: NO.
-# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
-
-MACRO_EXPANSION        = NO
-
-# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES then
-# the macro expansion is limited to the macros specified with the PREDEFINED and
-# EXPAND_AS_DEFINED tags.
-# The default value is: NO.
-# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
-
-EXPAND_ONLY_PREDEF     = NO
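As a sketch of why preprocessing matters for C/C++ sources like librdkafka's headers (the macro name below is hypothetical, not from this configuration): with MACRO_EXPANSION = NO, doxygen leaves attribute-style macros unexpanded, which can keep a declaration from being parsed the way the compiler sees it.

    /* Hypothetical export/attribute macro, for illustration only. */
    #define MY_EXPORT __attribute__((visibility("default")))

    /** @brief Returns a version string (example declaration). */
    MY_EXPORT const char *my_version_string(void);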
-
-# If the SEARCH_INCLUDES tag is set to YES, the include files in the
-# INCLUDE_PATH will be searched if a #include is found.
-# The default value is: YES.
-# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
-
-SEARCH_INCLUDES        = YES
-
-# The INCLUDE_PATH tag can be used to specify one or more directories that
-# contain include files that are not input files but should be processed by the
-# preprocessor.
-# This tag requires that the tag SEARCH_

<TRUNCATED>

[41/51] [partial] nifi-minifi-cpp git commit: MINIFICPP-512 - upgrade to librdkafka 0.11.4

Posted by ph...@apache.org.
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/lz4.c
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/lz4.c b/thirdparty/librdkafka-0.11.1/src/lz4.c
deleted file mode 100644
index c9c5a07..0000000
--- a/thirdparty/librdkafka-0.11.1/src/lz4.c
+++ /dev/null
@@ -1,1462 +0,0 @@
-/*
-   LZ4 - Fast LZ compression algorithm
-   Copyright (C) 2011-2017, Yann Collet.
-
-   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-   Redistribution and use in source and binary forms, with or without
-   modification, are permitted provided that the following conditions are
-   met:
-
-       * Redistributions of source code must retain the above copyright
-   notice, this list of conditions and the following disclaimer.
-       * Redistributions in binary form must reproduce the above
-   copyright notice, this list of conditions and the following disclaimer
-   in the documentation and/or other materials provided with the
-   distribution.
-
-   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-   You can contact the author at :
-    - LZ4 homepage : http://www.lz4.org
-    - LZ4 source repository : https://github.com/lz4/lz4
-*/
-
-
-/*-************************************
-*  Tuning parameters
-**************************************/
-/*
- * HEAPMODE :
- * Select how default compression functions will allocate memory for their hash table,
- * in memory stack (0:default, fastest), or in memory heap (1:requires malloc()).
- */
-#ifndef HEAPMODE
-#  define HEAPMODE 0
-#endif
-
-/*
- * ACCELERATION_DEFAULT :
- * Select "acceleration" for LZ4_compress_fast() when parameter value <= 0
- */
-#define ACCELERATION_DEFAULT 1
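For context, a minimal caller-side sketch of the acceleration parameter mentioned above, using the LZ4_compress_fast() entry point defined further down in this file: values below 1 are remapped to ACCELERATION_DEFAULT, and larger values trade compression ratio for speed.

    #include "lz4.h"

    /* Caller-side sketch: higher acceleration => faster compression, lower
     * ratio.  Any value below 1 is remapped to ACCELERATION_DEFAULT (1). */
    int compress_fast_demo(const char *src, int srcSize, char *dst, int dstCapacity)
    {
        return LZ4_compress_fast(src, dst, srcSize, dstCapacity, 8);
    }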
-
-
-/*-************************************
-*  CPU Feature Detection
-**************************************/
-/* LZ4_FORCE_MEMORY_ACCESS
- * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
- * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
- * The switch below allows selecting a different access method for improved performance.
- * Method 0 (default) : use `memcpy()`. Safe and portable.
- * Method 1 : `__packed` statement. It depends on a compiler extension (i.e., not portable).
- *            This method is safe if your compiler supports it, and is *generally* as fast or faster than `memcpy`.
- * Method 2 : direct access. This method is portable but violates the C standard.
- *            It can generate buggy code on targets whose assembly generation depends on alignment.
- *            But in some circumstances, it's the only known way to get the most performance (e.g. GCC + ARMv6).
- * See https://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.
- * Prefer these methods in priority order (0 > 1 > 2)
- */
-#ifndef LZ4_FORCE_MEMORY_ACCESS   /* can be defined externally */
-#  if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
-#    define LZ4_FORCE_MEMORY_ACCESS 2
-#  elif defined(__INTEL_COMPILER) || defined(__GNUC__)
-#    define LZ4_FORCE_MEMORY_ACCESS 1
-#  endif
-#endif
-
-/*
- * LZ4_FORCE_SW_BITCOUNT
- * Define this parameter if your target system or compiler does not support hardware bit count
- */
-#if defined(_MSC_VER) && defined(_WIN32_WCE)   /* Visual Studio for Windows CE does not support Hardware bit count */
-#  define LZ4_FORCE_SW_BITCOUNT
-#endif
-
-
-/*-************************************
-*  Dependency
-**************************************/
-#include "lz4.h"
-/* see also "memory routines" below */
-
-
-/*-************************************
-*  Compiler Options
-**************************************/
-#ifdef _MSC_VER    /* Visual Studio */
-#  define FORCE_INLINE static __forceinline
-#  include <intrin.h>
-#  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */
-#  pragma warning(disable : 4293)        /* disable: C4293: too large shift (32-bits) */
-#else
-#  if defined(__GNUC__) || defined(__clang__)
-#    define FORCE_INLINE static inline __attribute__((always_inline))
-#  elif defined(__cplusplus) || (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
-#    define FORCE_INLINE static inline
-#  else
-#    define FORCE_INLINE static
-#  endif
-#endif  /* _MSC_VER */
-
-#if (defined(__GNUC__) && (__GNUC__ >= 3)) || (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) || defined(__clang__)
-#  define expect(expr,value)    (__builtin_expect ((expr),(value)) )
-#else
-#  define expect(expr,value)    (expr)
-#endif
-
-#define likely(expr)     expect((expr) != 0, 1)
-#define unlikely(expr)   expect((expr) != 0, 0)
-
-
-/*-************************************
-*  Memory routines
-**************************************/
-#include <stdlib.h>   /* malloc, calloc, free */
-#define ALLOCATOR(n,s) calloc(n,s)
-#define FREEMEM        free
-#include <string.h>   /* memset, memcpy */
-#define MEM_INIT       memset
-
-
-/*-************************************
-*  Basic Types
-**************************************/
-#if defined(__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
-# include <stdint.h>
-  typedef  uint8_t BYTE;
-  typedef uint16_t U16;
-  typedef uint32_t U32;
-  typedef  int32_t S32;
-  typedef uint64_t U64;
-  typedef uintptr_t uptrval;
-#else
-  typedef unsigned char       BYTE;
-  typedef unsigned short      U16;
-  typedef unsigned int        U32;
-  typedef   signed int        S32;
-  typedef unsigned long long  U64;
-  typedef size_t              uptrval;   /* generally true, except OpenVMS-64 */
-#endif
-
-#if defined(__x86_64__)
-  typedef U64    reg_t;   /* 64-bits in x32 mode */
-#else
-  typedef size_t reg_t;   /* 32-bits in x32 mode */
-#endif
-
-/*-************************************
-*  Reading and writing into memory
-**************************************/
-static unsigned LZ4_isLittleEndian(void)
-{
-    const union { U32 u; BYTE c[4]; } one = { 1 };   /* don't use static : performance detrimental */
-    return one.c[0];
-}
-
-
-#if defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS==2)
-/* lie to the compiler about data alignment; use with caution */
-
-static U16 LZ4_read16(const void* memPtr) { return *(const U16*) memPtr; }
-static U32 LZ4_read32(const void* memPtr) { return *(const U32*) memPtr; }
-static reg_t LZ4_read_ARCH(const void* memPtr) { return *(const reg_t*) memPtr; }
-
-static void LZ4_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; }
-static void LZ4_write32(void* memPtr, U32 value) { *(U32*)memPtr = value; }
-
-#elif defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS==1)
-
-/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
-/* currently only defined for gcc and icc */
-typedef union { U16 u16; U32 u32; reg_t uArch; } __attribute__((packed)) unalign;
-
-static U16 LZ4_read16(const void* ptr) { return ((const unalign*)ptr)->u16; }
-static U32 LZ4_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }
-static reg_t LZ4_read_ARCH(const void* ptr) { return ((const unalign*)ptr)->uArch; }
-
-static void LZ4_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; }
-static void LZ4_write32(void* memPtr, U32 value) { ((unalign*)memPtr)->u32 = value; }
-
-#else  /* safe and portable access through memcpy() */
-
-static U16 LZ4_read16(const void* memPtr)
-{
-    U16 val; memcpy(&val, memPtr, sizeof(val)); return val;
-}
-
-static U32 LZ4_read32(const void* memPtr)
-{
-    U32 val; memcpy(&val, memPtr, sizeof(val)); return val;
-}
-
-static reg_t LZ4_read_ARCH(const void* memPtr)
-{
-    reg_t val; memcpy(&val, memPtr, sizeof(val)); return val;
-}
-
-static void LZ4_write16(void* memPtr, U16 value)
-{
-    memcpy(memPtr, &value, sizeof(value));
-}
-
-static void LZ4_write32(void* memPtr, U32 value)
-{
-    memcpy(memPtr, &value, sizeof(value));
-}
-
-#endif /* LZ4_FORCE_MEMORY_ACCESS */
-
-
-static U16 LZ4_readLE16(const void* memPtr)
-{
-    if (LZ4_isLittleEndian()) {
-        return LZ4_read16(memPtr);
-    } else {
-        const BYTE* p = (const BYTE*)memPtr;
-        return (U16)((U16)p[0] + (p[1]<<8));
-    }
-}
-
-static void LZ4_writeLE16(void* memPtr, U16 value)
-{
-    if (LZ4_isLittleEndian()) {
-        LZ4_write16(memPtr, value);
-    } else {
-        BYTE* p = (BYTE*)memPtr;
-        p[0] = (BYTE) value;
-        p[1] = (BYTE)(value>>8);
-    }
-}
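A standalone sketch (our own helper and test, not part of this file) of the contract the two little-endian helpers above provide: the LZ4 block format stores 16-bit match offsets low byte first on every platform, so a value such as 0x0102 must land in memory as {0x02, 0x01}.

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    /* Re-statement of the LE16 contract: low byte first, on any host. */
    static void write_le16_sketch(void *p, uint16_t v)
    {
        uint8_t b[2] = { (uint8_t)v, (uint8_t)(v >> 8) };
        memcpy(p, b, 2);
    }

    int main(void)
    {
        uint8_t buf[2];
        write_le16_sketch(buf, 0x0102);
        assert(buf[0] == 0x02 && buf[1] == 0x01);
        return 0;
    }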
-
-static void LZ4_copy8(void* dst, const void* src)
-{
-    memcpy(dst,src,8);
-}
-
-/* customized variant of memcpy, which can overwrite up to 8 bytes beyond dstEnd */
-static void LZ4_wildCopy(void* dstPtr, const void* srcPtr, void* dstEnd)
-{
-    BYTE* d = (BYTE*)dstPtr;
-    const BYTE* s = (const BYTE*)srcPtr;
-    BYTE* const e = (BYTE*)dstEnd;
-
-    do { LZ4_copy8(d,s); d+=8; s+=8; } while (d<e);
-}
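A caller-side note on the helper above, as a hedged sketch: because LZ4_wildCopy advances in 8-byte strides, it may write up to 7 bytes past dstEnd, so the destination needs that much slack beyond the bytes actually requested; the MFLIMIT / LASTLITERALS margins used later in this file exist largely to preserve such slack near buffer ends.

    #include <stdlib.h>

    /* Illustrative allocator (not in the original source): reserve the slack
     * an 8-byte-stride, over-writing copy needs past the "payload" bytes. */
    #define WILDCOPY_SLACK 8   /* mirrors WILDCOPYLENGTH */

    static void *alloc_with_wildcopy_slack(size_t payload)
    {
        return malloc(payload + WILDCOPY_SLACK);
    }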
-
-
-/*-************************************
-*  Common Constants
-**************************************/
-#define MINMATCH 4
-
-#define WILDCOPYLENGTH 8
-#define LASTLITERALS 5
-#define MFLIMIT (WILDCOPYLENGTH+MINMATCH)
-static const int LZ4_minLength = (MFLIMIT+1);
-
-#define KB *(1 <<10)
-#define MB *(1 <<20)
-#define GB *(1U<<30)
-
-#define MAXD_LOG 16
-#define MAX_DISTANCE ((1 << MAXD_LOG) - 1)
-
-#define ML_BITS  4
-#define ML_MASK  ((1U<<ML_BITS)-1)
-#define RUN_BITS (8-ML_BITS)
-#define RUN_MASK ((1U<<RUN_BITS)-1)
-
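To make the bit-field constants above concrete, here is a small worked sketch (helper name ours) of how a sequence token is assembled later in LZ4_compress_generic: the high 4 bits hold the literal run length and the low 4 bits hold the match length minus MINMATCH, with either field saturating at 15 and continuing in extra 255-valued length bytes.

    /* Build the first byte of an LZ4 sequence.  A value of 15 in either
     * nibble means "more length bytes follow" (each worth 255). */
    static unsigned char lz4_token_sketch(unsigned litLength, unsigned matchLength)
    {
        unsigned lit = (litLength < 15) ? litLength : 15;               /* RUN_MASK */
        unsigned mat = (matchLength - 4 < 15) ? matchLength - 4 : 15;   /* ML_MASK, MINMATCH = 4 */
        return (unsigned char)((lit << 4) | mat);                       /* ML_BITS = 4 */
    }

    /* Example: 5 literals followed by a 9-byte match -> token 0x55,
     * because 9 - MINMATCH == 5 fits in the low nibble. */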
-
-/*-************************************
-*  Common Utils
-**************************************/
-#define LZ4_STATIC_ASSERT(c)    { enum { LZ4_static_assert = 1/(int)(!!(c)) }; }   /* use only *after* variable declarations */
-
-
-/*-************************************
-*  Common functions
-**************************************/
-static unsigned LZ4_NbCommonBytes (register reg_t val)
-{
-    if (LZ4_isLittleEndian()) {
-        if (sizeof(val)==8) {
-#       if defined(_MSC_VER) && defined(_WIN64) && !defined(LZ4_FORCE_SW_BITCOUNT)
-            unsigned long r = 0;
-            _BitScanForward64( &r, (U64)val );
-            return (int)(r>>3);
-#       elif (defined(__clang__) || (defined(__GNUC__) && (__GNUC__>=3))) && !defined(LZ4_FORCE_SW_BITCOUNT)
-            return (__builtin_ctzll((U64)val) >> 3);
-#       else
-            static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, 0, 2, 3, 6, 1, 5, 3, 5, 1, 3, 4, 4, 2, 5, 6, 7, 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5, 5, 3, 4, 5, 6, 7, 1, 2, 4, 6, 4, 4, 5, 7, 2, 6, 5, 7, 6, 7, 7 };
-            return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58];
-#       endif
-        } else /* 32 bits */ {
-#       if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
-            unsigned long r;
-            _BitScanForward( &r, (U32)val );
-            return (int)(r>>3);
-#       elif (defined(__clang__) || (defined(__GNUC__) && (__GNUC__>=3))) && !defined(LZ4_FORCE_SW_BITCOUNT)
-            return (__builtin_ctz((U32)val) >> 3);
-#       else
-            static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2, 1, 3, 2, 0, 1, 3, 3, 1, 2, 2, 2, 2, 0, 3, 1, 2, 0, 1, 0, 1, 1 };
-            return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];
-#       endif
-        }
-    } else   /* Big Endian CPU */ {
-        if (sizeof(val)==8) {
-#       if defined(_MSC_VER) && defined(_WIN64) && !defined(LZ4_FORCE_SW_BITCOUNT)
-            unsigned long r = 0;
-            _BitScanReverse64( &r, val );
-            return (unsigned)(r>>3);
-#       elif (defined(__clang__) || (defined(__GNUC__) && (__GNUC__>=3))) && !defined(LZ4_FORCE_SW_BITCOUNT)
-            return (__builtin_clzll((U64)val) >> 3);
-#       else
-            unsigned r;
-            if (!(val>>32)) { r=4; } else { r=0; val>>=32; }
-            if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }
-            r += (!val);
-            return r;
-#       endif
-        } else /* 32 bits */ {
-#       if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
-            unsigned long r = 0;
-            _BitScanReverse( &r, (unsigned long)val );
-            return (unsigned)(r>>3);
-#       elif (defined(__clang__) || (defined(__GNUC__) && (__GNUC__>=3))) && !defined(LZ4_FORCE_SW_BITCOUNT)
-            return (__builtin_clz((U32)val) >> 3);
-#       else
-            unsigned r;
-            if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; }
-            r += (!val);
-            return r;
-#       endif
-        }
-    }
-}
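A portable, unoptimized illustration (our own helper, little-endian view) of what LZ4_NbCommonBytes computes: the number of leading bytes, in memory order, on which two machine words agree equals the number of trailing zero bytes of their XOR, which the optimized paths above obtain with a single bit-scan instruction or a De Bruijn lookup.

    #include <assert.h>
    #include <stdint.h>

    static unsigned common_bytes_le(uint64_t a, uint64_t b)
    {
        uint64_t diff = a ^ b;
        unsigned n = 0;
        while (n < 8 && ((diff >> (8 * n)) & 0xFF) == 0)
            n++;
        return n;
    }

    int main(void)
    {
        /* bytes 0..2 agree, byte 3 differs -> 3 common bytes */
        assert(common_bytes_le(0x00000000AA334455ULL, 0x00000000BB334455ULL) == 3);
        return 0;
    }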
-
-#define STEPSIZE sizeof(reg_t)
-static unsigned LZ4_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* pInLimit)
-{
-    const BYTE* const pStart = pIn;
-
-    while (likely(pIn<pInLimit-(STEPSIZE-1))) {
-        reg_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);
-        if (!diff) { pIn+=STEPSIZE; pMatch+=STEPSIZE; continue; }
-        pIn += LZ4_NbCommonBytes(diff);
-        return (unsigned)(pIn - pStart);
-    }
-
-    if ((STEPSIZE==8) && (pIn<(pInLimit-3)) && (LZ4_read32(pMatch) == LZ4_read32(pIn))) { pIn+=4; pMatch+=4; }
-    if ((pIn<(pInLimit-1)) && (LZ4_read16(pMatch) == LZ4_read16(pIn))) { pIn+=2; pMatch+=2; }
-    if ((pIn<pInLimit) && (*pMatch == *pIn)) pIn++;
-    return (unsigned)(pIn - pStart);
-}
-
-
-#ifndef LZ4_COMMONDEFS_ONLY
-/*-************************************
-*  Local Constants
-**************************************/
-static const int LZ4_64Klimit = ((64 KB) + (MFLIMIT-1));
-static const U32 LZ4_skipTrigger = 6;  /* Increase this value ==> compression runs slower on incompressible data */
-
-
-/*-************************************
-*  Local Structures and types
-**************************************/
-typedef enum { notLimited = 0, limitedOutput = 1 } limitedOutput_directive;
-typedef enum { byPtr, byU32, byU16 } tableType_t;
-
-typedef enum { noDict = 0, withPrefix64k, usingExtDict } dict_directive;
-typedef enum { noDictIssue = 0, dictSmall } dictIssue_directive;
-
-typedef enum { endOnOutputSize = 0, endOnInputSize = 1 } endCondition_directive;
-typedef enum { full = 0, partial = 1 } earlyEnd_directive;
-
-
-/*-************************************
-*  Local Utils
-**************************************/
-int LZ4_versionNumber (void) { return LZ4_VERSION_NUMBER; }
-const char* LZ4_versionString(void) { return LZ4_VERSION_STRING; }
-int LZ4_compressBound(int isize)  { return LZ4_COMPRESSBOUND(isize); }
-int LZ4_sizeofState() { return LZ4_STREAMSIZE; }
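A brief caller-side sketch of the helpers above (assumed usage, not from this file): LZ4_compressBound() gives the worst-case compressed size for a given input, so sizing the destination buffer with it lets the compressor take the path that never checks for output overflow, and LZ4_sizeofState() reports how many bytes of scratch state the *_extState() entry points expect.

    #include <stdlib.h>
    #include "lz4.h"

    /* Allocate a destination buffer that can always hold the compressed data. */
    static char *alloc_compressed_buffer(int srcSize)
    {
        return (char *)malloc((size_t)LZ4_compressBound(srcSize));
    }

    /* Allocate scratch state for LZ4_compress_fast_extState(). */
    static void *alloc_lz4_state(void)
    {
        return malloc((size_t)LZ4_sizeofState());
    }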
-
-
-/*-******************************
-*  Compression functions
-********************************/
-static U32 LZ4_hash4(U32 sequence, tableType_t const tableType)
-{
-    if (tableType == byU16)
-        return ((sequence * 2654435761U) >> ((MINMATCH*8)-(LZ4_HASHLOG+1)));
-    else
-        return ((sequence * 2654435761U) >> ((MINMATCH*8)-LZ4_HASHLOG));
-}
-
-static U32 LZ4_hash5(U64 sequence, tableType_t const tableType)
-{
-    static const U64 prime5bytes = 889523592379ULL;
-    static const U64 prime8bytes = 11400714785074694791ULL;
-    const U32 hashLog = (tableType == byU16) ? LZ4_HASHLOG+1 : LZ4_HASHLOG;
-    if (LZ4_isLittleEndian())
-        return (U32)(((sequence << 24) * prime5bytes) >> (64 - hashLog));
-    else
-        return (U32)(((sequence >> 24) * prime8bytes) >> (64 - hashLog));
-}
-
-FORCE_INLINE U32 LZ4_hashPosition(const void* const p, tableType_t const tableType)
-{
-    if ((sizeof(reg_t)==8) && (tableType != byU16)) return LZ4_hash5(LZ4_read_ARCH(p), tableType);
-    return LZ4_hash4(LZ4_read32(p), tableType);
-}
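A standalone sketch of the multiplicative hashing used above: the first four (or, on 64-bit targets, five) input bytes are multiplied by a large odd constant close to 2^32 divided by the golden ratio, and only the top hashLog bits are kept, which spreads similar byte sequences across the hash table. (LZ4_HASHLOG itself is defined elsewhere in lz4 in terms of LZ4_MEMORY_USAGE; the helper name here is ours.)

    #include <stdint.h>

    static uint32_t hash4_sketch(uint32_t sequence, unsigned hashLog)
    {
        return (sequence * 2654435761U) >> (32 - hashLog);
    }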
-
-static void LZ4_putPositionOnHash(const BYTE* p, U32 h, void* tableBase, tableType_t const tableType, const BYTE* srcBase)
-{
-    switch (tableType)
-    {
-    case byPtr: { const BYTE** hashTable = (const BYTE**)tableBase; hashTable[h] = p; return; }
-    case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = (U32)(p-srcBase); return; }
-    case byU16: { U16* hashTable = (U16*) tableBase; hashTable[h] = (U16)(p-srcBase); return; }
-    }
-}
-
-FORCE_INLINE void LZ4_putPosition(const BYTE* p, void* tableBase, tableType_t tableType, const BYTE* srcBase)
-{
-    U32 const h = LZ4_hashPosition(p, tableType);
-    LZ4_putPositionOnHash(p, h, tableBase, tableType, srcBase);
-}
-
-static const BYTE* LZ4_getPositionOnHash(U32 h, void* tableBase, tableType_t tableType, const BYTE* srcBase)
-{
-    if (tableType == byPtr) { const BYTE** hashTable = (const BYTE**) tableBase; return hashTable[h]; }
-    if (tableType == byU32) { const U32* const hashTable = (U32*) tableBase; return hashTable[h] + srcBase; }
-    { const U16* const hashTable = (U16*) tableBase; return hashTable[h] + srcBase; }   /* default, to ensure a return */
-}
-
-FORCE_INLINE const BYTE* LZ4_getPosition(const BYTE* p, void* tableBase, tableType_t tableType, const BYTE* srcBase)
-{
-    U32 const h = LZ4_hashPosition(p, tableType);
-    return LZ4_getPositionOnHash(h, tableBase, tableType, srcBase);
-}
-
-
-/** LZ4_compress_generic() :
-    inlined, to ensure branches are decided at compilation time */
-FORCE_INLINE int LZ4_compress_generic(
-                 LZ4_stream_t_internal* const cctx,
-                 const char* const source,
-                 char* const dest,
-                 const int inputSize,
-                 const int maxOutputSize,
-                 const limitedOutput_directive outputLimited,
-                 const tableType_t tableType,
-                 const dict_directive dict,
-                 const dictIssue_directive dictIssue,
-                 const U32 acceleration)
-{
-    const BYTE* ip = (const BYTE*) source;
-    const BYTE* base;
-    const BYTE* lowLimit;
-    const BYTE* const lowRefLimit = ip - cctx->dictSize;
-    const BYTE* const dictionary = cctx->dictionary;
-    const BYTE* const dictEnd = dictionary + cctx->dictSize;
-    const ptrdiff_t dictDelta = dictEnd - (const BYTE*)source;
-    const BYTE* anchor = (const BYTE*) source;
-    const BYTE* const iend = ip + inputSize;
-    const BYTE* const mflimit = iend - MFLIMIT;
-    const BYTE* const matchlimit = iend - LASTLITERALS;
-
-    BYTE* op = (BYTE*) dest;
-    BYTE* const olimit = op + maxOutputSize;
-
-    U32 forwardH;
-
-    /* Init conditions */
-    if ((U32)inputSize > (U32)LZ4_MAX_INPUT_SIZE) return 0;   /* Unsupported inputSize, too large (or negative) */
-    switch(dict)
-    {
-    case noDict:
-    default:
-        base = (const BYTE*)source;
-        lowLimit = (const BYTE*)source;
-        break;
-    case withPrefix64k:
-        base = (const BYTE*)source - cctx->currentOffset;
-        lowLimit = (const BYTE*)source - cctx->dictSize;
-        break;
-    case usingExtDict:
-        base = (const BYTE*)source - cctx->currentOffset;
-        lowLimit = (const BYTE*)source;
-        break;
-    }
-    if ((tableType == byU16) && (inputSize>=LZ4_64Klimit)) return 0;   /* Size too large (not within 64K limit) */
-    if (inputSize<LZ4_minLength) goto _last_literals;                  /* Input too small, no compression (all literals) */
-
-    /* First Byte */
-    LZ4_putPosition(ip, cctx->hashTable, tableType, base);
-    ip++; forwardH = LZ4_hashPosition(ip, tableType);
-
-    /* Main Loop */
-    for ( ; ; ) {
-        ptrdiff_t refDelta = 0;
-        const BYTE* match;
-        BYTE* token;
-
-        /* Find a match */
-        {   const BYTE* forwardIp = ip;
-            unsigned step = 1;
-            unsigned searchMatchNb = acceleration << LZ4_skipTrigger;
-            do {
-                U32 const h = forwardH;
-                ip = forwardIp;
-                forwardIp += step;
-                step = (searchMatchNb++ >> LZ4_skipTrigger);
-
-                if (unlikely(forwardIp > mflimit)) goto _last_literals;
-
-                match = LZ4_getPositionOnHash(h, cctx->hashTable, tableType, base);
-                if (dict==usingExtDict) {
-                    if (match < (const BYTE*)source) {
-                        refDelta = dictDelta;
-                        lowLimit = dictionary;
-                    } else {
-                        refDelta = 0;
-                        lowLimit = (const BYTE*)source;
-                }   }
-                forwardH = LZ4_hashPosition(forwardIp, tableType);
-                LZ4_putPositionOnHash(ip, h, cctx->hashTable, tableType, base);
-
-            } while ( ((dictIssue==dictSmall) ? (match < lowRefLimit) : 0)
-                || ((tableType==byU16) ? 0 : (match + MAX_DISTANCE < ip))
-                || (LZ4_read32(match+refDelta) != LZ4_read32(ip)) );
-        }
-
-        /* Catch up */
-        while (((ip>anchor) & (match+refDelta > lowLimit)) && (unlikely(ip[-1]==match[refDelta-1]))) { ip--; match--; }
-
-        /* Encode Literals */
-        {   unsigned const litLength = (unsigned)(ip - anchor);
-            token = op++;
-            if ((outputLimited) &&  /* Check output buffer overflow */
-                (unlikely(op + litLength + (2 + 1 + LASTLITERALS) + (litLength/255) > olimit)))
-                return 0;
-            if (litLength >= RUN_MASK) {
-                int len = (int)litLength-RUN_MASK;
-                *token = (RUN_MASK<<ML_BITS);
-                for(; len >= 255 ; len-=255) *op++ = 255;
-                *op++ = (BYTE)len;
-            }
-            else *token = (BYTE)(litLength<<ML_BITS);
-
-            /* Copy Literals */
-            LZ4_wildCopy(op, anchor, op+litLength);
-            op+=litLength;
-        }
-
-_next_match:
-        /* Encode Offset */
-        LZ4_writeLE16(op, (U16)(ip-match)); op+=2;
-
-        /* Encode MatchLength */
-        {   unsigned matchCode;
-
-            if ((dict==usingExtDict) && (lowLimit==dictionary)) {
-                const BYTE* limit;
-                match += refDelta;
-                limit = ip + (dictEnd-match);
-                if (limit > matchlimit) limit = matchlimit;
-                matchCode = LZ4_count(ip+MINMATCH, match+MINMATCH, limit);
-                ip += MINMATCH + matchCode;
-                if (ip==limit) {
-                    unsigned const more = LZ4_count(ip, (const BYTE*)source, matchlimit);
-                    matchCode += more;
-                    ip += more;
-                }
-            } else {
-                matchCode = LZ4_count(ip+MINMATCH, match+MINMATCH, matchlimit);
-                ip += MINMATCH + matchCode;
-            }
-
-            if ( outputLimited &&    /* Check output buffer overflow */
-                (unlikely(op + (1 + LASTLITERALS) + (matchCode>>8) > olimit)) )
-                return 0;
-            if (matchCode >= ML_MASK) {
-                *token += ML_MASK;
-                matchCode -= ML_MASK;
-                LZ4_write32(op, 0xFFFFFFFF);
-                while (matchCode >= 4*255) op+=4, LZ4_write32(op, 0xFFFFFFFF), matchCode -= 4*255;
-                op += matchCode / 255;
-                *op++ = (BYTE)(matchCode % 255);
-            } else
-                *token += (BYTE)(matchCode);
-        }
-
-        anchor = ip;
-
-        /* Test end of chunk */
-        if (ip > mflimit) break;
-
-        /* Fill table */
-        LZ4_putPosition(ip-2, cctx->hashTable, tableType, base);
-
-        /* Test next position */
-        match = LZ4_getPosition(ip, cctx->hashTable, tableType, base);
-        if (dict==usingExtDict) {
-            if (match < (const BYTE*)source) {
-                refDelta = dictDelta;
-                lowLimit = dictionary;
-            } else {
-                refDelta = 0;
-                lowLimit = (const BYTE*)source;
-        }   }
-        LZ4_putPosition(ip, cctx->hashTable, tableType, base);
-        if ( ((dictIssue==dictSmall) ? (match>=lowRefLimit) : 1)
-            && (match+MAX_DISTANCE>=ip)
-            && (LZ4_read32(match+refDelta)==LZ4_read32(ip)) )
-        { token=op++; *token=0; goto _next_match; }
-
-        /* Prepare next loop */
-        forwardH = LZ4_hashPosition(++ip, tableType);
-    }
-
-_last_literals:
-    /* Encode Last Literals */
-    {   size_t const lastRun = (size_t)(iend - anchor);
-        if ( (outputLimited) &&  /* Check output buffer overflow */
-            ((op - (BYTE*)dest) + lastRun + 1 + ((lastRun+255-RUN_MASK)/255) > (U32)maxOutputSize) )
-            return 0;
-        if (lastRun >= RUN_MASK) {
-            size_t accumulator = lastRun - RUN_MASK;
-            *op++ = RUN_MASK << ML_BITS;
-            for(; accumulator >= 255 ; accumulator-=255) *op++ = 255;
-            *op++ = (BYTE) accumulator;
-        } else {
-            *op++ = (BYTE)(lastRun<<ML_BITS);
-        }
-        memcpy(op, anchor, lastRun);
-        op += lastRun;
-    }
-
-    /* End */
-    return (int) (((char*)op)-dest);
-}
-
-
-int LZ4_compress_fast_extState(void* state, const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
-{
-    LZ4_stream_t_internal* ctx = &((LZ4_stream_t*)state)->internal_donotuse;
-    LZ4_resetStream((LZ4_stream_t*)state);
-    if (acceleration < 1) acceleration = ACCELERATION_DEFAULT;
-
-    if (maxOutputSize >= LZ4_compressBound(inputSize)) {
-        if (inputSize < LZ4_64Klimit)
-            return LZ4_compress_generic(ctx, source, dest, inputSize,             0,    notLimited,                        byU16, noDict, noDictIssue, acceleration);
-        else
-            return LZ4_compress_generic(ctx, source, dest, inputSize,             0,    notLimited, (sizeof(void*)==8) ? byU32 : byPtr, noDict, noDictIssue, acceleration);
-    } else {
-        if (inputSize < LZ4_64Klimit)
-            return LZ4_compress_generic(ctx, source, dest, inputSize, maxOutputSize, limitedOutput,                        byU16, noDict, noDictIssue, acceleration);
-        else
-            return LZ4_compress_generic(ctx, source, dest, inputSize, maxOutputSize, limitedOutput, (sizeof(void*)==8) ? byU32 : byPtr, noDict, noDictIssue, acceleration);
-    }
-}
-
-
-int LZ4_compress_fast(const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
-{
-#if (HEAPMODE)
-    void* ctxPtr = ALLOCATOR(1, sizeof(LZ4_stream_t));   /* malloc-calloc always properly aligned */
-#else
-    LZ4_stream_t ctx;
-    void* const ctxPtr = &ctx;
-#endif
-
-    int const result = LZ4_compress_fast_extState(ctxPtr, source, dest, inputSize, maxOutputSize, acceleration);
-
-#if (HEAPMODE)
-    FREEMEM(ctxPtr);
-#endif
-    return result;
-}
-
-
-int LZ4_compress_default(const char* source, char* dest, int inputSize, int maxOutputSize)
-{
-    return LZ4_compress_fast(source, dest, inputSize, maxOutputSize, 1);
-}
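To round this off, a minimal self-contained usage sketch of the default entry point above (our own example program, assuming lz4.h is on the include path):

    #include <stdio.h>
    #include <stdlib.h>
    #include "lz4.h"

    int main(void)
    {
        const char src[] = "LZ4 is a fast lossless compression algorithm.";
        const int  srcSize = (int)sizeof(src);
        const int  dstCapacity = LZ4_compressBound(srcSize);
        char *dst = (char *)malloc((size_t)dstCapacity);
        if (dst == NULL)
            return 1;

        int written = LZ4_compress_default(src, dst, srcSize, dstCapacity);
        if (written <= 0)
            fprintf(stderr, "compression failed\n");
        else
            printf("compressed %d -> %d bytes\n", srcSize, written);

        free(dst);
        return 0;
    }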
-
-
-/* hidden debug function */
-/* strangely enough, gcc generates faster code when this function is uncommented, even if unused */
-int LZ4_compress_fast_force(const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
-{
-    LZ4_stream_t ctx;
-    LZ4_resetStream(&ctx);
-
-    if (inputSize < LZ4_64Klimit)
-        return LZ4_compress_generic(&ctx.internal_donotuse, source, dest, inputSize, maxOutputSize, limitedOutput, byU16,                        noDict, noDictIssue, acceleration);
-    else
-        return LZ4_compress_generic(&ctx.internal_donotuse, source, dest, inputSize, maxOutputSize, limitedOutput, sizeof(void*)==8 ? byU32 : byPtr, noDict, noDictIssue, acceleration);
-}
-
-
-/*-******************************
-*  *_destSize() variant
-********************************/
-
-static int LZ4_compress_destSize_generic(
-                       LZ4_stream_t_internal* const ctx,
-                 const char* const src,
-                       char* const dst,
-                       int*  const srcSizePtr,
-                 const int targetDstSize,
-                 const tableType_t tableType)
-{
-    const BYTE* ip = (const BYTE*) src;
-    const BYTE* base = (const BYTE*) src;
-    const BYTE* lowLimit = (const BYTE*) src;
-    const BYTE* anchor = ip;
-    const BYTE* const iend = ip + *srcSizePtr;
-    const BYTE* const mflimit = iend - MFLIMIT;
-    const BYTE* const matchlimit = iend - LASTLITERALS;
-
-    BYTE* op = (BYTE*) dst;
-    BYTE* const oend = op + targetDstSize;
-    BYTE* const oMaxLit = op + targetDstSize - 2 /* offset */ - 8 /* because 8+MINMATCH==MFLIMIT */ - 1 /* token */;
-    BYTE* const oMaxMatch = op + targetDstSize - (LASTLITERALS + 1 /* token */);
-    BYTE* const oMaxSeq = oMaxLit - 1 /* token */;
-
-    U32 forwardH;
-
-
-    /* Init conditions */
-    if (targetDstSize < 1) return 0;                                     /* Impossible to store anything */
-    if ((U32)*srcSizePtr > (U32)LZ4_MAX_INPUT_SIZE) return 0;            /* Unsupported input size, too large (or negative) */
-    if ((tableType == byU16) && (*srcSizePtr>=LZ4_64Klimit)) return 0;   /* Size too large (not within 64K limit) */
-    if (*srcSizePtr<LZ4_minLength) goto _last_literals;                  /* Input too small, no compression (all literals) */
-
-    /* First Byte */
-    *srcSizePtr = 0;
-    LZ4_putPosition(ip, ctx->hashTable, tableType, base);
-    ip++; forwardH = LZ4_hashPosition(ip, tableType);
-
-    /* Main Loop */
-    for ( ; ; ) {
-        const BYTE* match;
-        BYTE* token;
-
-        /* Find a match */
-        {   const BYTE* forwardIp = ip;
-            unsigned step = 1;
-            unsigned searchMatchNb = 1 << LZ4_skipTrigger;
-
-            do {
-                U32 h = forwardH;
-                ip = forwardIp;
-                forwardIp += step;
-                step = (searchMatchNb++ >> LZ4_skipTrigger);
-
-                if (unlikely(forwardIp > mflimit)) goto _last_literals;
-
-                match = LZ4_getPositionOnHash(h, ctx->hashTable, tableType, base);
-                forwardH = LZ4_hashPosition(forwardIp, tableType);
-                LZ4_putPositionOnHash(ip, h, ctx->hashTable, tableType, base);
-
-            } while ( ((tableType==byU16) ? 0 : (match + MAX_DISTANCE < ip))
-                || (LZ4_read32(match) != LZ4_read32(ip)) );
-        }
-
-        /* Catch up */
-        while ((ip>anchor) && (match > lowLimit) && (unlikely(ip[-1]==match[-1]))) { ip--; match--; }
-
-        /* Encode Literal length */
-        {   unsigned litLength = (unsigned)(ip - anchor);
-            token = op++;
-            if (op + ((litLength+240)/255) + litLength > oMaxLit) {
-                /* Not enough space for a last match */
-                op--;
-                goto _last_literals;
-            }
-            if (litLength>=RUN_MASK) {
-                unsigned len = litLength - RUN_MASK;
-                *token=(RUN_MASK<<ML_BITS);
-                for(; len >= 255 ; len-=255) *op++ = 255;
-                *op++ = (BYTE)len;
-            }
-            else *token = (BYTE)(litLength<<ML_BITS);
-
-            /* Copy Literals */
-            LZ4_wildCopy(op, anchor, op+litLength);
-            op += litLength;
-        }
-
-_next_match:
-        /* Encode Offset */
-        LZ4_writeLE16(op, (U16)(ip-match)); op+=2;
-
-        /* Encode MatchLength */
-        {   size_t matchLength = LZ4_count(ip+MINMATCH, match+MINMATCH, matchlimit);
-
-            if (op + ((matchLength+240)/255) > oMaxMatch) {
-                /* Match description too long : reduce it */
-                matchLength = (15-1) + (oMaxMatch-op) * 255;
-            }
-            ip += MINMATCH + matchLength;
-
-            if (matchLength>=ML_MASK) {
-                *token += ML_MASK;
-                matchLength -= ML_MASK;
-                while (matchLength >= 255) { matchLength-=255; *op++ = 255; }
-                *op++ = (BYTE)matchLength;
-            }
-            else *token += (BYTE)(matchLength);
-        }
-
-        anchor = ip;
-
-        /* Test end of block */
-        if (ip > mflimit) break;
-        if (op > oMaxSeq) break;
-
-        /* Fill table */
-        LZ4_putPosition(ip-2, ctx->hashTable, tableType, base);
-
-        /* Test next position */
-        match = LZ4_getPosition(ip, ctx->hashTable, tableType, base);
-        LZ4_putPosition(ip, ctx->hashTable, tableType, base);
-        if ( (match+MAX_DISTANCE>=ip)
-            && (LZ4_read32(match)==LZ4_read32(ip)) )
-        { token=op++; *token=0; goto _next_match; }
-
-        /* Prepare next loop */
-        forwardH = LZ4_hashPosition(++ip, tableType);
-    }
-
-_last_literals:
-    /* Encode Last Literals */
-    {   size_t lastRunSize = (size_t)(iend - anchor);
-        if (op + 1 /* token */ + ((lastRunSize+240)/255) /* litLength */ + lastRunSize /* literals */ > oend) {
-            /* adapt lastRunSize to fill 'dst' */
-            lastRunSize  = (oend-op) - 1;
-            lastRunSize -= (lastRunSize+240)/255;
-        }
-        ip = anchor + lastRunSize;
-
-        if (lastRunSize >= RUN_MASK) {
-            size_t accumulator = lastRunSize - RUN_MASK;
-            *op++ = RUN_MASK << ML_BITS;
-            for(; accumulator >= 255 ; accumulator-=255) *op++ = 255;
-            *op++ = (BYTE) accumulator;
-        } else {
-            *op++ = (BYTE)(lastRunSize<<ML_BITS);
-        }
-        memcpy(op, anchor, lastRunSize);
-        op += lastRunSize;
-    }
-
-    /* End */
-    *srcSizePtr = (int) (((const char*)ip)-src);
-    return (int) (((char*)op)-dst);
-}
-
-
-static int LZ4_compress_destSize_extState (LZ4_stream_t* state, const char* src, char* dst, int* srcSizePtr, int targetDstSize)
-{
-    LZ4_resetStream(state);
-
-    if (targetDstSize >= LZ4_compressBound(*srcSizePtr)) {  /* compression success is guaranteed */
-        return LZ4_compress_fast_extState(state, src, dst, *srcSizePtr, targetDstSize, 1);
-    } else {
-        if (*srcSizePtr < LZ4_64Klimit)
-            return LZ4_compress_destSize_generic(&state->internal_donotuse, src, dst, srcSizePtr, targetDstSize, byU16);
-        else
-            return LZ4_compress_destSize_generic(&state->internal_donotuse, src, dst, srcSizePtr, targetDstSize, sizeof(void*)==8 ? byU32 : byPtr);
-    }
-}
-
-
-int LZ4_compress_destSize(const char* src, char* dst, int* srcSizePtr, int targetDstSize)
-{
-#if (HEAPMODE)
-    LZ4_stream_t* ctx = (LZ4_stream_t*)ALLOCATOR(1, sizeof(LZ4_stream_t));   /* malloc-calloc always properly aligned */
-#else
-    LZ4_stream_t ctxBody;
-    LZ4_stream_t* ctx = &ctxBody;
-#endif
-
-    int result = LZ4_compress_destSize_extState(ctx, src, dst, srcSizePtr, targetDstSize);
-
-#if (HEAPMODE)
-    FREEMEM(ctx);
-#endif
-    return result;
-}
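
The *_destSize() variant above inverts the usual contract: rather than sizing 'dst' for a given input, it consumes as much of 'src' as fits into a fixed-size output buffer and reports how much input was used. A minimal usage sketch follows; the 4 KB block size and the helper name are illustrative, and only the LZ4_compress_destSize() call itself comes from the code above.

#include <stdio.h>
#include "lz4.h"

/* Pack as much of 'src' as possible into a fixed 4 KB block.
 * On return, 'consumed' holds how many input bytes were actually used. */
static int fill_fixed_block(const char *src, int srcSize, char *dst4k)
{
    int consumed = srcSize;   /* in: bytes available, out: bytes consumed */
    int written  = LZ4_compress_destSize(src, dst4k, &consumed, 4096);
    if (written == 0)
        return -1;            /* not even a minimal sequence could be stored */
    printf("consumed %d of %d input bytes, wrote %d compressed bytes\n",
           consumed, srcSize, written);
    return written;
}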
-
-
-
-/*-******************************
-*  Streaming functions
-********************************/
-
-LZ4_stream_t* LZ4_createStream(void)
-{
-    LZ4_stream_t* lz4s = (LZ4_stream_t*)ALLOCATOR(8, LZ4_STREAMSIZE_U64);
-    LZ4_STATIC_ASSERT(LZ4_STREAMSIZE >= sizeof(LZ4_stream_t_internal));    /* A compilation error here means LZ4_STREAMSIZE is not large enough */
-    LZ4_resetStream(lz4s);
-    return lz4s;
-}
-
-void LZ4_resetStream (LZ4_stream_t* LZ4_stream)
-{
-    MEM_INIT(LZ4_stream, 0, sizeof(LZ4_stream_t));
-}
-
-int LZ4_freeStream (LZ4_stream_t* LZ4_stream)
-{
-    FREEMEM(LZ4_stream);
-    return (0);
-}
-
-
-#define HASH_UNIT sizeof(reg_t)
-int LZ4_loadDict (LZ4_stream_t* LZ4_dict, const char* dictionary, int dictSize)
-{
-    LZ4_stream_t_internal* dict = &LZ4_dict->internal_donotuse;
-    const BYTE* p = (const BYTE*)dictionary;
-    const BYTE* const dictEnd = p + dictSize;
-    const BYTE* base;
-
-    if ((dict->initCheck) || (dict->currentOffset > 1 GB))  /* Uninitialized structure, or reuse overflow */
-        LZ4_resetStream(LZ4_dict);
-
-    if (dictSize < (int)HASH_UNIT) {
-        dict->dictionary = NULL;
-        dict->dictSize = 0;
-        return 0;
-    }
-
-    if ((dictEnd - p) > 64 KB) p = dictEnd - 64 KB;
-    dict->currentOffset += 64 KB;
-    base = p - dict->currentOffset;
-    dict->dictionary = p;
-    dict->dictSize = (U32)(dictEnd - p);
-    dict->currentOffset += dict->dictSize;
-
-    while (p <= dictEnd-HASH_UNIT) {
-        LZ4_putPosition(p, dict->hashTable, byU32, base);
-        p+=3;
-    }
-
-    return dict->dictSize;
-}
-
-
-static void LZ4_renormDictT(LZ4_stream_t_internal* LZ4_dict, const BYTE* src)
-{
-    if ((LZ4_dict->currentOffset > 0x80000000) ||
-        ((uptrval)LZ4_dict->currentOffset > (uptrval)src)) {   /* address space overflow */
-        /* rescale hash table */
-        U32 const delta = LZ4_dict->currentOffset - 64 KB;
-        const BYTE* dictEnd = LZ4_dict->dictionary + LZ4_dict->dictSize;
-        int i;
-        for (i=0; i<LZ4_HASH_SIZE_U32; i++) {
-            if (LZ4_dict->hashTable[i] < delta) LZ4_dict->hashTable[i]=0;
-            else LZ4_dict->hashTable[i] -= delta;
-        }
-        LZ4_dict->currentOffset = 64 KB;
-        if (LZ4_dict->dictSize > 64 KB) LZ4_dict->dictSize = 64 KB;
-        LZ4_dict->dictionary = dictEnd - LZ4_dict->dictSize;
-    }
-}
-
-
-int LZ4_compress_fast_continue (LZ4_stream_t* LZ4_stream, const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
-{
-    LZ4_stream_t_internal* streamPtr = &LZ4_stream->internal_donotuse;
-    const BYTE* const dictEnd = streamPtr->dictionary + streamPtr->dictSize;
-
-    const BYTE* smallest = (const BYTE*) source;
-    if (streamPtr->initCheck) return 0;   /* Uninitialized structure detected */
-    if ((streamPtr->dictSize>0) && (smallest>dictEnd)) smallest = dictEnd;
-    LZ4_renormDictT(streamPtr, smallest);
-    if (acceleration < 1) acceleration = ACCELERATION_DEFAULT;
-
-    /* Check overlapping input/dictionary space */
-    {   const BYTE* sourceEnd = (const BYTE*) source + inputSize;
-        if ((sourceEnd > streamPtr->dictionary) && (sourceEnd < dictEnd)) {
-            streamPtr->dictSize = (U32)(dictEnd - sourceEnd);
-            if (streamPtr->dictSize > 64 KB) streamPtr->dictSize = 64 KB;
-            if (streamPtr->dictSize < 4) streamPtr->dictSize = 0;
-            streamPtr->dictionary = dictEnd - streamPtr->dictSize;
-        }
-    }
-
-    /* prefix mode : source data follows dictionary */
-    if (dictEnd == (const BYTE*)source) {
-        int result;
-        if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset))
-            result = LZ4_compress_generic(streamPtr, source, dest, inputSize, maxOutputSize, limitedOutput, byU32, withPrefix64k, dictSmall, acceleration);
-        else
-            result = LZ4_compress_generic(streamPtr, source, dest, inputSize, maxOutputSize, limitedOutput, byU32, withPrefix64k, noDictIssue, acceleration);
-        streamPtr->dictSize += (U32)inputSize;
-        streamPtr->currentOffset += (U32)inputSize;
-        return result;
-    }
-
-    /* external dictionary mode */
-    {   int result;
-        if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset))
-            result = LZ4_compress_generic(streamPtr, source, dest, inputSize, maxOutputSize, limitedOutput, byU32, usingExtDict, dictSmall, acceleration);
-        else
-            result = LZ4_compress_generic(streamPtr, source, dest, inputSize, maxOutputSize, limitedOutput, byU32, usingExtDict, noDictIssue, acceleration);
-        streamPtr->dictionary = (const BYTE*)source;
-        streamPtr->dictSize = (U32)inputSize;
-        streamPtr->currentOffset += (U32)inputSize;
-        return result;
-    }
-}
-
-
-/* Hidden debug function, to force external dictionary mode */
-int LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char* dest, int inputSize)
-{
-    LZ4_stream_t_internal* streamPtr = &LZ4_dict->internal_donotuse;
-    int result;
-    const BYTE* const dictEnd = streamPtr->dictionary + streamPtr->dictSize;
-
-    const BYTE* smallest = dictEnd;
-    if (smallest > (const BYTE*) source) smallest = (const BYTE*) source;
-    LZ4_renormDictT(streamPtr, smallest);
-
-    result = LZ4_compress_generic(streamPtr, source, dest, inputSize, 0, notLimited, byU32, usingExtDict, noDictIssue, 1);
-
-    streamPtr->dictionary = (const BYTE*)source;
-    streamPtr->dictSize = (U32)inputSize;
-    streamPtr->currentOffset += (U32)inputSize;
-
-    return result;
-}
-
-
-/*! LZ4_saveDict() :
- *  If previously compressed data block is not guaranteed to remain available at its memory location,
- *  save it into a safer place (char* safeBuffer).
- *  Note : you don't need to call LZ4_loadDict() afterwards,
- *         dictionary is immediately usable, you can therefore call LZ4_compress_fast_continue().
- *  Return : saved dictionary size in bytes (necessarily <= dictSize), or 0 if error.
- */
-int LZ4_saveDict (LZ4_stream_t* LZ4_dict, char* safeBuffer, int dictSize)
-{
-    LZ4_stream_t_internal* const dict = &LZ4_dict->internal_donotuse;
-    const BYTE* const previousDictEnd = dict->dictionary + dict->dictSize;
-
-    if ((U32)dictSize > 64 KB) dictSize = 64 KB;   /* useless to define a dictionary > 64 KB */
-    if ((U32)dictSize > dict->dictSize) dictSize = dict->dictSize;
-
-    memmove(safeBuffer, previousDictEnd - dictSize, dictSize);
-
-    dict->dictionary = (const BYTE*)safeBuffer;
-    dict->dictSize = (U32)dictSize;
-
-    return dictSize;
-}
-
-
-
-/*-*****************************
-*  Decompression functions
-*******************************/
-/*! LZ4_decompress_generic() :
- *  This generic decompression function covers all use cases.
- *  It shall be instantiated several times, using different sets of directives.
- *  Note that it is important that this generic function is really inlined,
- *  in order to remove useless branches during compile-time optimization.
- */
-FORCE_INLINE int LZ4_decompress_generic(
-                 const char* const source,
-                 char* const dest,
-                 int inputSize,
-                 int outputSize,         /* If endOnInput==endOnInputSize, this value is the max size of Output Buffer. */
-
-                 int endOnInput,         /* endOnOutputSize, endOnInputSize */
-                 int partialDecoding,    /* full, partial */
-                 int targetOutputSize,   /* only used if partialDecoding==partial */
-                 int dict,               /* noDict, withPrefix64k, usingExtDict */
-                 const BYTE* const lowPrefix,  /* == dest when no prefix */
-                 const BYTE* const dictStart,  /* only if dict==usingExtDict */
-                 const size_t dictSize         /* note : = 0 if noDict */
-                 )
-{
-    /* Local Variables */
-    const BYTE* ip = (const BYTE*) source;
-    const BYTE* const iend = ip + inputSize;
-
-    BYTE* op = (BYTE*) dest;
-    BYTE* const oend = op + outputSize;
-    BYTE* cpy;
-    BYTE* oexit = op + targetOutputSize;
-    const BYTE* const lowLimit = lowPrefix - dictSize;
-
-    const BYTE* const dictEnd = (const BYTE*)dictStart + dictSize;
-    const unsigned dec32table[] = {0, 1, 2, 1, 4, 4, 4, 4};
-    const int dec64table[] = {0, 0, 0, -1, 0, 1, 2, 3};
-
-    const int safeDecode = (endOnInput==endOnInputSize);
-    const int checkOffset = ((safeDecode) && (dictSize < (int)(64 KB)));
-
-
-    /* Special cases */
-    if ((partialDecoding) && (oexit > oend-MFLIMIT)) oexit = oend-MFLIMIT;                        /* targetOutputSize too high => decode everything */
-    if ((endOnInput) && (unlikely(outputSize==0))) return ((inputSize==1) && (*ip==0)) ? 0 : -1;  /* Empty output buffer */
-    if ((!endOnInput) && (unlikely(outputSize==0))) return (*ip==0?1:-1);
-
-    /* Main Loop : decode sequences */
-    while (1) {
-        size_t length;
-        const BYTE* match;
-        size_t offset;
-
-        /* get literal length */
-        unsigned const token = *ip++;
-        if ((length=(token>>ML_BITS)) == RUN_MASK) {
-            unsigned s;
-            do {
-                s = *ip++;
-                length += s;
-            } while ( likely(endOnInput ? ip<iend-RUN_MASK : 1) & (s==255) );
-            if ((safeDecode) && unlikely((uptrval)(op)+length<(uptrval)(op))) goto _output_error;   /* overflow detection */
-            if ((safeDecode) && unlikely((uptrval)(ip)+length<(uptrval)(ip))) goto _output_error;   /* overflow detection */
-        }
-
-        /* copy literals */
-        cpy = op+length;
-        if ( ((endOnInput) && ((cpy>(partialDecoding?oexit:oend-MFLIMIT)) || (ip+length>iend-(2+1+LASTLITERALS))) )
-            || ((!endOnInput) && (cpy>oend-WILDCOPYLENGTH)) )
-        {
-            if (partialDecoding) {
-                if (cpy > oend) goto _output_error;                           /* Error : write attempt beyond end of output buffer */
-                if ((endOnInput) && (ip+length > iend)) goto _output_error;   /* Error : read attempt beyond end of input buffer */
-            } else {
-                if ((!endOnInput) && (cpy != oend)) goto _output_error;       /* Error : block decoding must stop exactly there */
-                if ((endOnInput) && ((ip+length != iend) || (cpy > oend))) goto _output_error;   /* Error : input must be consumed */
-            }
-            memcpy(op, ip, length);
-            ip += length;
-            op += length;
-            break;     /* Necessarily EOF, due to parsing restrictions */
-        }
-        LZ4_wildCopy(op, ip, cpy);
-        ip += length; op = cpy;
-
-        /* get offset */
-        offset = LZ4_readLE16(ip); ip+=2;
-        match = op - offset;
-        if ((checkOffset) && (unlikely(match < lowLimit))) goto _output_error;   /* Error : offset outside buffers */
-        LZ4_write32(op, (U32)offset);   /* costs ~1%; silence an msan warning when offset==0 */
-
-        /* get matchlength */
-        length = token & ML_MASK;
-        if (length == ML_MASK) {
-            unsigned s;
-            do {
-                s = *ip++;
-                if ((endOnInput) && (ip > iend-LASTLITERALS)) goto _output_error;
-                length += s;
-            } while (s==255);
-            if ((safeDecode) && unlikely((uptrval)(op)+length<(uptrval)op)) goto _output_error;   /* overflow detection */
-        }
-        length += MINMATCH;
-
-        /* check external dictionary */
-        if ((dict==usingExtDict) && (match < lowPrefix)) {
-            if (unlikely(op+length > oend-LASTLITERALS)) goto _output_error;   /* doesn't respect parsing restriction */
-
-            if (length <= (size_t)(lowPrefix-match)) {
-                /* match can be copied as a single segment from external dictionary */
-                memmove(op, dictEnd - (lowPrefix-match), length);
-                op += length;
-            } else {
-                /* match encompass external dictionary and current block */
-                size_t const copySize = (size_t)(lowPrefix-match);
-                size_t const restSize = length - copySize;
-                memcpy(op, dictEnd - copySize, copySize);
-                op += copySize;
-                if (restSize > (size_t)(op-lowPrefix)) {  /* overlap copy */
-                    BYTE* const endOfMatch = op + restSize;
-                    const BYTE* copyFrom = lowPrefix;
-                    while (op < endOfMatch) *op++ = *copyFrom++;
-                } else {
-                    memcpy(op, lowPrefix, restSize);
-                    op += restSize;
-            }   }
-            continue;
-        }
-
-        /* copy match within block */
-        cpy = op + length;
-        if (unlikely(offset<8)) {
-            const int dec64 = dec64table[offset];
-            op[0] = match[0];
-            op[1] = match[1];
-            op[2] = match[2];
-            op[3] = match[3];
-            match += dec32table[offset];
-            memcpy(op+4, match, 4);
-            match -= dec64;
-        } else { LZ4_copy8(op, match); match+=8; }
-        op += 8;
-
-        if (unlikely(cpy>oend-12)) {
-            BYTE* const oCopyLimit = oend-(WILDCOPYLENGTH-1);
-            if (cpy > oend-LASTLITERALS) goto _output_error;    /* Error : last LASTLITERALS bytes must be literals (uncompressed) */
-            if (op < oCopyLimit) {
-                LZ4_wildCopy(op, match, oCopyLimit);
-                match += oCopyLimit - op;
-                op = oCopyLimit;
-            }
-            while (op<cpy) *op++ = *match++;
-        } else {
-            LZ4_copy8(op, match);
-            if (length>16) LZ4_wildCopy(op+8, match+8, cpy);
-        }
-        op=cpy;   /* correction */
-    }
-
-    /* end of decoding */
-    if (endOnInput)
-       return (int) (((char*)op)-dest);     /* Nb of output bytes decoded */
-    else
-       return (int) (((const char*)ip)-source);   /* Nb of input bytes read */
-
-    /* Overflow error detected */
-_output_error:
-    return (int) (-(((const char*)ip)-source))-1;
-}
-
-
-int LZ4_decompress_safe(const char* source, char* dest, int compressedSize, int maxDecompressedSize)
-{
-    return LZ4_decompress_generic(source, dest, compressedSize, maxDecompressedSize, endOnInputSize, full, 0, noDict, (BYTE*)dest, NULL, 0);
-}
-
-int LZ4_decompress_safe_partial(const char* source, char* dest, int compressedSize, int targetOutputSize, int maxDecompressedSize)
-{
-    return LZ4_decompress_generic(source, dest, compressedSize, maxDecompressedSize, endOnInputSize, partial, targetOutputSize, noDict, (BYTE*)dest, NULL, 0);
-}
-
-int LZ4_decompress_fast(const char* source, char* dest, int originalSize)
-{
-    return LZ4_decompress_generic(source, dest, 0, originalSize, endOnOutputSize, full, 0, withPrefix64k, (BYTE*)(dest - 64 KB), NULL, 64 KB);
-}
-
-
-/*===== streaming decompression functions =====*/
-
-/*
- * If you prefer dynamic allocation methods,
- * LZ4_createStreamDecode()
- * provides a pointer (void*) towards an initialized LZ4_streamDecode_t structure.
- */
-LZ4_streamDecode_t* LZ4_createStreamDecode(void)
-{
-    LZ4_streamDecode_t* lz4s = (LZ4_streamDecode_t*) ALLOCATOR(1, sizeof(LZ4_streamDecode_t));
-    return lz4s;
-}
-
-int LZ4_freeStreamDecode (LZ4_streamDecode_t* LZ4_stream)
-{
-    FREEMEM(LZ4_stream);
-    return 0;
-}
-
-/*!
- * LZ4_setStreamDecode() :
- * Use this function to instruct where to find the dictionary.
- * This function is not necessary if previous data is still available where it was decoded.
- * Loading a size of 0 is allowed (same effect as no dictionary).
- * Return : 1 if OK, 0 if error
- */
-int LZ4_setStreamDecode (LZ4_streamDecode_t* LZ4_streamDecode, const char* dictionary, int dictSize)
-{
-    LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse;
-    lz4sd->prefixSize = (size_t) dictSize;
-    lz4sd->prefixEnd = (const BYTE*) dictionary + dictSize;
-    lz4sd->externalDict = NULL;
-    lz4sd->extDictSize  = 0;
-    return 1;
-}
-
-/*
-*_continue() :
-    These decoding functions allow decompression of multiple blocks in "streaming" mode.
-    Previously decoded blocks must still be available at the memory position where they were decoded.
-    If it's not possible, save the relevant part of decoded data into a safe buffer,
-    and indicate where it stands using LZ4_setStreamDecode()
-*/
-int LZ4_decompress_safe_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int compressedSize, int maxOutputSize)
-{
-    LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse;
-    int result;
-
-    if (lz4sd->prefixEnd == (BYTE*)dest) {
-        result = LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
-                                        endOnInputSize, full, 0,
-                                        usingExtDict, lz4sd->prefixEnd - lz4sd->prefixSize, lz4sd->externalDict, lz4sd->extDictSize);
-        if (result <= 0) return result;
-        lz4sd->prefixSize += result;
-        lz4sd->prefixEnd  += result;
-    } else {
-        lz4sd->extDictSize = lz4sd->prefixSize;
-        lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
-        result = LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
-                                        endOnInputSize, full, 0,
-                                        usingExtDict, (BYTE*)dest, lz4sd->externalDict, lz4sd->extDictSize);
-        if (result <= 0) return result;
-        lz4sd->prefixSize = result;
-        lz4sd->prefixEnd  = (BYTE*)dest + result;
-    }
-
-    return result;
-}
-
-int LZ4_decompress_fast_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int originalSize)
-{
-    LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse;
-    int result;
-
-    if (lz4sd->prefixEnd == (BYTE*)dest) {
-        result = LZ4_decompress_generic(source, dest, 0, originalSize,
-                                        endOnOutputSize, full, 0,
-                                        usingExtDict, lz4sd->prefixEnd - lz4sd->prefixSize, lz4sd->externalDict, lz4sd->extDictSize);
-        if (result <= 0) return result;
-        lz4sd->prefixSize += originalSize;
-        lz4sd->prefixEnd  += originalSize;
-    } else {
-        lz4sd->extDictSize = lz4sd->prefixSize;
-        lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
-        result = LZ4_decompress_generic(source, dest, 0, originalSize,
-                                        endOnOutputSize, full, 0,
-                                        usingExtDict, (BYTE*)dest, lz4sd->externalDict, lz4sd->extDictSize);
-        if (result <= 0) return result;
-        lz4sd->prefixSize = originalSize;
-        lz4sd->prefixEnd  = (BYTE*)dest + originalSize;
-    }
-
-    return result;
-}
-
-
-/*
-Advanced decoding functions :
-*_usingDict() :
-    These decoding functions work the same as the "_continue" ones,
-    except that the dictionary must be explicitly provided as a parameter
-*/
-
-FORCE_INLINE int LZ4_decompress_usingDict_generic(const char* source, char* dest, int compressedSize, int maxOutputSize, int safe, const char* dictStart, int dictSize)
-{
-    if (dictSize==0)
-        return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, noDict, (BYTE*)dest, NULL, 0);
-    if (dictStart+dictSize == dest) {
-        if (dictSize >= (int)(64 KB - 1))
-            return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, withPrefix64k, (BYTE*)dest-64 KB, NULL, 0);
-        return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, noDict, (BYTE*)dest-dictSize, NULL, 0);
-    }
-    return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, usingExtDict, (BYTE*)dest, (const BYTE*)dictStart, dictSize);
-}
-
-int LZ4_decompress_safe_usingDict(const char* source, char* dest, int compressedSize, int maxOutputSize, const char* dictStart, int dictSize)
-{
-    return LZ4_decompress_usingDict_generic(source, dest, compressedSize, maxOutputSize, 1, dictStart, dictSize);
-}
-
-int LZ4_decompress_fast_usingDict(const char* source, char* dest, int originalSize, const char* dictStart, int dictSize)
-{
-    return LZ4_decompress_usingDict_generic(source, dest, 0, originalSize, 0, dictStart, dictSize);
-}
-
-/* debug function */
-int LZ4_decompress_safe_forceExtDict(const char* source, char* dest, int compressedSize, int maxOutputSize, const char* dictStart, int dictSize)
-{
-    return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, endOnInputSize, full, 0, usingExtDict, (BYTE*)dest, (const BYTE*)dictStart, dictSize);
-}
-
-
-/*=*************************************************
-*  Obsolete Functions
-***************************************************/
-/* obsolete compression functions */
-int LZ4_compress_limitedOutput(const char* source, char* dest, int inputSize, int maxOutputSize) { return LZ4_compress_default(source, dest, inputSize, maxOutputSize); }
-int LZ4_compress(const char* source, char* dest, int inputSize) { return LZ4_compress_default(source, dest, inputSize, LZ4_compressBound(inputSize)); }
-int LZ4_compress_limitedOutput_withState (void* state, const char* src, char* dst, int srcSize, int dstSize) { return LZ4_compress_fast_extState(state, src, dst, srcSize, dstSize, 1); }
-int LZ4_compress_withState (void* state, const char* src, char* dst, int srcSize) { return LZ4_compress_fast_extState(state, src, dst, srcSize, LZ4_compressBound(srcSize), 1); }
-int LZ4_compress_limitedOutput_continue (LZ4_stream_t* LZ4_stream, const char* src, char* dst, int srcSize, int maxDstSize) { return LZ4_compress_fast_continue(LZ4_stream, src, dst, srcSize, maxDstSize, 1); }
-int LZ4_compress_continue (LZ4_stream_t* LZ4_stream, const char* source, char* dest, int inputSize) { return LZ4_compress_fast_continue(LZ4_stream, source, dest, inputSize, LZ4_compressBound(inputSize), 1); }
-
-/*
-These function names are deprecated and should no longer be used.
-They are only provided here for compatibility with older user programs.
-- LZ4_uncompress is totally equivalent to LZ4_decompress_fast
-- LZ4_uncompress_unknownOutputSize is totally equivalent to LZ4_decompress_safe
-*/
-int LZ4_uncompress (const char* source, char* dest, int outputSize) { return LZ4_decompress_fast(source, dest, outputSize); }
-int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize) { return LZ4_decompress_safe(source, dest, isize, maxOutputSize); }
-
-
-/* Obsolete Streaming functions */
-
-int LZ4_sizeofStreamState() { return LZ4_STREAMSIZE; }
-
-static void LZ4_init(LZ4_stream_t* lz4ds, BYTE* base)
-{
-    MEM_INIT(lz4ds, 0, sizeof(LZ4_stream_t));
-    lz4ds->internal_donotuse.bufferStart = base;
-}
-
-int LZ4_resetStreamState(void* state, char* inputBuffer)
-{
-    if ((((uptrval)state) & 3) != 0) return 1;   /* Error : pointer is not aligned on 4-bytes boundary */
-    LZ4_init((LZ4_stream_t*)state, (BYTE*)inputBuffer);
-    return 0;
-}
-
-void* LZ4_create (char* inputBuffer)
-{
-    LZ4_stream_t* lz4ds = (LZ4_stream_t*)ALLOCATOR(8, sizeof(LZ4_stream_t));
-    LZ4_init (lz4ds, (BYTE*)inputBuffer);
-    return lz4ds;
-}
-
-char* LZ4_slideInputBuffer (void* LZ4_Data)
-{
-    LZ4_stream_t_internal* ctx = &((LZ4_stream_t*)LZ4_Data)->internal_donotuse;
-    int dictSize = LZ4_saveDict((LZ4_stream_t*)LZ4_Data, (char*)ctx->bufferStart, 64 KB);
-    return (char*)(ctx->bufferStart + dictSize);
-}
-
-/* Obsolete streaming decompression functions */
-
-int LZ4_decompress_safe_withPrefix64k(const char* source, char* dest, int compressedSize, int maxOutputSize)
-{
-    return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, endOnInputSize, full, 0, withPrefix64k, (BYTE*)dest - 64 KB, NULL, 64 KB);
-}
-
-int LZ4_decompress_fast_withPrefix64k(const char* source, char* dest, int originalSize)
-{
-    return LZ4_decompress_generic(source, dest, 0, originalSize, endOnOutputSize, full, 0, withPrefix64k, (BYTE*)dest - 64 KB, NULL, 64 KB);
-}
-
-#endif   /* LZ4_COMMONDEFS_ONLY */
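
Taken together, the functions implemented above give the usual one-shot block round trip. A minimal sketch, assuming only the public lz4.h declarations; buffer handling and error reporting are deliberately reduced:

#include <stdlib.h>
#include <string.h>
#include "lz4.h"

/* Compress a buffer with the one-shot API and decompress it again.
 * Returns 0 on success, non-zero on any failure. */
static int lz4_round_trip(const char *src, int srcSize)
{
    int   bound = LZ4_compressBound(srcSize);   /* worst-case compressed size */
    char *comp  = malloc(bound);
    char *back  = malloc(srcSize);
    int   rc    = 1;

    if (comp && back) {
        int csize = LZ4_compress_default(src, comp, srcSize, bound);
        if (csize > 0) {
            int dsize = LZ4_decompress_safe(comp, back, csize, srcSize);
            rc = (dsize == srcSize && memcmp(src, back, srcSize) == 0) ? 0 : 1;
        }
    }
    free(comp);
    free(back);
    return rc;
}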

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/lz4.h
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/lz4.h b/thirdparty/librdkafka-0.11.1/src/lz4.h
deleted file mode 100644
index 588de22..0000000
--- a/thirdparty/librdkafka-0.11.1/src/lz4.h
+++ /dev/null
@@ -1,463 +0,0 @@
-/*
- *  LZ4 - Fast LZ compression algorithm
- *  Header File
- *  Copyright (C) 2011-2017, Yann Collet.
-
-   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-   Redistribution and use in source and binary forms, with or without
-   modification, are permitted provided that the following conditions are
-   met:
-
-       * Redistributions of source code must retain the above copyright
-   notice, this list of conditions and the following disclaimer.
-       * Redistributions in binary form must reproduce the above
-   copyright notice, this list of conditions and the following disclaimer
-   in the documentation and/or other materials provided with the
-   distribution.
-
-   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-   You can contact the author at :
-    - LZ4 homepage : http://www.lz4.org
-    - LZ4 source repository : https://github.com/lz4/lz4
-*/
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-#ifndef LZ4_H_2983827168210
-#define LZ4_H_2983827168210
-
-/* --- Dependency --- */
-#include <stddef.h>   /* size_t */
-
-
-/**
-  Introduction
-
-  LZ4 is a lossless compression algorithm, providing compression speeds of 400 MB/s per core,
-  scalable with multi-core CPUs. It features an extremely fast decoder, with speeds of
-  multiple GB/s per core, typically reaching RAM speed limits on multi-core systems.
-
-  The LZ4 compression library provides in-memory compression and decompression functions.
-  Compression can be done in:
-    - a single step (described as Simple Functions)
-    - a single step, reusing a context (described in Advanced Functions)
-    - unbounded multiple steps (described as Streaming compression)
-
-  lz4.h provides block compression functions. It gives full buffer control to the user.
-  Decompressing an lz4-compressed block also requires metadata (such as compressed size).
-  Each application is free to encode such metadata in whichever way it wants.
-
-  An additional format, called the LZ4 frame specification (doc/lz4_Frame_format.md),
-  takes care of encoding standard metadata alongside LZ4-compressed blocks.
-  If your application requires interoperability, it's recommended to use it.
-  A library is provided to take care of it, see lz4frame.h.
-*/
-
-/*^***************************************************************
-*  Export parameters
-*****************************************************************/
-/*
-*  LZ4_DLL_EXPORT :
-*  Enable exporting of functions when building a Windows DLL
-*  LZ4LIB_API :
-*  Control library symbols visibility.
-*/
-#if defined(LZ4_DLL_EXPORT) && (LZ4_DLL_EXPORT==1)
-#  define LZ4LIB_API __declspec(dllexport)
-#elif defined(LZ4_DLL_IMPORT) && (LZ4_DLL_IMPORT==1)
-#  define LZ4LIB_API __declspec(dllimport) /* It isn't required but allows generation of better code, saving a function pointer load from the IAT and an indirect jump.*/
-#elif defined(__GNUC__) && (__GNUC__ >= 4)
-#  define LZ4LIB_API __attribute__ ((__visibility__ ("default")))
-#else
-#  define LZ4LIB_API
-#endif
-
-
-/*------   Version   ------*/
-#define LZ4_VERSION_MAJOR    1    /* for breaking interface changes  */
-#define LZ4_VERSION_MINOR    7    /* for new (non-breaking) interface capabilities */
-#define LZ4_VERSION_RELEASE  6    /* for tweaks, bug-fixes, or development */
-
-#define LZ4_VERSION_NUMBER (LZ4_VERSION_MAJOR *100*100 + LZ4_VERSION_MINOR *100 + LZ4_VERSION_RELEASE)
-
-#define LZ4_LIB_VERSION LZ4_VERSION_MAJOR.LZ4_VERSION_MINOR.LZ4_VERSION_RELEASE
-#define LZ4_QUOTE(str) #str
-#define LZ4_EXPAND_AND_QUOTE(str) LZ4_QUOTE(str)
-#define LZ4_VERSION_STRING LZ4_EXPAND_AND_QUOTE(LZ4_LIB_VERSION)
-
-LZ4LIB_API int LZ4_versionNumber (void);  /**< library version number; to be used when checking dll version */
-LZ4LIB_API const char* LZ4_versionString (void);   /**< library version string; to be used when checking dll version */
-
-
-/*-************************************
-*  Tuning parameter
-**************************************/
-/*!
- * LZ4_MEMORY_USAGE :
- * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
- * Increasing memory usage improves compression ratio
- * Reduced memory usage can improve speed, due to cache effect
- * Default value is 14, for 16KB, which nicely fits into Intel x86 L1 cache
- */
-#ifndef LZ4_MEMORY_USAGE
-# define LZ4_MEMORY_USAGE 14
-#endif
-
-/*-************************************
-*  Simple Functions
-**************************************/
-/*! LZ4_compress_default() :
-    Compresses 'sourceSize' bytes from buffer 'source'
-    into already allocated 'dest' buffer of size 'maxDestSize'.
-    Compression is guaranteed to succeed if 'maxDestSize' >= LZ4_compressBound(sourceSize).
-    It also runs faster, so it's a recommended setting.
-    If the function cannot compress 'source' into a more limited 'dest' budget,
-    compression stops *immediately*, and the function result is zero.
-    As a consequence, 'dest' content is not valid.
-    This function never writes outside the 'dest' buffer, nor reads outside the 'source' buffer.
-        sourceSize  : Max supported value is LZ4_MAX_INPUT_SIZE
-        maxDestSize : full or partial size of buffer 'dest' (which must be already allocated)
-        return : the number of bytes written into buffer 'dest' (necessarily <= maxDestSize)
-              or 0 if compression fails */
-LZ4LIB_API int LZ4_compress_default(const char* source, char* dest, int sourceSize, int maxDestSize);
-
-/*! LZ4_decompress_safe() :
-    compressedSize : is the precise full size of the compressed block.
-    maxDecompressedSize : is the size of destination buffer, which must be already allocated.
-    return : the number of bytes decompressed into destination buffer (necessarily <= maxDecompressedSize)
-             If destination buffer is not large enough, decoding will stop and output an error code (<0).
-             If the source stream is detected malformed, the function will stop decoding and return a negative result.
-             This function is protected against buffer overflow exploits, including malicious data packets.
-             It never writes outside output buffer, nor reads outside input buffer.
-*/
-LZ4LIB_API int LZ4_decompress_safe (const char* source, char* dest, int compressedSize, int maxDecompressedSize);
-
-
-/*-************************************
-*  Advanced Functions
-**************************************/
-#define LZ4_MAX_INPUT_SIZE        0x7E000000   /* 2 113 929 216 bytes */
-#define LZ4_COMPRESSBOUND(isize)  ((unsigned)(isize) > (unsigned)LZ4_MAX_INPUT_SIZE ? 0 : (isize) + ((isize)/255) + 16)
-
-/*!
-LZ4_compressBound() :
-    Provides the maximum size that LZ4 compression may output in a "worst case" scenario (input data not compressible)
-    This function is primarily useful for memory allocation purposes (destination buffer size).
-    Macro LZ4_COMPRESSBOUND() is also provided for compilation-time evaluation (stack memory allocation for example).
-    Note that LZ4_compress_default() compresses faster when dest buffer size is >= LZ4_compressBound(srcSize)
-        inputSize  : max supported value is LZ4_MAX_INPUT_SIZE
-        return : maximum output size in a "worst case" scenario
-              or 0, if input size is too large ( > LZ4_MAX_INPUT_SIZE)
-*/
-LZ4LIB_API int LZ4_compressBound(int inputSize);
-
-/*!
-LZ4_compress_fast() :
-    Same as LZ4_compress_default(), but allows selection of an "acceleration" factor.
-    The larger the acceleration value, the faster the algorithm, but also the lower the compression ratio.
-    It's a trade-off. It can be fine-tuned, with each successive value providing roughly +~3% more speed.
-    An acceleration value of "1" is the same as regular LZ4_compress_default().
-    Values <= 0 will be replaced by ACCELERATION_DEFAULT (see lz4.c), which is 1.
-*/
-LZ4LIB_API int LZ4_compress_fast (const char* source, char* dest, int sourceSize, int maxDestSize, int acceleration);
-
-
-/*!
-LZ4_compress_fast_extState() :
-    Same compression function, just using an externally allocated memory space to store compression state.
-    Use LZ4_sizeofState() to know how much memory must be allocated,
-    and allocate it on 8-bytes boundaries (using malloc() typically).
-    Then, provide it as 'void* state' to compression function.
-*/
-LZ4LIB_API int LZ4_sizeofState(void);
-LZ4LIB_API int LZ4_compress_fast_extState (void* state, const char* source, char* dest, int inputSize, int maxDestSize, int acceleration);
-
-
-/*!
-LZ4_compress_destSize() :
-    Reverses the logic: compresses as much data as possible from the 'source' buffer
-    into the already allocated buffer 'dest' of size 'targetDestSize'.
-    This function either compresses the entire 'source' content into 'dest' if it's large enough,
-    or fills the 'dest' buffer completely with as much data as possible from 'source'.
-        *sourceSizePtr : will be modified to indicate how many bytes were read from 'source' to fill 'dest'.
-                         New value is necessarily <= old value.
-        return : Nb of bytes written into 'dest' (necessarily <= targetDestSize)
-              or 0 if compression fails
-*/
-LZ4LIB_API int LZ4_compress_destSize (const char* source, char* dest, int* sourceSizePtr, int targetDestSize);
-
-
-/*!
-LZ4_decompress_fast() :
-    originalSize : is the original and therefore uncompressed size
-    return : the number of bytes read from the source buffer (in other words, the compressed size)
-             If the source stream is detected malformed, the function will stop decoding and return a negative result.
-             Destination buffer must be already allocated. Its size must be a minimum of 'originalSize' bytes.
-    note : This function fully respects memory boundaries for properly formed compressed data.
-           It is a bit faster than LZ4_decompress_safe().
-           However, it does not provide any protection against intentionally modified data stream (malicious input).
-           Use this function in trusted environment only (data to decode comes from a trusted source).
-*/
-LZ4LIB_API int LZ4_decompress_fast (const char* source, char* dest, int originalSize);
-
-/*!
-LZ4_decompress_safe_partial() :
-    This function decompresses a compressed block of size 'compressedSize' at position 'source'
-    into the destination buffer 'dest' of size 'maxDecompressedSize'.
-    The function tries to stop the decompression operation as soon as 'targetOutputSize' has been reached,
-    reducing decompression time.
-    return : the number of bytes decoded in the destination buffer (necessarily <= maxDecompressedSize)
-       Note : this number can be < 'targetOutputSize' should the compressed block to decode be smaller.
-             Always control how many bytes were decoded.
-             If the source stream is detected malformed, the function will stop decoding and return a negative result.
-             This function never writes outside of output buffer, and never reads outside of input buffer. It is therefore protected against malicious data packets
-*/
-LZ4LIB_API int LZ4_decompress_safe_partial (const char* source, char* dest, int compressedSize, int targetOutputSize, int maxDecompressedSize);
-
-
-/*-*********************************************
-*  Streaming Compression Functions
-***********************************************/
-typedef union LZ4_stream_u LZ4_stream_t;   /* incomplete type (defined later) */
-
-/*! LZ4_createStream() and LZ4_freeStream() :
- *  LZ4_createStream() will allocate and initialize an `LZ4_stream_t` structure.
- *  LZ4_freeStream() releases its memory.
- */
-LZ4LIB_API LZ4_stream_t* LZ4_createStream(void);
-LZ4LIB_API int           LZ4_freeStream (LZ4_stream_t* streamPtr);
-
-/*! LZ4_resetStream() :
- *  An LZ4_stream_t structure can be allocated once and re-used multiple times.
- *  Use this function to init an allocated `LZ4_stream_t` structure and start a new compression.
- */
-LZ4LIB_API void LZ4_resetStream (LZ4_stream_t* streamPtr);
-
-/*! LZ4_loadDict() :
- *  Use this function to load a static dictionary into LZ4_stream.
- *  Any previous data will be forgotten, only 'dictionary' will remain in memory.
- *  Loading a size of 0 is allowed.
- *  Return : dictionary size, in bytes (necessarily <= 64 KB)
- */
-LZ4LIB_API int LZ4_loadDict (LZ4_stream_t* streamPtr, const char* dictionary, int dictSize);
-
-/*! LZ4_compress_fast_continue() :
- *  Compress buffer content 'src', using data from previously compressed blocks as a dictionary to improve compression ratio.
- *  Important : Previous data blocks are assumed to still be present and unmodified !
- *  'dst' buffer must be already allocated.
- *  If maxDstSize >= LZ4_compressBound(srcSize), compression is guaranteed to succeed, and runs faster.
- *  If not, and if the compressed data cannot fit into the 'dst' buffer, compression stops, and the function returns zero.
- */
-LZ4LIB_API int LZ4_compress_fast_continue (LZ4_stream_t* streamPtr, const char* src, char* dst, int srcSize, int maxDstSize, int acceleration);
-
-/*! LZ4_saveDict() :
- *  If previously compressed data block is not guaranteed to remain available at its memory location,
- *  save it into a safer place (char* safeBuffer).
- *  Note : you don't need to call LZ4_loadDict() afterwards,
- *         dictionary is immediately usable, you can therefore call LZ4_compress_fast_continue().
- *  Return : saved dictionary size in bytes (necessarily <= dictSize), or 0 if error.
- */
-LZ4LIB_API int LZ4_saveDict (LZ4_stream_t* streamPtr, char* safeBuffer, int dictSize);
-
-
-/*-**********************************************
-*  Streaming Decompression Functions
-*  Bufferless synchronous API
-************************************************/
-typedef union LZ4_streamDecode_u LZ4_streamDecode_t;   /* incomplete type (defined later) */
-
-/*! LZ4_createStreamDecode() and LZ4_freeStreamDecode() :
- *  creation / destruction of streaming decompression tracking structure */
-LZ4LIB_API LZ4_streamDecode_t* LZ4_createStreamDecode(void);
-LZ4LIB_API int                 LZ4_freeStreamDecode (LZ4_streamDecode_t* LZ4_stream);
-
-/*! LZ4_setStreamDecode() :
- *  Use this function to instruct where to find the dictionary.
- *  Setting a size of 0 is allowed (same effect as reset).
- *  @return : 1 if OK, 0 if error
- */
-LZ4LIB_API int LZ4_setStreamDecode (LZ4_streamDecode_t* LZ4_streamDecode, const char* dictionary, int dictSize);
-
-/*!
-LZ4_decompress_*_continue() :
-    These decoding functions allow decompression of multiple blocks in "streaming" mode.
-    Previously decoded blocks *must* remain available at the memory position where they were decoded (up to 64 KB)
-    In the case of a ring buffer, the decoding buffer must be either :
-    - Exactly same size as encoding buffer, with same update rule (block boundaries at same positions)
-      In which case, the decoding & encoding ring buffer can have any size, including very small ones ( < 64 KB).
-    - Larger than encoding buffer, by a minimum of maxBlockSize more bytes.
-      maxBlockSize is implementation dependent. It's the maximum size you intend to compress into a single block.
-      In which case, encoding and decoding buffers do not need to be synchronized,
-      and encoding ring buffer can have any size, including small ones ( < 64 KB).
-    - _At least_ 64 KB + 8 bytes + maxBlockSize.
-      In which case, encoding and decoding buffers do not need to be synchronized,
-      and encoding ring buffer can have any size, including larger than decoding buffer.
-    Whenever these conditions are not possible, save the last 64KB of decoded data into a safe buffer,
-    and indicate where it is saved using LZ4_setStreamDecode()
-*/
-LZ4LIB_API int LZ4_decompress_safe_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int compressedSize, int maxDecompressedSize);
-LZ4LIB_API int LZ4_decompress_fast_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int originalSize);
-
-
-/*! LZ4_decompress_*_usingDict() :
- *  These decoding functions work the same as
- *  a combination of LZ4_setStreamDecode() followed by LZ4_decompress_*_continue()
- *  They are stand-alone, and don't need an LZ4_streamDecode_t structure.
- */
-LZ4LIB_API int LZ4_decompress_safe_usingDict (const char* source, char* dest, int compressedSize, int maxDecompressedSize, const char* dictStart, int dictSize);
-LZ4LIB_API int LZ4_decompress_fast_usingDict (const char* source, char* dest, int originalSize, const char* dictStart, int dictSize);
-
-
-/*^**********************************************
- * !!!!!!   STATIC LINKING ONLY   !!!!!!
- ***********************************************/
-/*-************************************
- *  Private definitions
- **************************************
- * Do not use these definitions.
- * They are exposed to allow static allocation of `LZ4_stream_t` and `LZ4_streamDecode_t`.
- * Using these definitions will expose code to API and/or ABI break in future versions of the library.
- **************************************/
-#define LZ4_HASHLOG   (LZ4_MEMORY_USAGE-2)
-#define LZ4_HASHTABLESIZE (1 << LZ4_MEMORY_USAGE)
-#define LZ4_HASH_SIZE_U32 (1 << LZ4_HASHLOG)       /* required as macro for static allocation */
-
-#if defined(__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
-#include <stdint.h>
-
-typedef struct {
-    uint32_t hashTable[LZ4_HASH_SIZE_U32];
-    uint32_t currentOffset;
-    uint32_t initCheck;
-    const uint8_t* dictionary;
-    uint8_t* bufferStart;   /* obsolete, used for slideInputBuffer */
-    uint32_t dictSize;
-} LZ4_stream_t_internal;
-
-typedef struct {
-    const uint8_t* externalDict;
-    size_t extDictSize;
-    const uint8_t* prefixEnd;
-    size_t prefixSize;
-} LZ4_streamDecode_t_internal;
-
-#else
-
-typedef struct {
-    unsigned int hashTable[LZ4_HASH_SIZE_U32];
-    unsigned int currentOffset;
-    unsigned int initCheck;
-    const unsigned char* dictionary;
-    unsigned char* bufferStart;   /* obsolete, used for slideInputBuffer */
-    unsigned int dictSize;
-} LZ4_stream_t_internal;
-
-typedef struct {
-    const unsigned char* externalDict;
-    size_t extDictSize;
-    const unsigned char* prefixEnd;
-    size_t prefixSize;
-} LZ4_streamDecode_t_internal;
-
-#endif
-
-/*!
- * LZ4_stream_t :
- * information structure to track an LZ4 stream.
- * init this structure before first use.
- * note : only use in association with static linking !
- *        this definition is not API/ABI safe,
- *        and may change in a future version !
- */
-#define LZ4_STREAMSIZE_U64 ((1 << (LZ4_MEMORY_USAGE-3)) + 4)
-#define LZ4_STREAMSIZE     (LZ4_STREAMSIZE_U64 * sizeof(unsigned long long))
-union LZ4_stream_u {
-    unsigned long long table[LZ4_STREAMSIZE_U64];
-    LZ4_stream_t_internal internal_donotuse;
-} ;  /* previously typedef'd to LZ4_stream_t */
-
-
-/*!
- * LZ4_streamDecode_t :
- * information structure to track an LZ4 stream during decompression.
- * init this structure  using LZ4_setStreamDecode (or memset()) before first use
- * note : only use in association with static linking !
- *        this definition is not API/ABI safe,
- *        and may change in a future version !
- */
-#define LZ4_STREAMDECODESIZE_U64  4
-#define LZ4_STREAMDECODESIZE     (LZ4_STREAMDECODESIZE_U64 * sizeof(unsigned long long))
-union LZ4_streamDecode_u {
-    unsigned long long table[LZ4_STREAMDECODESIZE_U64];
-    LZ4_streamDecode_t_internal internal_donotuse;
-} ;   /* previously typedef'd to LZ4_streamDecode_t */
-
-
-/*-************************************
-*  Obsolete Functions
-**************************************/
-
-/*! Deprecation warnings
-   Should deprecation warnings be a problem,
-   it is generally possible to disable them,
-   typically with -Wno-deprecated-declarations for gcc
-   or _CRT_SECURE_NO_WARNINGS in Visual.
-   Otherwise, it's also possible to define LZ4_DISABLE_DEPRECATE_WARNINGS */
-#ifdef LZ4_DISABLE_DEPRECATE_WARNINGS
-#  define LZ4_DEPRECATED(message)   /* disable deprecation warnings */
-#else
-#  define LZ4_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
-#  if defined (__cplusplus) && (__cplusplus >= 201402) /* C++14 or greater */
-#    define LZ4_DEPRECATED(message) [[deprecated(message)]]
-#  elif (LZ4_GCC_VERSION >= 405) || defined(__clang__)
-#    define LZ4_DEPRECATED(message) __attribute__((deprecated(message)))
-#  elif (LZ4_GCC_VERSION >= 301)
-#    define LZ4_DEPRECATED(message) __attribute__((deprecated))
-#  elif defined(_MSC_VER)
-#    define LZ4_DEPRECATED(message) __declspec(deprecated(message))
-#  else
-#    pragma message("WARNING: You need to implement LZ4_DEPRECATED for this compiler")
-#    define LZ4_DEPRECATED(message)
-#  endif
-#endif /* LZ4_DISABLE_DEPRECATE_WARNINGS */
-
-/* Obsolete compression functions */
-LZ4LIB_API LZ4_DEPRECATED("use LZ4_compress_default() instead") int LZ4_compress               (const char* source, char* dest, int sourceSize);
-LZ4LIB_API LZ4_DEPRECATED("use LZ4_compress_default() instead") int LZ4_compress_limitedOutput (const char* source, char* dest, int sourceSize, int maxOutputSize);
-LZ4LIB_API LZ4_DEPRECATED("use LZ4_compress_fast_extState() instead") int LZ4_compress_withState               (void* state, const char* source, char* dest, int inputSize);
-LZ4LIB_API LZ4_DEPRECATED("use LZ4_compress_fast_extState() instead") int LZ4_compress_limitedOutput_withState (void* state, const char* source, char* dest, int inputSize, int maxOutputSize);
-LZ4LIB_API LZ4_DEPRECATED("use LZ4_compress_fast_continue() instead") int LZ4_compress_continue                (LZ4_stream_t* LZ4_streamPtr, const char* source, char* dest, int inputSize);
-LZ4LIB_API LZ4_DEPRECATED("use LZ4_compress_fast_continue() instead") int LZ4_compress_limitedOutput_continue  (LZ4_stream_t* LZ4_streamPtr, const char* source, char* dest, int inputSize, int maxOutputSize);
-
-/* Obsolete decompression functions */
-LZ4LIB_API LZ4_DEPRECATED("use LZ4_decompress_fast() instead") int LZ4_uncompress (const char* source, char* dest, int outputSize);
-LZ4LIB_API LZ4_DEPRECATED("use LZ4_decompress_safe() instead") int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize);
-
-/* Obsolete streaming functions; use new streaming interface whenever possible */
-LZ4LIB_API LZ4_DEPRECATED("use LZ4_createStream() instead") void* LZ4_create (char* inputBuffer);
-LZ4LIB_API LZ4_DEPRECATED("use LZ4_createStream() instead") int   LZ4_sizeofStreamState(void);
-LZ4LIB_API LZ4_DEPRECATED("use LZ4_resetStream() instead")  int   LZ4_resetStreamState(void* state, char* inputBuffer);
-LZ4LIB_API LZ4_DEPRECATED("use LZ4_saveDict() instead")     char* LZ4_slideInputBuffer (void* state);
-
-/* Obsolete streaming decoding functions */
-LZ4LIB_API LZ4_DEPRECATED("use LZ4_decompress_safe_usingDict() instead") int LZ4_decompress_safe_withPrefix64k (const char* src, char* dst, int compressedSize, int maxDstSize);
-LZ4LIB_API LZ4_DEPRECATED("use LZ4_decompress_fast_usingDict() instead") int LZ4_decompress_fast_withPrefix64k (const char* src, char* dst, int originalSize);
-
-#endif /* LZ4_H_2983827168210 */
-
-#if defined (__cplusplus)
-}
-#endif
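
The streaming declarations above are typically driven with a double-buffer pattern, so that the previously compressed block stays resident in memory as LZ4_compress_fast_continue() requires. A minimal sketch, assuming stdio-based I/O and an illustrative raw size-prefix framing (real interoperable framing is what lz4frame.h provides):

#include <stdio.h>
#include "lz4.h"

#define CHUNK 65536

/* Compress 'in' chunk by chunk; each chunk may use the previous one as a dictionary,
 * so the output must be decoded in order (e.g. with LZ4_decompress_safe_continue()). */
static int compress_stream(FILE *in, FILE *out)
{
    static char inbuf[2][CHUNK];                 /* ping-pong buffers keep the previous chunk resident */
    static char outbuf[LZ4_COMPRESSBOUND(CHUNK)];
    LZ4_stream_t *ls = LZ4_createStream();
    int idx = 0;

    if (!ls) return -1;
    for (;;) {
        int n = (int)fread(inbuf[idx], 1, CHUNK, in);
        if (n <= 0) break;
        int c = LZ4_compress_fast_continue(ls, inbuf[idx], outbuf, n, (int)sizeof(outbuf), 1);
        if (c <= 0) { LZ4_freeStream(ls); return -1; }
        fwrite(&c, sizeof(c), 1, out);           /* illustrative framing: raw int size prefix */
        fwrite(outbuf, 1, (size_t)c, out);
        idx ^= 1;                                /* previous chunk stays valid in the other buffer */
    }
    LZ4_freeStream(ls);
    return 0;
}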


http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdkafka_cgrp.h
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdkafka_cgrp.h b/thirdparty/librdkafka-0.11.1/src/rdkafka_cgrp.h
deleted file mode 100644
index 0424b5d..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdkafka_cgrp.h
+++ /dev/null
@@ -1,275 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2015, Magnus Edenhill
- * All rights reserved.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met: 
- * 
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer. 
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution. 
- * 
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-#pragma once
-
-#include "rdinterval.h"
-
-#include "rdkafka_assignor.h"
-
-/**
- * Client groups implementation
- *
- * Client groups handling for a single cgrp is assigned to a single
- * rd_kafka_broker_t object at any given time.
- * The main thread will call cgrp_serve() to serve its cgrps.
- *
- * This means that the cgrp itself does not need to be locked since it
- * is only ever used from the main thread.
- *
- */
-
-
-extern const char *rd_kafka_cgrp_join_state_names[];
-
-/**
- * Client group
- */
-typedef struct rd_kafka_cgrp_s {
-        TAILQ_ENTRY(rd_kafka_cgrp_s) rkcg_rkb_link;  /* rkb_cgrps */
-        const rd_kafkap_str_t    *rkcg_group_id;
-        rd_kafkap_str_t          *rkcg_member_id;  /* Last assigned MemberId */
-        const rd_kafkap_str_t    *rkcg_client_id;
-
-        enum {
-                /* Init state */
-                RD_KAFKA_CGRP_STATE_INIT,
-
-                /* Cgrp has been stopped. This is a final state */
-                RD_KAFKA_CGRP_STATE_TERM,
-
-                /* Query for group coordinator */
-                RD_KAFKA_CGRP_STATE_QUERY_COORD,
-
-                /* Outstanding query, awaiting response */
-                RD_KAFKA_CGRP_STATE_WAIT_COORD,
-
-                /* Wait ack from assigned cgrp manager broker thread */
-                RD_KAFKA_CGRP_STATE_WAIT_BROKER,
-
-                /* Wait for manager broker thread to connect to broker */
-                RD_KAFKA_CGRP_STATE_WAIT_BROKER_TRANSPORT,
-
-                /* Coordinator is up and manager is assigned. */
-                RD_KAFKA_CGRP_STATE_UP,
-        } rkcg_state;
-        rd_ts_t            rkcg_ts_statechange;     /* Timestamp of last
-                                                     * state change. */
-
-
-        enum {
-                RD_KAFKA_CGRP_JOIN_STATE_INIT,
-
-                /* all: JoinGroupRequest sent, awaiting response. */
-                RD_KAFKA_CGRP_JOIN_STATE_WAIT_JOIN,
-
-                /* Leader: MetadataRequest sent, awaiting response. */
-                RD_KAFKA_CGRP_JOIN_STATE_WAIT_METADATA,
-
-                /* Follower: SyncGroupRequest sent, awaiting response. */
-                RD_KAFKA_CGRP_JOIN_STATE_WAIT_SYNC,
-
-                /* all: waiting for previous assignment to decommission */
-                RD_KAFKA_CGRP_JOIN_STATE_WAIT_UNASSIGN,
-
-                /* all: waiting for application's rebalance_cb to assign() */
-                RD_KAFKA_CGRP_JOIN_STATE_WAIT_ASSIGN_REBALANCE_CB,
-
-		/* all: waiting for application's rebalance_cb to revoke */
-                RD_KAFKA_CGRP_JOIN_STATE_WAIT_REVOKE_REBALANCE_CB,
-
-                /* all: synchronized and assigned
-                 *      may be an empty assignment. */
-                RD_KAFKA_CGRP_JOIN_STATE_ASSIGNED,
-
-		/* all: fetchers are started and operational */
-		RD_KAFKA_CGRP_JOIN_STATE_STARTED
-        } rkcg_join_state;
-
-        /* State when group leader */
-        struct {
-                char *protocol;
-                rd_kafka_group_member_t *members;
-                int member_cnt;
-        } rkcg_group_leader;
-
-        rd_kafka_q_t      *rkcg_q;                  /* Application poll queue */
-        rd_kafka_q_t      *rkcg_ops;                /* Manager ops queue */
-	rd_kafka_q_t      *rkcg_wait_coord_q;       /* Ops awaiting coord */
-	int32_t            rkcg_version;            /* Ops queue version barrier
-						     * Increased by:
-						     *  Rebalance delegation
-						     *  Assign/Unassign
-						     */
-        mtx_t              rkcg_lock;
-
-        int                rkcg_flags;
-#define RD_KAFKA_CGRP_F_TERMINATE    0x1            /* Terminate cgrp (async) */
-#define RD_KAFKA_CGRP_F_WAIT_UNASSIGN 0x4           /* Waiting for unassign
-						     * to complete */
-#define RD_KAFKA_CGRP_F_LEAVE_ON_UNASSIGN 0x8       /* Send LeaveGroup when
-						     * unassign is done */
-#define RD_KAFKA_CGRP_F_SUBSCRIPTION 0x10           /* If set:
-                                                     *   subscription
-                                                     * else:
-                                                     *   static assignment */
-#define RD_KAFKA_CGRP_F_HEARTBEAT_IN_TRANSIT  0x20  /* A Heartbeat request
-                                                     * is in transit, don't
-                                                     * send a new one. */
-#define RD_KAFKA_CGRP_F_WILDCARD_SUBSCRIPTION 0x40  /* Subscription contains
-                                                     * wildcards. */
-
-        rd_interval_t      rkcg_coord_query_intvl;  /* Coordinator query intvl*/
-        rd_interval_t      rkcg_heartbeat_intvl;    /* Heartbeat intvl */
-        rd_interval_t      rkcg_join_intvl;         /* JoinGroup interval */
-        rd_interval_t      rkcg_timeout_scan_intvl; /* Timeout scanner */
-
-        TAILQ_HEAD(, rd_kafka_topic_s)  rkcg_topics;/* Topics subscribed to */
-
-        rd_list_t          rkcg_toppars;            /* Toppars subscribed to*/
-
-	int                rkcg_assigned_cnt;       /* Assigned partitions */
-
-        int32_t            rkcg_coord_id;           /* Current coordinator id */
-
-        int32_t            rkcg_generation_id;      /* Current generation id */
-
-        rd_kafka_assignor_t *rkcg_assignor;         /* Selected partition
-                                                     * assignor strategy. */
-
-        rd_kafka_broker_t *rkcg_rkb;                /* Current handling broker,
-                                                     * if the coordinator broker
-                                                     * is not available this
-                                                     * will be another broker
-                                                     * that will handle the
-                                                     * querying of coordinator
-                                                     * etc.
-                                                     * Broker in this sense
-                                                     * is a broker_t object,
-                                                     * not necessarily a
-                                                     * real broker. */
-
-        /* Current subscription */
-        rd_kafka_topic_partition_list_t *rkcg_subscription;
-	/* The actual topics subscribed (after metadata+wildcard matching) */
-	rd_list_t *rkcg_subscribed_topics; /**< (rd_kafka_topic_info_t *) */
-
-        /* Current assignment */
-        rd_kafka_topic_partition_list_t *rkcg_assignment;
-
-        int rkcg_wait_unassign_cnt;                 /* Waiting for this number
-                                                     * of partitions to be
-                                                     * unassigned and
-                                                     * decommissioned before
-                                                     * transitioning to the
-                                                     * next state. */
-
-	int rkcg_wait_commit_cnt;                   /* Waiting for this number
-						     * of commits to finish. */
-
-        rd_kafka_resp_err_t rkcg_last_err;          /* Last error propagated to
-                                                     * application.
-                                                     * This is for silencing
-                                                     * same errors. */
-
-        rd_kafka_timer_t   rkcg_offset_commit_tmr;  /* Offset commit timer */
-
-        rd_kafka_t        *rkcg_rk;
-
-        rd_kafka_op_t     *rkcg_reply_rko;          /* Send reply for op
-                                                     * (OP_TERMINATE)
-                                                     * to this rko's queue. */
-
-	rd_ts_t            rkcg_ts_terminate;       /* Timestamp of when
-						     * cgrp termination was
-						     * initiated. */
-
-        /* Protected by rd_kafka_*lock() */
-        struct {
-                rd_ts_t            ts_rebalance;       /* Timestamp of
-                                                        * last rebalance */
-                int                rebalance_cnt;      /* Number of
-                                                          rebalances */
-                int                assignment_size;    /* Partition count
-                                                        * of last rebalance
-                                                        * assignment */
-        } rkcg_c;
-
-} rd_kafka_cgrp_t;
-
-
-
-
-#define rd_kafka_cgrp_lock(rkcg)    mtx_lock(&(rkcg)->rkcg_lock)
-#define rd_kafka_cgrp_unlock(rkcg)  mtx_unlock(&(rkcg)->rkcg_lock)
-
-/* Check if broker is the coordinator */
-#define RD_KAFKA_CGRP_BROKER_IS_COORD(rkcg,rkb)          \
-        ((rkcg)->rkcg_coord_id != -1 &&                  \
-         (rkcg)->rkcg_coord_id == (rkb)->rkb_nodeid)
-
-extern const char *rd_kafka_cgrp_state_names[];
-
-void rd_kafka_cgrp_destroy_final (rd_kafka_cgrp_t *rkcg);
-rd_kafka_cgrp_t *rd_kafka_cgrp_new (rd_kafka_t *rk,
-                                    const rd_kafkap_str_t *group_id,
-                                    const rd_kafkap_str_t *client_id);
-void rd_kafka_cgrp_serve (rd_kafka_cgrp_t *rkcg);
-
-void rd_kafka_cgrp_op (rd_kafka_cgrp_t *rkcg, rd_kafka_toppar_t *rktp,
-                       rd_kafka_replyq_t replyq, rd_kafka_op_type_t type,
-                       rd_kafka_resp_err_t err);
-void rd_kafka_cgrp_terminate0 (rd_kafka_cgrp_t *rkcg, rd_kafka_op_t *rko);
-void rd_kafka_cgrp_terminate (rd_kafka_cgrp_t *rkcg, rd_kafka_replyq_t replyq);
-
-
-rd_kafka_resp_err_t rd_kafka_cgrp_topic_pattern_del (rd_kafka_cgrp_t *rkcg,
-                                                     const char *pattern);
-rd_kafka_resp_err_t rd_kafka_cgrp_topic_pattern_add (rd_kafka_cgrp_t *rkcg,
-                                                     const char *pattern);
-
-int rd_kafka_cgrp_topic_check (rd_kafka_cgrp_t *rkcg, const char *topic);
-
-void rd_kafka_cgrp_set_member_id (rd_kafka_cgrp_t *rkcg, const char *member_id);
-
-void rd_kafka_cgrp_handle_heartbeat_error (rd_kafka_cgrp_t *rkcg,
-					   rd_kafka_resp_err_t err);
-
-void rd_kafka_cgrp_handle_SyncGroup (rd_kafka_cgrp_t *rkcg,
-				     rd_kafka_broker_t *rkb,
-                                     rd_kafka_resp_err_t err,
-                                     const rd_kafkap_bytes_t *member_state);
-void rd_kafka_cgrp_set_join_state (rd_kafka_cgrp_t *rkcg, int join_state);
-
-int rd_kafka_cgrp_reassign_broker (rd_kafka_cgrp_t *rkcg);
-
-void rd_kafka_cgrp_coord_query (rd_kafka_cgrp_t *rkcg,
-				const char *reason);
-void rd_kafka_cgrp_coord_dead (rd_kafka_cgrp_t *rkcg, rd_kafka_resp_err_t err,
-			       const char *reason);
-void rd_kafka_cgrp_metadata_update_check (rd_kafka_cgrp_t *rkcg, int do_join);
-#define rd_kafka_cgrp_get(rk) ((rk)->rk_cgrp)
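
For orientation, the WAIT_ASSIGN_REBALANCE_CB / WAIT_REVOKE_REBALANCE_CB join states above are only left once the application reacts from its rebalance callback by calling rd_kafka_assign(). A minimal sketch of such a callback against the public rdkafka.h API (an illustration, not code from this tree) is:

    #include <librdkafka/rdkafka.h>

    /* Application-side rebalance callback that the cgrp join states wait on. */
    static void rebalance_cb(rd_kafka_t *rk, rd_kafka_resp_err_t err,
                             rd_kafka_topic_partition_list_t *partitions,
                             void *opaque) {
            switch (err) {
            case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS:
                    rd_kafka_assign(rk, partitions);  /* accept the new assignment */
                    break;
            case RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS:
            default:
                    rd_kafka_assign(rk, NULL);        /* drop the current assignment */
                    break;
            }
    }

    /* Registered on the conf object before rd_kafka_new(), e.g.:
     *   rd_kafka_conf_set_rebalance_cb(conf, rebalance_cb);
     * and the group is joined with rd_kafka_subscribe(rk, topics). */

If no rebalance callback is registered, librdkafka performs the assign/unassign itself, which corresponds to the same state transitions.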

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/src/rdkafka_conf.c
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdkafka_conf.c b/thirdparty/librdkafka-0.11.1/src/rdkafka_conf.c
deleted file mode 100644
index a0e4d9f..0000000
--- a/thirdparty/librdkafka-0.11.1/src/rdkafka_conf.c
+++ /dev/null
@@ -1,2151 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012,2013 Magnus Edenhill
- * All rights reserved.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met: 
- * 
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer. 
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution. 
- * 
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "rdkafka_int.h"
-#include "rd.h"
-
-#include <stdlib.h>
-#include <ctype.h>
-#include <stddef.h>
-
-#include "rdkafka_int.h"
-#include "rdkafka_feature.h"
-#include "rdkafka_interceptor.h"
-#if WITH_PLUGINS
-#include "rdkafka_plugin.h"
-#endif
-
-struct rd_kafka_property {
-	rd_kafka_conf_scope_t scope;
-	const char *name;
-	enum {
-		_RK_C_STR,
-		_RK_C_INT,
-		_RK_C_S2I,  /* String to Integer mapping.
-			     * Supports limited canonical str->int mappings
-			     * using s2i[] */
-		_RK_C_S2F,  /* CSV String to Integer flag mapping (OR:ed) */
-		_RK_C_BOOL,
-		_RK_C_PTR,  /* Only settable through special set functions */
-                _RK_C_PATLIST, /* Pattern list */
-                _RK_C_KSTR, /* Kafka string */
-                _RK_C_ALIAS, /* Alias: points to other property through .sdef */
-                _RK_C_INTERNAL, /* Internal, don't expose to application */
-                _RK_C_INVALID,  /* Invalid property, used to catch known
-                                 * but unsupported Java properties. */
-	} type;
-	int   offset;
-	const char *desc;
-	int   vmin;
-	int   vmax;
-	int   vdef;        /* Default value (int) */
-	const char *sdef;  /* Default value (string) */
-        void  *pdef;       /* Default value (pointer) */
-	struct {
-		int val;
-		const char *str;
-	} s2i[16];  /* _RK_C_S2I and _RK_C_S2F */
-
-	/* Value validator (STR) */
-	int (*validate) (const struct rd_kafka_property *prop,
-			 const char *val, int ival);
-
-        /* Configuration object constructors and destructor for use when
-         * the property value itself is not used, or needs extra care. */
-        void (*ctor) (int scope, void *pconf);
-        void (*dtor) (int scope, void *pconf);
-        void (*copy) (int scope, void *pdst, const void *psrc,
-                      void *dstptr, const void *srcptr,
-                      size_t filter_cnt, const char **filter);
-
-        rd_kafka_conf_res_t (*set) (int scope, void *pconf,
-                                    const char *name, const char *value,
-                                    void *dstptr,
-                                    rd_kafka_conf_set_mode_t set_mode,
-                                    char *errstr, size_t errstr_size);
-};
-
-
-#define _RK(field)  offsetof(rd_kafka_conf_t, field)
-#define _RKT(field) offsetof(rd_kafka_topic_conf_t, field)
-
-
-static rd_kafka_conf_res_t
-rd_kafka_anyconf_get0 (const void *conf, const struct rd_kafka_property *prop,
-                       char *dest, size_t *dest_size);
-
-
-/**
- * @brief Validate \p broker.version.fallback property.
- */
-static int
-rd_kafka_conf_validate_broker_version (const struct rd_kafka_property *prop,
-				       const char *val, int ival) {
-	struct rd_kafka_ApiVersion *apis;
-	size_t api_cnt;
-	return rd_kafka_get_legacy_ApiVersions(val, &apis, &api_cnt, NULL);
-}
-
-/**
- * @brief Validate that the string is a single item, without delimiters (',' or space).
- */
-static RD_UNUSED int
-rd_kafka_conf_validate_single (const struct rd_kafka_property *prop,
-				const char *val, int ival) {
-	return !strchr(val, ',') && !strchr(val, ' ');
-}
-
-
-/**
- * librdkafka configuration property definitions.
- */
-static const struct rd_kafka_property rd_kafka_properties[] = {
-	/* Global properties */
-	{ _RK_GLOBAL, "builtin.features", _RK_C_S2F, _RK(builtin_features),
-	"Indicates the builtin features for this build of librdkafka. "
-	"An application can either query this value or attempt to set it "
-	"with its list of required features to check for library support.",
-	0, 0x7fffffff, 0xffff,
-	.s2i = {
-#if WITH_ZLIB
-		{ 0x1, "gzip" },
-#endif
-#if WITH_SNAPPY
-		{ 0x2, "snappy" },
-#endif
-#if WITH_SSL
-		{ 0x4, "ssl" },
-#endif
-                { 0x8, "sasl" },
-		{ 0x10, "regex" },
-		{ 0x20, "lz4" },
-#if defined(_MSC_VER) || WITH_SASL_CYRUS
-                { 0x40, "sasl_gssapi" },
-#endif
-                { 0x80, "sasl_plain" },
-#if WITH_SASL_SCRAM
-                { 0x100, "sasl_scram" },
-#endif
-#if WITH_PLUGINS
-                { 0x200, "plugins" },
-#endif
-		{ 0, NULL }
-		}
-	},
-	{ _RK_GLOBAL, "client.id", _RK_C_STR, _RK(client_id_str),
-	  "Client identifier.",
-	  .sdef =  "rdkafka" },
-	{ _RK_GLOBAL, "metadata.broker.list", _RK_C_STR, _RK(brokerlist),
-	  "Initial list of brokers as a CSV list of broker host or host:port. "
-	  "The application may also use `rd_kafka_brokers_add()` to add "
-	  "brokers during runtime." },
-	{ _RK_GLOBAL, "bootstrap.servers", _RK_C_ALIAS, 0,
-	  "See metadata.broker.list",
-	  .sdef = "metadata.broker.list" },
-	{ _RK_GLOBAL, "message.max.bytes", _RK_C_INT, _RK(max_msg_size),
-	  "Maximum Kafka protocol request message size.",
-	  1000, 1000000000, 1000000 },
-	{ _RK_GLOBAL, "message.copy.max.bytes", _RK_C_INT,
-	  _RK(msg_copy_max_size),
-	  "Maximum size for message to be copied to buffer. "
-	  "Messages larger than this will be passed by reference (zero-copy) "
-	  "at the expense of larger iovecs.",
-	  0, 1000000000, 0xffff },
-	{ _RK_GLOBAL, "receive.message.max.bytes", _RK_C_INT,
-          _RK(recv_max_msg_size),
-	  "Maximum Kafka protocol response message size. "
-	  "This is a safety precaution to avoid memory exhaustion in case of "
-	  "protocol hiccups. "
-          "The value should be at least fetch.message.max.bytes * number of "
-          "partitions consumed from + messaging overhead (e.g. 200000 bytes).",
-	  1000, 1000000000, 100000000 },
-	{ _RK_GLOBAL, "max.in.flight.requests.per.connection", _RK_C_INT,
-	  _RK(max_inflight),
-	  "Maximum number of in-flight requests per broker connection. "
-	  "This is a generic property applied to all broker communication, "
-	  "however it is primarily relevant to produce requests. "
-	  "In particular, note that other mechanisms limit the number "
-	  "of outstanding consumer fetch request per broker to one.",
-	  1, 1000000, 1000000 },
-        { _RK_GLOBAL, "max.in.flight", _RK_C_ALIAS,
-          .sdef = "max.in.flight.requests.per.connection" },
-	{ _RK_GLOBAL, "metadata.request.timeout.ms", _RK_C_INT,
-	  _RK(metadata_request_timeout_ms),
-	  "Non-topic request timeout in milliseconds. "
-	  "This is for metadata requests, etc.",
-	  10, 900*1000, 60*1000},
-	{ _RK_GLOBAL, "topic.metadata.refresh.interval.ms", _RK_C_INT,
-	  _RK(metadata_refresh_interval_ms),
-	  "Topic metadata refresh interval in milliseconds. "
-	  "The metadata is automatically refreshed on error and connect. "
-	  "Use -1 to disable the intervalled refresh.",
-	  -1, 3600*1000, 5*60*1000 },
-	{ _RK_GLOBAL, "metadata.max.age.ms", _RK_C_INT,
-          _RK(metadata_max_age_ms),
-          "Metadata cache max age. "
-          "Defaults to metadata.refresh.interval.ms * 3",
-          1, 24*3600*1000, -1 },
-        { _RK_GLOBAL, "topic.metadata.refresh.fast.interval.ms", _RK_C_INT,
-          _RK(metadata_refresh_fast_interval_ms),
-          "When a topic loses its leader a new metadata request will be "
-          "enqueued with this initial interval, exponentially increasing "
-          "until the topic metadata has been refreshed. "
-          "This is used to recover quickly from transitioning leader brokers.",
-          1, 60*1000, 250 },
-        { _RK_GLOBAL, "topic.metadata.refresh.fast.cnt", _RK_C_INT,
-          _RK(metadata_refresh_fast_cnt),
-          "*Deprecated: No longer used.*",
-          0, 1000, 10 },
-        { _RK_GLOBAL, "topic.metadata.refresh.sparse", _RK_C_BOOL,
-          _RK(metadata_refresh_sparse),
-          "Sparse metadata requests (consumes less network bandwidth)",
-          0, 1, 1 },
-        { _RK_GLOBAL, "topic.blacklist", _RK_C_PATLIST,
-          _RK(topic_blacklist),
-          "Topic blacklist, a comma-separated list of regular expressions "
-          "for matching topic names that should be ignored in "
-          "broker metadata information as if the topics did not exist." },
-	{ _RK_GLOBAL, "debug", _RK_C_S2F, _RK(debug),
-	  "A comma-separated list of debug contexts to enable. "
-	  "Debugging the Producer: broker,topic,msg. Consumer: cgrp,topic,fetch",
-	  .s2i = {
-                        { RD_KAFKA_DBG_GENERIC,  "generic" },
-			{ RD_KAFKA_DBG_BROKER,   "broker" },
-			{ RD_KAFKA_DBG_TOPIC,    "topic" },
-			{ RD_KAFKA_DBG_METADATA, "metadata" },
-			{ RD_KAFKA_DBG_QUEUE,    "queue" },
-			{ RD_KAFKA_DBG_MSG,      "msg" },
-			{ RD_KAFKA_DBG_PROTOCOL, "protocol" },
-                        { RD_KAFKA_DBG_CGRP,     "cgrp" },
-			{ RD_KAFKA_DBG_SECURITY, "security" },
-			{ RD_KAFKA_DBG_FETCH,    "fetch" },
-			{ RD_KAFKA_DBG_FEATURE,  "feature" },
-                        { RD_KAFKA_DBG_INTERCEPTOR, "interceptor" },
-                        { RD_KAFKA_DBG_PLUGIN,   "plugin" },
-			{ RD_KAFKA_DBG_ALL,      "all" },
-		} },
-	{ _RK_GLOBAL, "socket.timeout.ms", _RK_C_INT, _RK(socket_timeout_ms),
-	  "Timeout for network requests.",
-	  10, 300*1000, 60*1000 },
-	{ _RK_GLOBAL, "socket.blocking.max.ms", _RK_C_INT,
-	  _RK(socket_blocking_max_ms),
-	  "Maximum time a broker socket operation may block. "
-          "A lower value improves responsiveness at the expense of "
-          "slightly higher CPU usage. **Deprecated**",
-	  1, 60*1000, 1000 },
-	{ _RK_GLOBAL, "socket.send.buffer.bytes", _RK_C_INT,
-	  _RK(socket_sndbuf_size),
-	  "Broker socket send buffer size. System default is used if 0.",
-	  0, 100000000, 0 },
-	{ _RK_GLOBAL, "socket.receive.buffer.bytes", _RK_C_INT,
-	  _RK(socket_rcvbuf_size),
-	  "Broker socket receive buffer size. System default is used if 0.",
-	  0, 100000000, 0 },
-	{ _RK_GLOBAL, "socket.keepalive.enable", _RK_C_BOOL,
-	  _RK(socket_keepalive),
-          "Enable TCP keep-alives (SO_KEEPALIVE) on broker sockets",
-          0, 1, 0 },
-	{ _RK_GLOBAL, "socket.nagle.disable", _RK_C_BOOL,
-	  _RK(socket_nagle_disable),
-          "Disable the Nagle algorithm (TCP_NODELAY).",
-          0, 1, 0 },
-        { _RK_GLOBAL, "socket.max.fails", _RK_C_INT,
-          _RK(socket_max_fails),
-          "Disconnect from broker when this number of send failures "
-          "(e.g., timed out requests) is reached. Disable with 0. "
-          "NOTE: The connection is automatically re-established.",
-          0, 1000000, 3 },
-	{ _RK_GLOBAL, "broker.address.ttl", _RK_C_INT,
-	  _RK(broker_addr_ttl),
-	  "How long to cache the broker address resolving "
-          "results (milliseconds).",
-	  0, 86400*1000, 1*1000 },
-        { _RK_GLOBAL, "broker.address.family", _RK_C_S2I,
-          _RK(broker_addr_family),
-          "Allowed broker IP address families: any, v4, v6",
-          .vdef = AF_UNSPEC,
-          .s2i = {
-                        { AF_UNSPEC, "any" },
-                        { AF_INET, "v4" },
-                        { AF_INET6, "v6" },
-                } },
-        { _RK_GLOBAL, "reconnect.backoff.jitter.ms", _RK_C_INT,
-          _RK(reconnect_jitter_ms),
-          "Throttle broker reconnection attempts by this value +-50%.",
-          0, 60*60*1000, 500 },
-	{ _RK_GLOBAL, "statistics.interval.ms", _RK_C_INT,
-	  _RK(stats_interval_ms),
-	  "librdkafka statistics emit interval. The application also needs to "
-	  "register a stats callback using `rd_kafka_conf_set_stats_cb()`. "
-	  "The granularity is 1000ms. A value of 0 disables statistics.",
-	  0, 86400*1000, 0 },
-	{ _RK_GLOBAL, "enabled_events", _RK_C_INT,
-	  _RK(enabled_events),
-	  "See `rd_kafka_conf_set_events()`",
-	  0, 0x7fffffff, 0 },
-	{ _RK_GLOBAL, "error_cb", _RK_C_PTR,
-	  _RK(error_cb),
-	  "Error callback (set with rd_kafka_conf_set_error_cb())" },
-	{ _RK_GLOBAL, "throttle_cb", _RK_C_PTR,
-	  _RK(throttle_cb),
-	  "Throttle callback (set with rd_kafka_conf_set_throttle_cb())" },
-	{ _RK_GLOBAL, "stats_cb", _RK_C_PTR,
-	  _RK(stats_cb),
-	  "Statistics callback (set with rd_kafka_conf_set_stats_cb())" },
-	{ _RK_GLOBAL, "log_cb", _RK_C_PTR,
-	  _RK(log_cb),
-	  "Log callback (set with rd_kafka_conf_set_log_cb())",
-          .pdef = rd_kafka_log_print },
-        { _RK_GLOBAL, "log_level", _RK_C_INT,
-          _RK(log_level),
-          "Logging level (syslog(3) levels)",
-          0, 7, 6 },
-        { _RK_GLOBAL, "log.queue", _RK_C_BOOL, _RK(log_queue),
-          "Disable spontaneous log_cb from internal librdkafka "
-          "threads, instead enqueue log messages on queue set with "
-          "`rd_kafka_set_log_queue()` and serve log callbacks or "
-          "events through the standard poll APIs. "
-          "**NOTE**: Log messages will linger in a temporary queue "
-          "until the log queue has been set.",
-          0, 1, 0 },
-	{ _RK_GLOBAL, "log.thread.name", _RK_C_BOOL,
-	  _RK(log_thread_name),
-	  "Print internal thread name in log messages "
-	  "(useful for debugging librdkafka internals)",
-	  0, 1, 1 },
-	{ _RK_GLOBAL, "log.connection.close", _RK_C_BOOL,
-	  _RK(log_connection_close),
-	  "Log broker disconnects. "
-          "It might be useful to turn this off when interacting with "
-          "0.9 brokers with an aggressive `connection.max.idle.ms` value.",
-	  0, 1, 1 },
-        { _RK_GLOBAL, "socket_cb", _RK_C_PTR,
-          _RK(socket_cb),
-          "Socket creation callback to provide race-free CLOEXEC",
-          .pdef =
-#ifdef __linux__
-          rd_kafka_socket_cb_linux
-#else
-          rd_kafka_socket_cb_generic
-#endif
-        },
-        { _RK_GLOBAL, "connect_cb", _RK_C_PTR,
-          _RK(connect_cb),
-          "Socket connect callback",
-        },
-        { _RK_GLOBAL, "closesocket_cb", _RK_C_PTR,
-          _RK(closesocket_cb),
-          "Socket close callback",
-        },
-        { _RK_GLOBAL, "open_cb", _RK_C_PTR,
-          _RK(open_cb),
-          "File open callback to provide race-free CLOEXEC",
-          .pdef =
-#ifdef __linux__
-          rd_kafka_open_cb_linux
-#else
-          rd_kafka_open_cb_generic
-#endif
-        },
-	{ _RK_GLOBAL, "opaque", _RK_C_PTR,
-	  _RK(opaque),
-	  "Application opaque (set with rd_kafka_conf_set_opaque())" },
-        { _RK_GLOBAL, "default_topic_conf", _RK_C_PTR,
-          _RK(topic_conf),
-          "Default topic configuration for automatically subscribed topics" },
-	{ _RK_GLOBAL, "internal.termination.signal", _RK_C_INT,
-	  _RK(term_sig),
-	  "Signal that librdkafka will use to quickly terminate on "
-	  "rd_kafka_destroy(). If this signal is not set then there will be a "
-	  "delay before rd_kafka_wait_destroyed() returns true "
-	  "as internal threads are timing out their system calls. "
-	  "If this signal is set however the delay will be minimal. "
-	  "The application should mask this signal as an internal "
-	  "signal handler is installed.",
-	  0, 128, 0 },
-	{ _RK_GLOBAL, "api.version.request", _RK_C_BOOL,
-	  _RK(api_version_request),
-	  "Request broker's supported API versions to adjust functionality to "
-	  "available protocol features. If set to false, or the "
-          "ApiVersionRequest fails, the fallback version "
-	  "`broker.version.fallback` will be used. "
-	  "**NOTE**: Depends on broker version >=0.10.0. If the request is not "
-	  "supported by (an older) broker the `broker.version.fallback` fallback is used.",
-	  0, 1, 1 },
-	{ _RK_GLOBAL, "api.version.request.timeout.ms", _RK_C_INT,
-	  _RK(api_version_request_timeout_ms),
-	  "Timeout for broker API version requests.",
-	  1, 5*60*1000, 10*1000 },
-	{ _RK_GLOBAL, "api.version.fallback.ms", _RK_C_INT,
-	  _RK(api_version_fallback_ms),
-	  "Dictates how long the `broker.version.fallback` fallback is used "
-	  "in the case the ApiVersionRequest fails. "
-	  "**NOTE**: The ApiVersionRequest is only issued when a new connection "
-	  "to the broker is made (such as after an upgrade).",
-	  0, 86400*7*1000, 20*60*1000 /* longer than default Idle timeout (10m)*/ },
-
-	{ _RK_GLOBAL, "broker.version.fallback", _RK_C_STR,
-	  _RK(broker_version_fallback),
-	  "Older broker versions (<0.10.0) provides no way for a client to query "
-	  "for supported protocol features "
-	  "(ApiVersionRequest, see `api.version.request`) making it impossible "
-	  "for the client to know what features it may use. "
-	  "As a workaround a user may set this property to the expected broker "
-	  "version and the client will automatically adjust its feature set "
-	  "accordingly if the ApiVersionRequest fails (or is disabled). "
-	  "The fallback broker version will be used for `api.version.fallback.ms`. "
-          "Valid values are: 0.9.0, 0.8.2, 0.8.1, 0.8.0. Any other value, "
-          "such as 0.10.2.1, enables ApiVersionRequests.",
-	  .sdef = "0.9.0",
-	  .validate = rd_kafka_conf_validate_broker_version },
-
-	/* Security related global properties */
-	{ _RK_GLOBAL, "security.protocol", _RK_C_S2I,
-	  _RK(security_protocol),
-	  "Protocol used to communicate with brokers.",
-	  .vdef = RD_KAFKA_PROTO_PLAINTEXT,
-	  .s2i = {
-			{ RD_KAFKA_PROTO_PLAINTEXT, "plaintext" },
-#if WITH_SSL
-			{ RD_KAFKA_PROTO_SSL, "ssl" },
-#endif
-			{ RD_KAFKA_PROTO_SASL_PLAINTEXT, "sasl_plaintext" },
-#if WITH_SSL
-			{ RD_KAFKA_PROTO_SASL_SSL, "sasl_ssl" },
-#endif
-			{ 0, NULL }
-		} },
-
-#if WITH_SSL
-	{ _RK_GLOBAL, "ssl.cipher.suites", _RK_C_STR,
-	  _RK(ssl.cipher_suites),
-	  "A cipher suite is a named combination of authentication, "
-	  "encryption, MAC and key exchange algorithm used to negotiate the "
-	  "security settings for a network connection using TLS or SSL network "
-	  "protocol. See manual page for `ciphers(1)` and "
-	  "`SSL_CTX_set_cipher_list(3)."
-	},
-	{ _RK_GLOBAL, "ssl.key.location", _RK_C_STR,
-	  _RK(ssl.key_location),
-	  "Path to client's private key (PEM) used for authentication."
-	},
-	{ _RK_GLOBAL, "ssl.key.password", _RK_C_STR,
-	  _RK(ssl.key_password),
-	  "Private key passphrase"
-	},
-	{ _RK_GLOBAL, "ssl.certificate.location", _RK_C_STR,
-	  _RK(ssl.cert_location),
-	  "Path to client's public key (PEM) used for authentication."
-	},
-	{ _RK_GLOBAL, "ssl.ca.location", _RK_C_STR,
-	  _RK(ssl.ca_location),
-	  "File or directory path to CA certificate(s) for verifying "
-	  "the broker's key."
-	},
-	{ _RK_GLOBAL, "ssl.crl.location", _RK_C_STR,
-	  _RK(ssl.crl_location),
-	  "Path to CRL for verifying broker's certificate validity."
-	},
-#endif /* WITH_SSL */
-
-        /* Point user in the right direction if they try to apply
-         * Java client SSL / JAAS properties. */
-        { _RK_GLOBAL, "ssl.keystore.location", _RK_C_INVALID,
-          _RK(dummy),
-          "Java KeyStores are not supported, use `ssl.key.location` and "
-          "a private key (PEM) file instead. "
-          "See https://github.com/edenhill/librdkafka/wiki/Using-SSL-with-librdkafka for more information."
-        },
-        { _RK_GLOBAL, "ssl.truststore.location", _RK_C_INVALID,
-          _RK(dummy),
-          "Java TrustStores are not supported, use `ssl.ca.location` "
-          "and a certificate file instead. "
-          "See https://github.com/edenhill/librdkafka/wiki/Using-SSL-with-librdkafka for more information."
-        },
-        { _RK_GLOBAL, "sasl.jaas.config", _RK_C_INVALID,
-          _RK(dummy),
-          "Java JAAS configuration is not supported, see "
-          "https://github.com/edenhill/librdkafka/wiki/Using-SASL-with-librdkafka "
-          "for more information."
-        },
-
-	{_RK_GLOBAL,"sasl.mechanisms", _RK_C_STR,
-	 _RK(sasl.mechanisms),
-	 "SASL mechanism to use for authentication. "
-	 "Supported: GSSAPI, PLAIN, SCRAM-SHA-256, SCRAM-SHA-512. "
-	 "**NOTE**: Despite the name only one mechanism must be configured.",
-	 .sdef = "GSSAPI",
-	 .validate = rd_kafka_conf_validate_single },
-	{ _RK_GLOBAL, "sasl.kerberos.service.name", _RK_C_STR,
-	  _RK(sasl.service_name),
-	  "Kerberos principal name that Kafka runs as.",
-	  .sdef = "kafka" },
-	{ _RK_GLOBAL, "sasl.kerberos.principal", _RK_C_STR,
-	  _RK(sasl.principal),
-	  "This client's Kerberos principal name.",
-	  .sdef = "kafkaclient" },
-#ifndef _MSC_VER
-	{ _RK_GLOBAL, "sasl.kerberos.kinit.cmd", _RK_C_STR,
-	  _RK(sasl.kinit_cmd),
-	  "Full kerberos kinit command string, %{config.prop.name} is replaced "
-	  "by corresponding config object value, %{broker.name} returns the "
-	  "broker's hostname.",
-	  .sdef = "kinit -S \"%{sasl.kerberos.service.name}/%{broker.name}\" "
-	  "-k -t \"%{sasl.kerberos.keytab}\" %{sasl.kerberos.principal}" },
-	{ _RK_GLOBAL, "sasl.kerberos.keytab", _RK_C_STR,
-	  _RK(sasl.keytab),
-	  "Path to Kerberos keytab file. Uses system default if not set. "
-	  "**NOTE**: This is not automatically used but must be added to the "
-	  "template in sasl.kerberos.kinit.cmd as "
-	  "` ... -t %{sasl.kerberos.keytab}`." },
-	{ _RK_GLOBAL, "sasl.kerberos.min.time.before.relogin", _RK_C_INT,
-	  _RK(sasl.relogin_min_time),
-	  "Minimum time in milliseconds between key refresh attempts.",
-	  1, 86400*1000, 60*1000 },
-#endif
-	{ _RK_GLOBAL, "sasl.username", _RK_C_STR,
-	  _RK(sasl.username),
-	  "SASL username for use with the PLAIN and SASL-SCRAM-.. mechanisms" },
-	{ _RK_GLOBAL, "sasl.password", _RK_C_STR,
-	  _RK(sasl.password),
-	  "SASL password for use with the PLAIN and SASL-SCRAM-.. mechanism" },
-
-#if WITH_PLUGINS
-        /* Plugins */
-        { _RK_GLOBAL, "plugin.library.paths", _RK_C_STR,
-          _RK(plugin_paths),
-          "List of plugin libraries to load (; separated). "
-          "The library search path is platform dependent (see dlopen(3) for Unix and LoadLibrary() for Windows). If no filename extension is specified the "
-          "platform-specific extension (such as .dll or .so) will be appended automatically.",
-          .set = rd_kafka_plugins_conf_set },
-#endif
-
-        /* Interceptors are added through specific API and not exposed
-         * as configuration properties.
-         * The interceptor property must be defined after plugin.library.paths
-         * so that the plugin libraries are properly loaded before
-         * interceptors are configured when duplicating configuration objects.*/
-        { _RK_GLOBAL, "interceptors", _RK_C_INTERNAL,
-          _RK(interceptors),
-          "Interceptors added through rd_kafka_conf_interceptor_add_..() "
-          "and any configuration handled by interceptors.",
-          .ctor = rd_kafka_conf_interceptor_ctor,
-          .dtor = rd_kafka_conf_interceptor_dtor,
-          .copy = rd_kafka_conf_interceptor_copy },
-
-        /* Global client group properties */
-        { _RK_GLOBAL|_RK_CGRP, "group.id", _RK_C_STR,
-          _RK(group_id_str),
-          "Client group id string. All clients sharing the same group.id "
-          "belong to the same group." },
-        { _RK_GLOBAL|_RK_CGRP, "partition.assignment.strategy", _RK_C_STR,
-          _RK(partition_assignment_strategy),
-          "Name of partition assignment strategy to use when elected "
-          "group leader assigns partitions to group members.",
-	  .sdef = "range,roundrobin" },
-        { _RK_GLOBAL|_RK_CGRP, "session.timeout.ms", _RK_C_INT,
-          _RK(group_session_timeout_ms),
-          "Client group session and failure detection timeout.",
-          1, 3600*1000, 30*1000 },
-        { _RK_GLOBAL|_RK_CGRP, "heartbeat.interval.ms", _RK_C_INT,
-          _RK(group_heartbeat_intvl_ms),
-          "Group session keepalive heartbeat interval.",
-          1, 3600*1000, 1*1000 },
-        { _RK_GLOBAL|_RK_CGRP, "group.protocol.type", _RK_C_KSTR,
-          _RK(group_protocol_type),
-          "Group protocol type",
-          .sdef = "consumer" },
-        { _RK_GLOBAL|_RK_CGRP, "coordinator.query.interval.ms", _RK_C_INT,
-          _RK(coord_query_intvl_ms),
-          "How often to query for the current client group coordinator. "
-          "If the currently assigned coordinator is down the configured "
-          "query interval will be divided by ten to more quickly recover "
-          "in case of coordinator reassignment.",
-          1, 3600*1000, 10*60*1000 },
-
-        /* Global consumer properties */
-        { _RK_GLOBAL|_RK_CONSUMER, "enable.auto.commit", _RK_C_BOOL,
-          _RK(enable_auto_commit),
-          "Automatically and periodically commit offsets in the background.",
-          0, 1, 1 },
-        { _RK_GLOBAL|_RK_CONSUMER, "auto.commit.interval.ms", _RK_C_INT,
-	  _RK(auto_commit_interval_ms),
-	  "The frequency in milliseconds that the consumer offsets "
-	  "are committed (written) to offset storage. (0 = disable). "
-          "This setting is used by the high-level consumer.",
-          0, 86400*1000, 5*1000 },
-        { _RK_GLOBAL|_RK_CONSUMER, "enable.auto.offset.store", _RK_C_BOOL,
-          _RK(enable_auto_offset_store),
-          "Automatically store offset of last message provided to "
-	  "application.",
-          0, 1, 1 },
-	{ _RK_GLOBAL|_RK_CONSUMER, "queued.min.messages", _RK_C_INT,
-	  _RK(queued_min_msgs),
-	  "Minimum number of messages per topic+partition "
-          "librdkafka tries to maintain in the local consumer queue.",
-	  1, 10000000, 100000 },
-	{ _RK_GLOBAL|_RK_CONSUMER, "queued.max.messages.kbytes", _RK_C_INT,
-	  _RK(queued_max_msg_kbytes),
-          "Maximum number of kilobytes per topic+partition in the "
-          "local consumer queue. "
-	  "This value may be overshot by fetch.message.max.bytes. "
-	  "This property has higher priority than queued.min.messages.",
-          1, 1000000000, 1000000 /* 1 Gig */ },
-	{ _RK_GLOBAL|_RK_CONSUMER, "fetch.wait.max.ms", _RK_C_INT,
-	  _RK(fetch_wait_max_ms),
-	  "Maximum time the broker may wait to fill the response "
-	  "with fetch.min.bytes.",
-	  0, 300*1000, 100 },
-        { _RK_GLOBAL|_RK_CONSUMER, "fetch.message.max.bytes", _RK_C_INT,
-          _RK(fetch_msg_max_bytes),
-          "Initial maximum number of bytes per topic+partition to request when "
-          "fetching messages from the broker. "
-	  "If the client encounters a message larger than this value "
-	  "it will gradually try to increase it until the "
-	  "entire message can be fetched.",
-          1, 1000000000, 1024*1024 },
-	{ _RK_GLOBAL|_RK_CONSUMER, "max.partition.fetch.bytes", _RK_C_ALIAS,
-	  .sdef = "fetch.message.max.bytes" },
-	{ _RK_GLOBAL|_RK_CONSUMER, "fetch.min.bytes", _RK_C_INT,
-	  _RK(fetch_min_bytes),
-	  "Minimum number of bytes the broker responds with. "
-	  "If fetch.wait.max.ms expires the accumulated data will "
-	  "be sent to the client regardless of this setting.",
-	  1, 100000000, 1 },
-	{ _RK_GLOBAL|_RK_CONSUMER, "fetch.error.backoff.ms", _RK_C_INT,
-	  _RK(fetch_error_backoff_ms),
-	  "How long to postpone the next fetch request for a "
-	  "topic+partition in case of a fetch error.",
-	  0, 300*1000, 500 },
-        { _RK_GLOBAL|_RK_CONSUMER, "offset.store.method", _RK_C_S2I,
-          _RK(offset_store_method),
-          "Offset commit store method: "
-          "'file' - local file store (offset.store.path, et.al), "
-          "'broker' - broker commit store "
-          "(requires Apache Kafka 0.8.2 or later on the broker).",
-          .vdef = RD_KAFKA_OFFSET_METHOD_BROKER,
-          .s2i = {
-                        { RD_KAFKA_OFFSET_METHOD_NONE, "none" },
-                        { RD_KAFKA_OFFSET_METHOD_FILE, "file" },
-                        { RD_KAFKA_OFFSET_METHOD_BROKER, "broker" }
-                }
-        },
-        { _RK_GLOBAL|_RK_CONSUMER, "consume_cb", _RK_C_PTR,
-	  _RK(consume_cb),
-	  "Message consume callback (set with rd_kafka_conf_set_consume_cb())"},
-	{ _RK_GLOBAL|_RK_CONSUMER, "rebalance_cb", _RK_C_PTR,
-	  _RK(rebalance_cb),
-	  "Called after consumer group has been rebalanced "
-          "(set with rd_kafka_conf_set_rebalance_cb())" },
-	{ _RK_GLOBAL|_RK_CONSUMER, "offset_commit_cb", _RK_C_PTR,
-	  _RK(offset_commit_cb),
-	  "Offset commit result propagation callback. "
-          "(set with rd_kafka_conf_set_offset_commit_cb())" },
-	{ _RK_GLOBAL|_RK_CONSUMER, "enable.partition.eof", _RK_C_BOOL,
-	  _RK(enable_partition_eof),
-	  "Emit RD_KAFKA_RESP_ERR__PARTITION_EOF event whenever the "
-	  "consumer reaches the end of a partition.",
-	  0, 1, 1 },
-        { _RK_GLOBAL|_RK_CONSUMER, "check.crcs", _RK_C_BOOL,
-          _RK(check_crcs),
-          "Verify CRC32 of consumed messages, ensuring no on-the-wire or "
-          "on-disk corruption to the messages occurred. This check comes "
-          "at slightly increased CPU usage.",
-          0, 1, 0 },
-	/* Global producer properties */
-	{ _RK_GLOBAL|_RK_PRODUCER, "queue.buffering.max.messages", _RK_C_INT,
-	  _RK(queue_buffering_max_msgs),
-	  "Maximum number of messages allowed on the producer queue.",
-	  1, 10000000, 100000 },
-	{ _RK_GLOBAL|_RK_PRODUCER, "queue.buffering.max.kbytes", _RK_C_INT,
-	  _RK(queue_buffering_max_kbytes),
-	  "Maximum total message size sum allowed on the producer queue. "
-	  "This property has higher priority than queue.buffering.max.messages.",
-	  1, INT_MAX/1024, 4000000 },
-	{ _RK_GLOBAL|_RK_PRODUCER, "queue.buffering.max.ms", _RK_C_INT,
-	  _RK(buffering_max_ms),
-	  "Delay in milliseconds to wait for messages in the producer queue "
-          "to accumulate before constructing message batches (MessageSets) to "
-          "transmit to brokers. "
-	  "A higher value allows larger and more effective "
-          "(less overhead, improved compression) batches of messages to "
-          "accumulate at the expense of increased message delivery latency.",
-	  0, 900*1000, 0 },
-        { _RK_GLOBAL|_RK_PRODUCER, "linger.ms", _RK_C_ALIAS,
-          .sdef = "queue.buffering.max.ms" },
-	{ _RK_GLOBAL|_RK_PRODUCER, "message.send.max.retries", _RK_C_INT,
-	  _RK(max_retries),
-	  "How many times to retry sending a failing MessageSet. "
-	  "**Note:** retrying may cause reordering.",
-          0, 10000000, 2 },
-          { _RK_GLOBAL | _RK_PRODUCER, "retries", _RK_C_ALIAS,
-                .sdef = "message.send.max.retries" },
-	{ _RK_GLOBAL|_RK_PRODUCER, "retry.backoff.ms", _RK_C_INT,
-	  _RK(retry_backoff_ms),
-	  "The backoff time in milliseconds before retrying a message send.",
-	  1, 300*1000, 100 },
-	{ _RK_GLOBAL|_RK_PRODUCER, "compression.codec", _RK_C_S2I,
-	  _RK(compression_codec),
-	  "compression codec to use for compressing message sets. "
-	  "This is the default value for all topics, may be overridden by "
-	  "the topic configuration property `compression.codec`. ",
-	  .vdef = RD_KAFKA_COMPRESSION_NONE,
-	  .s2i = {
-			{ RD_KAFKA_COMPRESSION_NONE,   "none" },
-#if WITH_ZLIB
-			{ RD_KAFKA_COMPRESSION_GZIP,   "gzip" },
-#endif
-#if WITH_SNAPPY
-			{ RD_KAFKA_COMPRESSION_SNAPPY, "snappy" },
-#endif
-                        { RD_KAFKA_COMPRESSION_LZ4, "lz4" },
-			{ 0 }
-		} },
-	{ _RK_GLOBAL|_RK_PRODUCER, "batch.num.messages", _RK_C_INT,
-	  _RK(batch_num_messages),
-	  "Maximum number of messages batched in one MessageSet. "
-	  "The total MessageSet size is also limited by message.max.bytes.",
-	  1, 1000000, 10000 },
-	{ _RK_GLOBAL|_RK_PRODUCER, "delivery.report.only.error", _RK_C_BOOL,
-	  _RK(dr_err_only),
-	  "Only provide delivery reports for failed messages.",
-	  0, 1, 0 },
-	{ _RK_GLOBAL|_RK_PRODUCER, "dr_cb", _RK_C_PTR,
-	  _RK(dr_cb),
-	  "Delivery report callback (set with rd_kafka_conf_set_dr_cb())" },
-	{ _RK_GLOBAL|_RK_PRODUCER, "dr_msg_cb", _RK_C_PTR,
-	  _RK(dr_msg_cb),
-	  "Delivery report callback (set with rd_kafka_conf_set_dr_msg_cb())" },
-
-
-        /*
-         * Topic properties
-         */
-
-        /* Topic producer properties */
-	{ _RK_TOPIC|_RK_PRODUCER, "request.required.acks", _RK_C_INT,
-	  _RKT(required_acks),
-	  "This field indicates how many acknowledgements the leader broker "
-	  "must receive from ISR brokers before responding to the request: "
-	  "*0*=Broker does not send any response/ack to client, "
-	  "*1*=Only the leader broker will need to ack the message, "
-	  "*-1* or *all*=broker will block until message is committed by all "
-	  "in sync replicas (ISRs) or broker's `in.sync.replicas` "
-	  "setting before sending response. ",
-	  -1, 1000, 1,
-	  .s2i = {
-			{ -1, "all" },
-		}
-	},
-	{ _RK_TOPIC | _RK_PRODUCER, "acks", _RK_C_ALIAS,
-	  .sdef = "request.required.acks" },
-
-	{ _RK_TOPIC|_RK_PRODUCER, "request.timeout.ms", _RK_C_INT,
-	  _RKT(request_timeout_ms),
-	  "The ack timeout of the producer request in milliseconds. "
-	  "This value is only enforced by the broker and relies "
-	  "on `request.required.acks` being != 0.",
-	  1, 900*1000, 5*1000 },
-	{ _RK_TOPIC|_RK_PRODUCER, "message.timeout.ms", _RK_C_INT,
-	  _RKT(message_timeout_ms),
-	  "Local message timeout. "
-	  "This value is only enforced locally and limits the time a "
-	  "produced message waits for successful delivery. "
-          "A time of 0 is infinite.",
-	  0, 900*1000, 300*1000 },
-        { _RK_TOPIC|_RK_PRODUCER, "produce.offset.report", _RK_C_BOOL,
-          _RKT(produce_offset_report),
-          "Report offset of produced message back to application. "
-          "The application must use the `dr_msg_cb` to retrieve the offset "
-          "from `rd_kafka_message_t.offset`.",
-          0, 1, 0 },
-	{ _RK_TOPIC|_RK_PRODUCER, "partitioner_cb", _RK_C_PTR,
-	  _RKT(partitioner),
-	  "Partitioner callback "
-	  "(set with rd_kafka_topic_conf_set_partitioner_cb())" },
-	{ _RK_TOPIC, "opaque", _RK_C_PTR,
-	  _RKT(opaque),
-	  "Application opaque (set with rd_kafka_topic_conf_set_opaque())" },
-	{ _RK_TOPIC | _RK_PRODUCER, "compression.codec", _RK_C_S2I,
-	  _RKT(compression_codec),
-	  "Compression codec to use for compressing message sets. ",
-	  .vdef = RD_KAFKA_COMPRESSION_INHERIT,
-	  .s2i = {
-		  { RD_KAFKA_COMPRESSION_NONE, "none" },
-#if WITH_ZLIB
-		  { RD_KAFKA_COMPRESSION_GZIP, "gzip" },
-#endif
-#if WITH_SNAPPY
-		  { RD_KAFKA_COMPRESSION_SNAPPY, "snappy" },
-#endif
-		  { RD_KAFKA_COMPRESSION_LZ4, "lz4" },
-		  { RD_KAFKA_COMPRESSION_INHERIT, "inherit" },
-		  { 0 }
-		} },
-
-
-        /* Topic consumer properties */
-	{ _RK_TOPIC|_RK_CONSUMER, "auto.commit.enable", _RK_C_BOOL,
-	  _RKT(auto_commit),
-	  "If true, periodically commit offset of the last message handed "
-	  "to the application. This committed offset will be used when the "
-	  "process restarts to pick up where it left off. "
-	  "If false, the application will have to call "
-	  "`rd_kafka_offset_store()` to store an offset (optional). "
-          "**NOTE:** This property should only be used with the simple "
-          "legacy consumer, when using the high-level KafkaConsumer the global "
-          "`enable.auto.commit` property must be used instead. "
-	  "**NOTE:** There is currently no zookeeper integration, offsets "
-	  "will be written to broker or local file according to "
-          "offset.store.method.",
-	  0, 1, 1 },
-	{ _RK_TOPIC|_RK_CONSUMER, "enable.auto.commit", _RK_C_ALIAS,
-	  .sdef = "auto.commit.enable" },
-	{ _RK_TOPIC|_RK_CONSUMER, "auto.commit.interval.ms", _RK_C_INT,
-	  _RKT(auto_commit_interval_ms),
-	  "The frequency in milliseconds that the consumer offsets "
-	  "are committed (written) to offset storage. "
-          "This setting is used by the low-level legacy consumer.",
-	  10, 86400*1000, 60*1000 },
-	{ _RK_TOPIC|_RK_CONSUMER, "auto.offset.reset", _RK_C_S2I,
-	  _RKT(auto_offset_reset),
-	  "Action to take when there is no initial offset in offset store "
-	  "or the desired offset is out of range: "
-	  "'smallest','earliest' - automatically reset the offset to the smallest offset, "
-	  "'largest','latest' - automatically reset the offset to the largest offset, "
-	  "'error' - trigger an error which is retrieved by consuming messages "
-	  "and checking 'message->err'.",
-	  .vdef = RD_KAFKA_OFFSET_END,
-	  .s2i = {
-			{ RD_KAFKA_OFFSET_BEGINNING, "smallest" },
-			{ RD_KAFKA_OFFSET_BEGINNING, "earliest" },
-			{ RD_KAFKA_OFFSET_BEGINNING, "beginning" },
-			{ RD_KAFKA_OFFSET_END, "largest" },
-			{ RD_KAFKA_OFFSET_END, "latest" },
-			{ RD_KAFKA_OFFSET_END, "end" },
-			{ RD_KAFKA_OFFSET_INVALID, "error" },
-		}
-	},
-	{ _RK_TOPIC|_RK_CONSUMER, "offset.store.path", _RK_C_STR,
-	  _RKT(offset_store_path),
-	  "Path to local file for storing offsets. If the path is a directory "
-	  "a filename will be automatically generated in that directory based "
-	  "on the topic and partition.",
-	  .sdef = "." },
-
-	{ _RK_TOPIC|_RK_CONSUMER, "offset.store.sync.interval.ms", _RK_C_INT,
-	  _RKT(offset_store_sync_interval_ms),
-	  "fsync() interval for the offset file, in milliseconds. "
-	  "Use -1 to disable syncing, and 0 for immediate sync after "
-	  "each write.",
-	  -1, 86400*1000, -1 },
-
-        { _RK_TOPIC|_RK_CONSUMER, "offset.store.method", _RK_C_S2I,
-          _RKT(offset_store_method),
-          "Offset commit store method: "
-          "'file' - local file store (offset.store.path, et.al), "
-          "'broker' - broker commit store "
-          "(requires \"group.id\" to be configured and "
-          "Apache Kafka 0.8.2 or later on the broker.).",
-          .vdef = RD_KAFKA_OFFSET_METHOD_BROKER,
-          .s2i = {
-                        { RD_KAFKA_OFFSET_METHOD_FILE, "file" },
-                        { RD_KAFKA_OFFSET_METHOD_BROKER, "broker" }
-                }
-        },
-
-        { _RK_TOPIC|_RK_CONSUMER, "consume.callback.max.messages", _RK_C_INT,
-          _RKT(consume_callback_max_msgs),
-          "Maximum number of messages to dispatch in "
-          "one `rd_kafka_consume_callback*()` call (0 = unlimited)",
-          0, 1000000, 0 },
-
-	{ 0, /* End */ }
-};
-
-
-static rd_kafka_conf_res_t
-rd_kafka_anyconf_set_prop0 (int scope, void *conf,
-			    const struct rd_kafka_property *prop,
-			    const char *istr, int ival, rd_kafka_conf_set_mode_t set_mode,
-                            char *errstr, size_t errstr_size) {
-        rd_kafka_conf_res_t res;
-
-#define _RK_PTR(TYPE,BASE,OFFSET)  (TYPE)(void *)(((char *)(BASE))+(OFFSET))
-
-        /* Try interceptors first (only for GLOBAL config) */
-        if (scope & _RK_GLOBAL) {
-                if (prop->type == _RK_C_PTR || prop->type == _RK_C_INTERNAL)
-                        res = RD_KAFKA_CONF_UNKNOWN;
-                else
-                        res = rd_kafka_interceptors_on_conf_set(conf,
-                                                                prop->name,
-                                                                istr,
-                                                                errstr,
-                                                                errstr_size);
-                if (res != RD_KAFKA_CONF_UNKNOWN)
-                        return res;
-        }
-
-
-        if (prop->set) {
-                /* Custom setter */
-                rd_kafka_conf_res_t res;
-
-                res = prop->set(scope, conf, prop->name, istr,
-                                _RK_PTR(void *, conf, prop->offset),
-                                set_mode, errstr, errstr_size);
-
-                if (res != RD_KAFKA_CONF_OK)
-                        return res;
-
-                /* FALLTHRU so that property value is set. */
-        }
-
-	switch (prop->type)
-	{
-	case _RK_C_STR:
-	{
-		char **str = _RK_PTR(char **, conf, prop->offset);
-		if (*str)
-			rd_free(*str);
-		if (istr)
-			*str = rd_strdup(istr);
-		else
-			*str = prop->sdef ? rd_strdup(prop->sdef) : NULL;
-		return RD_KAFKA_CONF_OK;
-	}
-        case _RK_C_KSTR:
-        {
-                rd_kafkap_str_t **kstr = _RK_PTR(rd_kafkap_str_t **, conf,
-                                                 prop->offset);
-                if (*kstr)
-                        rd_kafkap_str_destroy(*kstr);
-                if (istr)
-                        *kstr = rd_kafkap_str_new(istr, -1);
-                else
-                        *kstr = prop->sdef ?
-				rd_kafkap_str_new(prop->sdef, -1) : NULL;
-                return RD_KAFKA_CONF_OK;
-        }
-	case _RK_C_PTR:
-		*_RK_PTR(const void **, conf, prop->offset) = istr;
-		return RD_KAFKA_CONF_OK;
-	case _RK_C_BOOL:
-	case _RK_C_INT:
-	case _RK_C_S2I:
-	case _RK_C_S2F:
-	{
-		int *val = _RK_PTR(int *, conf, prop->offset);
-
-		if (prop->type == _RK_C_S2F) {
-			switch (set_mode)
-			{
-			case _RK_CONF_PROP_SET_REPLACE:
-				*val = ival;
-				break;
-			case _RK_CONF_PROP_SET_ADD:
-				*val |= ival;
-				break;
-			case _RK_CONF_PROP_SET_DEL:
-				*val &= ~ival;
-				break;
-			}
-		} else {
-			/* Single assignment */
-			*val = ival;
-
-		}
-		return RD_KAFKA_CONF_OK;
-	}
-        case _RK_C_PATLIST:
-        {
-                /* Split comma-separated list into individual regex expressions
-                 * that are verified and then append to the provided list. */
-                rd_kafka_pattern_list_t **plist;
-
-                plist = _RK_PTR(rd_kafka_pattern_list_t **, conf, prop->offset);
-
-		if (*plist)
-			rd_kafka_pattern_list_destroy(*plist);
-
-		if (istr) {
-			if (!(*plist =
-			      rd_kafka_pattern_list_new(istr,
-							errstr,
-							(int)errstr_size)))
-				return RD_KAFKA_CONF_INVALID;
-		} else
-			*plist = NULL;
-
-                return RD_KAFKA_CONF_OK;
-        }
-
-        case _RK_C_INTERNAL:
-                /* Probably handled by setter */
-                return RD_KAFKA_CONF_OK;
-
-	default:
-		rd_kafka_assert(NULL, !*"unknown conf type");
-	}
-
-	/* unreachable */
-	return RD_KAFKA_CONF_INVALID;
-}
-
-
-/**
- * @brief Find s2i (string-to-int mapping) entry and return its array index,
- *        or -1 on miss.
- */
-static int rd_kafka_conf_s2i_find (const struct rd_kafka_property *prop,
-				   const char *value) {
-	int j;
-
-	for (j = 0 ; j < (int)RD_ARRAYSIZE(prop->s2i); j++) {
-		if (prop->s2i[j].str &&
-		    !rd_strcasecmp(prop->s2i[j].str, value))
-			return j;
-	}
-
-	return -1;
-}
-
-
-static rd_kafka_conf_res_t
-rd_kafka_anyconf_set_prop (int scope, void *conf,
-			   const struct rd_kafka_property *prop,
-			   const char *value,
-			   char *errstr, size_t errstr_size) {
-	int ival;
-
-	switch (prop->type)
-	{
-	case _RK_C_STR:
-        case _RK_C_KSTR:
-		if (prop->s2i[0].str) {
-			int match;
-
-			if (!value ||
-			    (match = rd_kafka_conf_s2i_find(prop, value)) == -1){
-				rd_snprintf(errstr, errstr_size,
-					    "Invalid value for "
-					    "configuration property \"%s\": "
-					    "%s",
-					    prop->name, value);
-				return RD_KAFKA_CONF_INVALID;
-			}
-
-			/* Replace value string with canonical form */
-			value = prop->s2i[match].str;
-		}
-		/* FALLTHRU */
-        case _RK_C_PATLIST:
-		if (prop->validate &&
-		    (!value || !prop->validate(prop, value, -1))) {
-			rd_snprintf(errstr, errstr_size,
-				    "Invalid value for "
-				    "configuration property \"%s\": %s",
-				    prop->name, value);
-			return RD_KAFKA_CONF_INVALID;
-		}
-
-		return rd_kafka_anyconf_set_prop0(scope, conf, prop, value, 0,
-						  _RK_CONF_PROP_SET_REPLACE,
-                                                  errstr, errstr_size);
-
-	case _RK_C_PTR:
-		rd_snprintf(errstr, errstr_size,
-			 "Property \"%s\" must be set through dedicated "
-			 ".._set_..() function", prop->name);
-		return RD_KAFKA_CONF_INVALID;
-
-	case _RK_C_BOOL:
-		if (!value) {
-			rd_snprintf(errstr, errstr_size,
-				 "Bool configuration property \"%s\" cannot "
-				 "be set to empty value", prop->name);
-			return RD_KAFKA_CONF_INVALID;
-		}
-
-
-		if (!rd_strcasecmp(value, "true") ||
-		    !rd_strcasecmp(value, "t") ||
-		    !strcmp(value, "1"))
-			ival = 1;
-		else if (!rd_strcasecmp(value, "false") ||
-			 !rd_strcasecmp(value, "f") ||
-			 !strcmp(value, "0"))
-			ival = 0;
-		else {
-			rd_snprintf(errstr, errstr_size,
-				 "Expected bool value for \"%s\": "
-				 "true or false", prop->name);
-			return RD_KAFKA_CONF_INVALID;
-		}
-
-		rd_kafka_anyconf_set_prop0(scope, conf, prop, value, ival,
-					   _RK_CONF_PROP_SET_REPLACE,
-                                           errstr, errstr_size);
-		return RD_KAFKA_CONF_OK;
-
-	case _RK_C_INT:
-	{
-		const char *end;
-
-		if (!value) {
-			rd_snprintf(errstr, errstr_size,
-				 "Integer configuration "
-				 "property \"%s\" cannot be set "
-				 "to empty value", prop->name);
-			return RD_KAFKA_CONF_INVALID;
-		}
-
-		ival = (int)strtol(value, (char **)&end, 0);
-		if (end == value) {
-			/* Non-numeric: check the s2i table for a string mapping */
-			int match = rd_kafka_conf_s2i_find(prop, value);
-
-			if (match == -1) {
-				rd_snprintf(errstr, errstr_size,
-					    "Invalid value for "
-					    "configuration property \"%s\"",
-					    prop->name);
-				return RD_KAFKA_CONF_INVALID;
-			}
-
-			ival = prop->s2i[match].val;
-		}
-
-		if (ival < prop->vmin ||
-		    ival > prop->vmax) {
-			rd_snprintf(errstr, errstr_size,
-				 "Configuration property \"%s\" value "
-				 "%i is outside allowed range %i..%i",
-				 prop->name, ival,
-				 prop->vmin,
-				 prop->vmax);
-			return RD_KAFKA_CONF_INVALID;
-		}
-
-		rd_kafka_anyconf_set_prop0(scope, conf, prop, value, ival,
-					   _RK_CONF_PROP_SET_REPLACE,
-                                           errstr, errstr_size);
-		return RD_KAFKA_CONF_OK;
-	}
-
-	case _RK_C_S2I:
-	case _RK_C_S2F:
-	{
-		int j;
-		const char *next;
-
-		if (!value) {
-			rd_snprintf(errstr, errstr_size,
-				 "Configuration "
-				 "property \"%s\" cannot be set "
-				 "to empty value", prop->name);
-			return RD_KAFKA_CONF_INVALID;
-		}
-
-		next = value;
-		while (next && *next) {
-			const char *s, *t;
-			rd_kafka_conf_set_mode_t set_mode = _RK_CONF_PROP_SET_ADD; /* S2F */
-
-			s = next;
-
-			if (prop->type == _RK_C_S2F &&
-			    (t = strchr(s, ','))) {
-				/* CSV flag field */
-				next = t+1;
-			} else {
-				/* Single string */
-				t = s+strlen(s);
-				next = NULL;
-			}
-
-
-			/* Left trim */
-			while (s < t && isspace((int)*s))
-				s++;
-
-			/* Right trim */
-			while (t > s && isspace((int)*t))
-				t--;
-
-			/* S2F: +/- prefix */
-			if (prop->type == _RK_C_S2F) {
-				if (*s == '+') {
-					set_mode = _RK_CONF_PROP_SET_ADD;
-					s++;
-				} else if (*s == '-') {
-					set_mode = _RK_CONF_PROP_SET_DEL;
-					s++;
-				}
-			}
-
-			/* Empty string? */
-			if (s == t)
-				continue;
-
-			/* Match string to s2i table entry */
-			for (j = 0 ; j < (int)RD_ARRAYSIZE(prop->s2i); j++) {
-				int new_val;
-
-				if (!prop->s2i[j].str)
-					continue;
-
-				if (strlen(prop->s2i[j].str) == (size_t)(t-s) &&
-					 !rd_strncasecmp(prop->s2i[j].str, s,
-							 (int)(t-s)))
-					new_val = prop->s2i[j].val;
-				else
-					continue;
-
-				rd_kafka_anyconf_set_prop0(scope, conf, prop,
-                                                           value, new_val,
-                                                           set_mode,
-                                                           errstr, errstr_size);
-
-				if (prop->type == _RK_C_S2F) {
-					/* Flags: OR it in: do next */
-					break;
-				} else {
-					/* Single assignment */
-					return RD_KAFKA_CONF_OK;
-				}
-			}
-
-			/* S2F: Good match: continue with next */
-			if (j < (int)RD_ARRAYSIZE(prop->s2i))
-				continue;
-
-			/* No match */
-			rd_snprintf(errstr, errstr_size,
-				 "Invalid value for "
-				 "configuration property \"%s\"", prop->name);
-			return RD_KAFKA_CONF_INVALID;
-
-		}
-		return RD_KAFKA_CONF_OK;
-	}
-
-        case _RK_C_INTERNAL:
-                rd_snprintf(errstr, errstr_size,
-                            "Internal property \"%s\" not settable",
-                            prop->name);
-                return RD_KAFKA_CONF_INVALID;
-
-        case _RK_C_INVALID:
-                rd_snprintf(errstr, errstr_size, "%s", prop->desc);
-                return RD_KAFKA_CONF_INVALID;
-
-	default:
-                rd_kafka_assert(NULL, !*"unknown conf type");
-	}
-
-	/* not reachable */
-	return RD_KAFKA_CONF_INVALID;
-}
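Taken together, the cases above define what the public setters accept: boolean properties take true/false/t/f/1/0, integer properties take a number or an s2i alias name, enum (S2I) properties take a single name, and flag (S2F) properties take a CSV list with optional '+'/'-' prefixes for adding or removing individual flags. A hedged sketch follows; the helper name set_prop() is illustrative, and the property names are assumed to be the standard librdkafka ones.

#include <stdio.h>
#include <librdkafka/rdkafka.h>

static void set_prop (rd_kafka_conf_t *conf,
                      const char *name, const char *value) {
        char errstr[512];
        if (rd_kafka_conf_set(conf, name, value,
                              errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK)
                fprintf(stderr, "%s=%s: %s\n", name, value, errstr);
}

int main (void) {
        rd_kafka_conf_t *conf = rd_kafka_conf_new();

        set_prop(conf, "api.version.request", "true");    /* bool  (_RK_C_BOOL) */
        set_prop(conf, "message.max.bytes", "1000000");    /* int   (_RK_C_INT)  */
        set_prop(conf, "compression.codec", "gzip");       /* enum  (_RK_C_S2I)  */
        set_prop(conf, "debug", "broker,topic,-msg");      /* flags (_RK_C_S2F)  */

        rd_kafka_conf_destroy(conf);
        return 0;
}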
-
-
-
-static void rd_kafka_defaultconf_set (int scope, void *conf) {
-	const struct rd_kafka_property *prop;
-
-	for (prop = rd_kafka_properties ; prop->name ; prop++) {
-		if (!(prop->scope & scope))
-			continue;
-
-		if (prop->type == _RK_C_ALIAS || prop->type == _RK_C_INVALID)
-			continue;
-
-                if (prop->ctor)
-                        prop->ctor(scope, conf);
-
-		if (prop->sdef || prop->vdef || prop->pdef)
-			rd_kafka_anyconf_set_prop0(scope, conf, prop,
-						   prop->sdef ?
-                                                   prop->sdef : prop->pdef,
-                                                   prop->vdef,
-                                                   _RK_CONF_PROP_SET_REPLACE,
-                                                   NULL, 0);
-	}
-}
-
-rd_kafka_conf_t *rd_kafka_conf_new (void) {
-	rd_kafka_conf_t *conf = rd_calloc(1, sizeof(*conf));
-	rd_kafka_defaultconf_set(_RK_GLOBAL, conf);
-	return conf;
-}
-
-rd_kafka_topic_conf_t *rd_kafka_topic_conf_new (void) {
-	rd_kafka_topic_conf_t *tconf = rd_calloc(1, sizeof(*tconf));
-	rd_kafka_defaultconf_set(_RK_TOPIC, tconf);
-	return tconf;
-}
-
-
-
-static int rd_kafka_anyconf_set (int scope, void *conf,
-				 const char *name, const char *value,
-				 char *errstr, size_t errstr_size) {
-	char estmp[1];
-	const struct rd_kafka_property *prop;
-        rd_kafka_conf_res_t res;
-
-	if (!errstr) {
-		errstr = estmp;
-		errstr_size = 0;
-	}
-
-	if (value && !*value)
-		value = NULL;
-
-        /* Try interceptors first (only for GLOBAL config for now) */
-        if (scope & _RK_GLOBAL) {
-                res = rd_kafka_interceptors_on_conf_set(
-                        (rd_kafka_conf_t *)conf, name, value,
-                        errstr, errstr_size);
-                /* Handled (successfully or not) by interceptor. */
-                if (res != RD_KAFKA_CONF_UNKNOWN)
-                        return res;
-        }
-
-        /* Then global config */
-
-
-	for (prop = rd_kafka_properties ; prop->name ; prop++) {
-
-		if (!(prop->scope & scope))
-			continue;
-
-		if (strcmp(prop->name, name))
-			continue;
-
-		if (prop->type == _RK_C_ALIAS)
-			return rd_kafka_anyconf_set(scope, conf,
-						    prop->sdef, value,
-						    errstr, errstr_size);
-
-		return rd_kafka_anyconf_set_prop(scope, conf, prop, value,
-						 errstr, errstr_size);
-	}
-
-	rd_snprintf(errstr, errstr_size,
-		 "No such configuration property: \"%s\"", name);
-
-	return RD_KAFKA_CONF_UNKNOWN;
-}
-
-
-rd_kafka_conf_res_t rd_kafka_conf_set (rd_kafka_conf_t *conf,
-                                       const char *name,
-                                       const char *value,
-                                       char *errstr, size_t errstr_size) {
-        rd_kafka_conf_res_t res;
-
-        res = rd_kafka_anyconf_set(_RK_GLOBAL, conf, name, value,
-                                   errstr, errstr_size);
-        if (res != RD_KAFKA_CONF_UNKNOWN)
-                return res;
-
-        /* Fallthru:
-         * If the global property was unknown, try setting it on the
-         * default topic config. */
-        if (!conf->topic_conf) {
-                /* Create the topic config; it may be overwritten by the
-                 * application later. */
-                conf->topic_conf = rd_kafka_topic_conf_new();
-        }
-
-        return rd_kafka_topic_conf_set(conf->topic_conf, name, value,
-                                       errstr, errstr_size);
-}
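Because an unknown global name falls through to the default topic configuration, topic-level properties can be set directly on the global conf object. A small sketch, assuming the standard "auto.offset.reset" topic property:

#include <stdio.h>
#include <librdkafka/rdkafka.h>

int main (void) {
        char errstr[512];
        rd_kafka_conf_t *conf = rd_kafka_conf_new();

        /* "auto.offset.reset" is a topic-level property; the name is unknown
         * at global scope, so the call falls through to the (lazily created)
         * default topic conf. */
        if (rd_kafka_conf_set(conf, "auto.offset.reset", "earliest",
                              errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK)
                fprintf(stderr, "set failed: %s\n", errstr);

        rd_kafka_conf_destroy(conf);
        return 0;
}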
-
-
-rd_kafka_conf_res_t rd_kafka_topic_conf_set (rd_kafka_topic_conf_t *conf,
-					     const char *name,
-					     const char *value,
-					     char *errstr, size_t errstr_size) {
-	if (!strncmp(name, "topic.", strlen("topic.")))
-		name += strlen("topic.");
-
-	return rd_kafka_anyconf_set(_RK_TOPIC, conf, name, value,
-				    errstr, errstr_size);
-}
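Since the optional "topic." prefix is stripped, both spellings below name the same property. Sketch, assuming the standard "request.required.acks" topic property; the function name set_acks() is illustrative.

#include <librdkafka/rdkafka.h>

void set_acks (rd_kafka_topic_conf_t *tconf) {
        char errstr[512];

        /* Equivalent calls: the "topic." prefix is stripped before lookup. */
        rd_kafka_topic_conf_set(tconf, "request.required.acks", "1",
                                errstr, sizeof(errstr));
        rd_kafka_topic_conf_set(tconf, "topic.request.required.acks", "1",
                                errstr, sizeof(errstr));
}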
-
-
-static void rd_kafka_anyconf_clear (int scope, void *conf,
-				    const struct rd_kafka_property *prop) {
-	switch (prop->type)
-	{
-	case _RK_C_STR:
-	{
-		char **str = _RK_PTR(char **, conf, prop->offset);
-
-		if (*str) {
-                        if (prop->set) {
-                                prop->set(scope, conf, prop->name, NULL, *str,
-                                          _RK_CONF_PROP_SET_DEL, NULL, 0);
-                                /* FALLTHRU */
-                        }
-                        rd_free(*str);
-			*str = NULL;
-		}
-	}
-	break;
-
-        case _RK_C_KSTR:
-        {
-                rd_kafkap_str_t **kstr = _RK_PTR(rd_kafkap_str_t **, conf,
-                                                 prop->offset);
-                if (*kstr) {
-                        rd_kafkap_str_destroy(*kstr);
-                        *kstr = NULL;
-                }
-        }
-        break;
-
-        case _RK_C_PATLIST:
-        {
-                rd_kafka_pattern_list_t **plist;
-                plist = _RK_PTR(rd_kafka_pattern_list_t **, conf, prop->offset);
-		if (*plist) {
-			rd_kafka_pattern_list_destroy(*plist);
-			*plist = NULL;
-		}
-        }
-        break;
-
-        case _RK_C_PTR:
-                if (_RK_PTR(void *, conf, prop->offset) != NULL) {
-                        if (!strcmp(prop->name, "default_topic_conf")) {
-                                rd_kafka_topic_conf_t **tconf;
-
-                                tconf = _RK_PTR(rd_kafka_topic_conf_t **,
-                                                conf, prop->offset);
-                                if (*tconf) {
-                                        rd_kafka_topic_conf_destroy(*tconf);
-                                        *tconf = NULL;
-                                }
-                        }
-                }
-                break;
-
-	default:
-		break;
-	}
-
-        if (prop->dtor)
-                prop->dtor(scope, conf);
-
-}
-
-void rd_kafka_anyconf_destroy (int scope, void *conf) {
-	const struct rd_kafka_property *prop;
-
-        /* Call on_conf_destroy() interceptors */
-        if (scope == _RK_GLOBAL)
-                rd_kafka_interceptors_on_conf_destroy(conf);
-
-	for (prop = rd_kafka_properties; prop->name ; prop++) {
-		if (!(prop->scope & scope))
-			continue;
-
-		rd_kafka_anyconf_clear(scope, conf, prop);
-	}
-}
-
-
-void rd_kafka_conf_destroy (rd_kafka_conf_t *conf) {
-	rd_kafka_anyconf_destroy(_RK_GLOBAL, conf);
-        //FIXME: partition_assignors
-	rd_free(conf);
-}
-
-void rd_kafka_topic_conf_destroy (rd_kafka_topic_conf_t *topic_conf) {
-	rd_kafka_anyconf_destroy(_RK_TOPIC, topic_conf);
-	rd_free(topic_conf);
-}
-
-
-
-static void rd_kafka_anyconf_copy (int scope, void *dst, const void *src,
-                                   size_t filter_cnt, const char **filter) {
-	const struct rd_kafka_property *prop;
-
-	for (prop = rd_kafka_properties ; prop->name ; prop++) {
-		const char *val = NULL;
-		int ival = 0;
-                char *valstr;
-                size_t valsz;
-                size_t fi;
-                size_t nlen;
-
-		if (!(prop->scope & scope))
-			continue;
-
-		if (prop->type == _RK_C_ALIAS || prop->type == _RK_C_INVALID)
-			continue;
-
-                /* Apply filter, if any. */
-                nlen = strlen(prop->name);
-                for (fi = 0 ; fi < filter_cnt ; fi++) {
-                        size_t flen = strlen(filter[fi]);
-                        if (nlen >= flen &&
-                            !strncmp(filter[fi], prop->name, flen))
-                                break;
-                }
-                if (fi < filter_cnt)
-                        continue; /* Filter matched */
-
-		switch (prop->type)
-		{
-		case _RK_C_STR:
-		case _RK_C_PTR:
-			val = *_RK_PTR(const char **, src, prop->offset);
-
-                        if (!strcmp(prop->name, "default_topic_conf") && val)
-                                val = (void *)rd_kafka_topic_conf_dup(
-                                        (const rd_kafka_topic_conf_t *)
-                                        (void *)val);
-			break;
-                case _RK_C_KSTR:
-                {
-                        rd_kafkap_str_t **kstr = _RK_PTR(rd_kafkap_str_t **,
-                                                         src, prop->offset);
-                        if (*kstr)
-                                val = (*kstr)->str;
-                        break;
-                }
-
-		case _RK_C_BOOL:
-		case _RK_C_INT:
-		case _RK_C_S2I:
-		case _RK_C_S2F:
-			ival = *_RK_PTR(const int *, src, prop->offset);
-
-                        /* Get string representation of configuration value. */
-                        valsz = 0;
-                        rd_kafka_anyconf_get0(src, prop, NULL, &valsz);
-                        valstr = rd_alloca(valsz);
-                        rd_kafka_anyconf_get0(src, prop, valstr, &valsz);
-                        val = valstr;
-			break;
-                case _RK_C_PATLIST:
-                {
-                        const rd_kafka_pattern_list_t **plist;
-                        plist = _RK_PTR(const rd_kafka_pattern_list_t **,
-                                        src, prop->offset);
-			if (*plist)
-				val = (*plist)->rkpl_orig;
-                        break;
-                }
-                case _RK_C_INTERNAL:
-                        /* Handled by ->copy() below. */
-                        break;
-		default:
-			continue;
-		}
-
-                if (prop->copy)
-                        prop->copy(scope, dst, src,
-                                   _RK_PTR(void *, dst, prop->offset),
-                                   _RK_PTR(const void *, src, prop->offset),
-                                   filter_cnt, filter);
-
-                rd_kafka_anyconf_set_prop0(scope, dst, prop, val, ival,
-                                           _RK_CONF_PROP_SET_REPLACE, NULL, 0);
-	}
-}
-
-
-rd_kafka_conf_t *rd_kafka_conf_dup (const rd_kafka_conf_t *conf) {
-	rd_kafka_conf_t *new = rd_kafka_conf_new();
-
-        rd_kafka_interceptors_on_conf_dup(new, conf, 0, NULL);
-
-        rd_kafka_anyconf_copy(_RK_GLOBAL, new, conf, 0, NULL);
-
-	return new;
-}
-
-rd_kafka_conf_t *rd_kafka_conf_dup_filter (const rd_kafka_conf_t *conf,
-                                           size_t filter_cnt,
-                                           const char **filter) {
-	rd_kafka_conf_t *new = rd_kafka_conf_new();
-
-        rd_kafka_interceptors_on_conf_dup(new, conf, filter_cnt, filter);
-
-        rd_kafka_anyconf_copy(_RK_GLOBAL, new, conf, filter_cnt, filter);
-
-	return new;
-}
-
-
-rd_kafka_topic_conf_t *rd_kafka_topic_conf_dup (const rd_kafka_topic_conf_t
-						*conf) {
-	rd_kafka_topic_conf_t *new = rd_kafka_topic_conf_new();
-
-	rd_kafka_anyconf_copy(_RK_TOPIC, new, conf, 0, NULL);
-
-	return new;
-}
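rd_kafka_conf_dup() deep-copies a configuration, while rd_kafka_conf_dup_filter() copies everything except properties whose names match one of the given prefixes (a prefix match, per rd_kafka_anyconf_copy() above). A sketch, assuming both prototypes are exposed by this build's rdkafka.h; the filter strings and the function name copy_confs() are illustrative.

#include <librdkafka/rdkafka.h>

void copy_confs (const rd_kafka_conf_t *orig) {
        const char *filter[] = { "ssl.", "sasl." };
        rd_kafka_conf_t *copy, *stripped;

        /* Full deep copy, including the default topic conf if one is set. */
        copy = rd_kafka_conf_dup(orig);

        /* Copy everything except ssl.* and sasl.* properties
         * (filter entries are prefix-matched against property names). */
        stripped = rd_kafka_conf_dup_filter(orig, 2, filter);

        rd_kafka_conf_destroy(copy);
        rd_kafka_conf_destroy(stripped);
}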
-
-
-void rd_kafka_conf_set_events (rd_kafka_conf_t *conf, int events) {
-	conf->enabled_events = events;
-}
-
-
-void rd_kafka_conf_set_dr_cb (rd_kafka_conf_t *conf,
-			      void (*dr_cb) (rd_kafka_t *rk,
-					     void *payload, size_t len,
-					     rd_kafka_resp_err_t err,
-					     void *opaque, void *msg_opaque)) {
-	conf->dr_cb = dr_cb;
-}
-
-
-void rd_kafka_conf_set_dr_msg_cb (rd_kafka_conf_t *conf,
-                                  void (*dr_msg_cb) (rd_kafka_t *rk,
-                                                     const rd_kafka_message_t *
-                                                     rkmessage,
-                                                     void *opaque)) {
-        conf->dr_msg_cb = dr_msg_cb;
-}
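A minimal delivery-report callback wired up through the setter above (a sketch; the callback name my_dr_msg_cb() is illustrative, and the callback is invoked from rd_kafka_poll() once per produced message):

#include <stdio.h>
#include <librdkafka/rdkafka.h>

static void my_dr_msg_cb (rd_kafka_t *rk,
                          const rd_kafka_message_t *rkmessage, void *opaque) {
        (void)rk; (void)opaque;
        if (rkmessage->err)
                fprintf(stderr, "delivery failed: %s\n",
                        rd_kafka_err2str(rkmessage->err));
}

void install_dr_cb (rd_kafka_conf_t *conf) {
        rd_kafka_conf_set_dr_msg_cb(conf, my_dr_msg_cb);
}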
-
-
-void rd_kafka_conf_set_consume_cb (rd_kafka_conf_t *conf,
-                                   void (*consume_cb) (rd_kafka_message_t *
-                                                       rkmessage,
-                                                       void *opaque)) {
-        conf->consume_cb = consume_cb;
-}
-
-void rd_kafka_conf_set_rebalance_cb (
-        rd_kafka_conf_t *conf,
-        void (*rebalance_cb) (rd_kafka_t *rk,
-                              rd_kafka_resp_err_t err,
-                              rd_kafka_topic_partition_list_t *partitions,
-                              void *opaque)) {
-        conf->rebalance_cb = rebalance_cb;
-}
-
-void rd_kafka_conf_set_offset_commit_cb (
-        rd_kafka_conf_t *conf,
-        void (*offset_commit_cb) (rd_kafka_t *rk,
-                                  rd_kafka_resp_err_t err,
-                                  rd_kafka_topic_partition_list_t *offsets,
-                                  void *opaque)) {
-        conf->offset_commit_cb = offset_commit_cb;
-}
-
-
-
-void rd_kafka_conf_set_error_cb (rd_kafka_conf_t *conf,
-				 void  (*error_cb) (rd_kafka_t *rk, int err,
-						    const char *reason,
-						    void *opaque)) {
-	conf->error_cb = error_cb;
-}
-
-
-void rd_kafka_conf_set_throttle_cb (rd_kafka_conf_t *conf,
-				    void (*throttle_cb) (
-					    rd_kafka_t *rk,
-					    const char *broker_name,
-					    int32_t broker_id,
-					    int throttle_time_ms,
-					    void *opaque)) {
-	conf->throttle_cb = throttle_cb;
-}
-
-
-void rd_kafka_conf_set_log_cb (rd_kafka_conf_t *conf,
-			  void (*log_cb) (const rd_kafka_t *rk, int level,
-                                          const char *fac, const char *buf)) {
-	conf->log_cb = log_cb;
-}
-
-
-void rd_kafka_conf_set_stats_cb (rd_kafka_conf_t *conf,
-				 int (*stats_cb) (rd_kafka_t *rk,
-						  char *json,
-						  size_t json_len,
-						  void *opaque)) {
-	conf->stats_cb = stats_cb;
-}
-
-void rd_kafka_conf_set_socket_cb (rd_kafka_conf_t *conf,
-                                  int (*socket_cb) (int domain, int type,
-                                                    int protocol,
-                                                    void *opaque)) {
-        conf->socket_cb = socket_cb;
-}
-
-void
-rd_kafka_conf_set_connect_cb (rd_kafka_conf_t *conf,
-                              int (*connect_cb) (int sockfd,
-                                                 const struct sockaddr *addr,
-                                                 int addrlen,
-                                                 const char *id,
-                                                 void *opaque)) {
-        conf->connect_cb = connect_cb;
-}
-
-void
-rd_kafka_conf_set_closesocket_cb (rd_kafka_conf_t *conf,
-                                  int (*closesocket_cb) (int sockfd,
-                                                         void *opaque)) {
-        conf->closesocket_cb = closesocket_cb;
-}
-
-
-
-#ifndef _MSC_VER
-void rd_kafka_conf_set_open_cb (rd_kafka_conf_t *conf,
-                                int (*open_cb) (const char *pathname,
-                                                int flags, mode_t mode,
-                                                void *opaque)) {
-        conf->open_cb = open_cb;
-}
-#endif
-
-void rd_kafka_conf_set_opaque (rd_kafka_conf_t *conf, void *opaque) {
-	conf->opaque = opaque;
-}
-
-
-void rd_kafka_conf_set_default_topic_conf (rd_kafka_conf_t *conf,
-                                           rd_kafka_topic_conf_t *tconf) {
-        if (conf->topic_conf)
-                rd_kafka_topic_conf_destroy(conf->topic_conf);
-
-        conf->topic_conf = tconf;
-}
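The setter above destroys any previously attached default topic conf and takes ownership of the one passed in, so the caller must not destroy it afterwards; it is released together with the global conf. Sketch (the function name attach_default_topic_conf() is illustrative):

#include <librdkafka/rdkafka.h>

void attach_default_topic_conf (void) {
        char errstr[512];
        rd_kafka_conf_t *conf = rd_kafka_conf_new();
        rd_kafka_topic_conf_t *tconf = rd_kafka_topic_conf_new();

        rd_kafka_topic_conf_set(tconf, "request.required.acks", "-1",
                                errstr, sizeof(errstr));

        /* conf now owns tconf; do not call rd_kafka_topic_conf_destroy(tconf). */
        rd_kafka_conf_set_default_topic_conf(conf, tconf);

        rd_kafka_conf_destroy(conf);  /* also releases the owned topic conf */
}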
-
-
-void
-rd_kafka_topic_conf_set_partitioner_cb (rd_kafka_topic_conf_t *topic_conf,
-					int32_t (*partitioner) (
-						const rd_kafka_topic_t *rkt,
-						const void *keydata,
-						size_t keylen,
-						int32_t partition_cnt,
-						void *rkt_opaque,
-						void *msg_opaque)) {
-	topic_conf->partitioner = partitioner;
-}
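An illustrative custom partitioner matching the signature above (a naive FNV-1a key hash; a production partitioner should also check partition availability, e.g. via rd_kafka_topic_partition_available()). The names my_partitioner() and install_partitioner() are illustrative.

#include <librdkafka/rdkafka.h>

static int32_t my_partitioner (const rd_kafka_topic_t *rkt,
                               const void *keydata, size_t keylen,
                               int32_t partition_cnt,
                               void *rkt_opaque, void *msg_opaque) {
        const unsigned char *key = keydata;
        unsigned int hash = 2166136261u;
        size_t i;

        (void)rkt; (void)rkt_opaque; (void)msg_opaque;

        if (!keydata || partition_cnt <= 0)
                return RD_KAFKA_PARTITION_UA;  /* no usable partition */

        for (i = 0 ; i < keylen ; i++)
                hash = (hash ^ key[i]) * 16777619u;  /* FNV-1a */

        return (int32_t)(hash % (unsigned int)partition_cnt);
}

void install_partitioner (rd_kafka_topic_conf_t *tconf) {
        rd_kafka_topic_conf_set_partitioner_cb(tconf, my_partitioner);
}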
-
-void rd_kafka_topic_conf_set_opaque (rd_kafka_topic_conf_t *topic_conf,
-				     void *opaque) {
-	topic_conf->opaque = opaque;
-}
-
-
-
-
-/**
- * @brief Convert flags \p ival to csv-string using S2F property \p prop.
- *
- * This function has two modes: size query and write.
- * To query the required size, call with dest==NULL;
- * to write to a buffer of size dest_size, call with dest!=NULL.
- *
- * An \p ival of -1 means all.
- *
- * @returns the number of bytes written to \p dest (if not NULL), else the
- *          total number of bytes needed.
- *
- */
-size_t rd_kafka_conf_flags2str (char *dest, size_t dest_size, const char *delim,
-				const struct rd_kafka_property *prop,
-				int ival) {
-	size_t of = 0;
-	int j;
-
-	if (dest && dest_size > 0)
-		*dest = '\0';
-
-	/* Phase 1: scan for set flags, accumulate needed size.
-	 * Phase 2: write to dest */
-	for (j = 0 ; prop->s2i[j].str ; j++) {
-		if (prop->type == _RK_C_S2F && ival != -1 &&
-		    (ival & prop->s2i[j].val) != prop->s2i[j].val)
-			continue;
-		else if (prop->type == _RK_C_S2I &&
-			   ival != -1 && prop->s2i[j].val != ival)
-			continue;
-
-		if (!dest)
-			of += strlen(prop->s2i[j].str) + (of > 0 ? 1 : 0);
-		else {
-			size_t r;
-			r = rd_snprintf(dest+of, dest_size-of,
-					"%s%s",
-					of > 0 ? delim:"",
-					prop->s2i[j].str);
-			if (r > dest_size-of) {
-				r = dest_size-of;
-				break;
-			}
-			of += r;
-		}
-	}
-
-	return of+1/*nul*/;
-}
-
-
-/**
- * Return the "original" (re-created) configuration value string.
- */
-static rd_kafka_conf_res_t
-rd_kafka_anyconf_get0 (const void *conf, const struct rd_kafka_property *prop,
-                       char *dest, size_t *dest_size) {
-        char tmp[22];
-        const char *val = NULL;
-        size_t val_len = 0;
-        int j;
-
-        switch (prop->type)
-        {
-        case _RK_C_STR:
-                val = *_RK_PTR(const char **, conf, prop->offset);
-                break;
-
-        case _RK_C_KSTR:
-        {
-                const rd_kafkap_str_t **kstr = _RK_PTR(const rd_kafkap_str_t **,
-                                                       conf, prop->offset);
-                if (*kstr)
-                        val = (*kstr)->str;
-                break;
-        }
-
-        case _RK_C_PTR:
-                val = *_RK_PTR(const void **, conf, prop->offset);
-                if (val) {
-                        rd_snprintf(tmp, sizeof(tmp), "%p", (void *)val);
-                        val = tmp;
-                }
-                break;
-
-        case _RK_C_BOOL:
-                val = (*_RK_PTR(int *, conf, prop->offset) ? "true" : "false");
-                break;
-
-        case _RK_C_INT:
-                rd_snprintf(tmp, sizeof(tmp), "%i",
-                            *_RK_PTR(int *, conf, prop->offset));
-                val = tmp;
-                break;
-
-        case _RK_C_S2I:
-                for (j = 0 ; j < (int)RD_ARRAYSIZE(prop->s2i); j++) {
-                        if (prop->s2i[j].val ==
-                            *_RK_PTR(int *, conf, prop->offset)) {
-                                val = prop->s2i[j].str;
-                                break;
-                        }
-                }
-                break;
-
-        case _RK_C_S2F:
-        {
-                const int ival = *_RK_PTR(const int *, conf, prop->offset);
-
-		val_len = rd_kafka_conf_flags2str(dest, *dest_size, ",",
-						  prop, ival);
-		if (dest) {
-			val_len = 0;
-			val = dest;
-			dest = NULL;
-		}
-		break;
-	}
-
-        case _RK_C_PATLIST:
-        {
-                const rd_kafka_pattern_list_t **plist;
-                plist = _RK_PTR(const rd_kafka_pattern_list_t **,
-                                conf, prop->offset);
-		if (*plist)
-			val = (*plist)->rkpl_orig;
-                break;
-        }
-
-        default:
-                break;
-        }
-
-        if (val_len) {
-                *dest_size = val_len+1;
-                return RD_KAFKA_CONF_OK;
-        }
-
-        if (!val)
-                return RD_KAFKA_CONF_INVALID;
-
-        val_len = strlen(val);
-
-        if (dest) {
-                size_t use_len = RD_MIN(val_len, (*dest_size)-1);
-                memcpy(dest, val, use_len);
-                dest[use_len] = '\0';
-        }
-
-        /* Return needed size */
-        *dest_size = val_len+1;
-
-        return RD_KAFKA_CONF_OK;
-}
-
-
-static rd_kafka_conf_res_t rd_kafka_anyconf_get (int scope, const void *conf,
-                                                 const char *name,
-                                                 char *dest, size_t *dest_size){
-	const struct rd_kafka_property *prop;
-
-	for (prop = rd_kafka_properties; prop->name ; prop++) {
-
-		if (!(prop->scope & scope) || strcmp(prop->name, name))
-			continue;
-
-		if (prop->type == _RK_C_ALIAS)
-			return rd_kafka_anyconf_get(scope, conf,
-						    prop->sdef,
-						    dest, dest_size);
-
-                if (rd_kafka_anyconf_get0(conf, prop, dest, dest_size) ==
-                    RD_KAFKA_CONF_OK)
-                        return RD_KAFKA_CONF_OK;
-        }
-
-        return RD_KAFKA_CONF_UNKNOWN;
-}
-
-rd_kafka_conf_res_t rd_kafka_topic_conf_get (const rd_kafka_topic_conf_t *conf,
-                                             const char *name,
-                                             char *dest, size_t *dest_size) {
-        return rd_kafka_anyconf_get(_RK_TOPIC, conf, name, dest, dest_size);
-}
-
-rd_kafka_conf_res_t rd_kafka_conf_get (const rd_kafka_conf_t *conf,
-                                       const char *name,
-                                       char *dest, size_t *dest_size) {
-        rd_kafka_conf_res_t res;
-        res = rd_kafka_anyconf_get(_RK_GLOBAL, conf, name, dest, dest_size);
-        if (res != RD_KAFKA_CONF_UNKNOWN || !conf->topic_conf)
-                return res;
-
-        /* Fallthru:
-         * If the global property was unknown, try getting it from the
-         * default topic config, if any. */
-        return rd_kafka_topic_conf_get(conf->topic_conf, name, dest, dest_size);
-}
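The getters follow the usual two-call pattern: query the required size (including the terminating NUL) with dest==NULL, then call again with a buffer of that size. Sketch (the helper name print_property() is illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <librdkafka/rdkafka.h>

void print_property (const rd_kafka_conf_t *conf, const char *name) {
        size_t size = 0;
        char *buf;

        /* First call: dest==NULL just reports the needed size (incl. NUL). */
        if (rd_kafka_conf_get(conf, name, NULL, &size) != RD_KAFKA_CONF_OK)
                return;

        buf = malloc(size);
        if (!buf)
                return;

        /* Second call: copy the value into the buffer. */
        if (rd_kafka_conf_get(conf, name, buf, &size) == RD_KAFKA_CONF_OK)
                printf("%s = %s\n", name, buf);

        free(buf);
}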
-
-
-static const char **rd_kafka_anyconf_dump (int scope, const void *conf,
-					   size_t *cntp) {
-	const struct rd_kafka_property *prop;
-	char **arr;
-	int cnt = 0;
-
-	arr = rd_calloc(sizeof(char *), RD_ARRAYSIZE(rd_kafka_properties)*2);
-
-	for (prop = rd_kafka_properties; prop->name ; prop++) {
-                char *val = NULL;
-                size_t val_size;
-
-		if (!(prop->scope & scope))
-			continue;
-
-		/* Skip aliases, show original property instead.
-                 * Skip invalids. */
-		if (prop->type == _RK_C_ALIAS || prop->type == _RK_C_INVALID)
-			continue;
-
-                /* Query value size */
-                if (rd_kafka_anyconf_get0(conf, prop, NULL, &val_size) !=
-                    RD_KAFKA_CONF_OK)
-                        continue;
-
-                /* Get value */
-                val = malloc(val_size);
-                rd_kafka_anyconf_get0(conf, prop, val, &val_size);
-
-                arr[cnt++] = rd_strdup(prop->name);
-                arr[cnt++] = val;
-	}
-
-	*cntp = cnt;
-
-	return (const char **)arr;
-}
-
-
-const char **rd_kafka_conf_dump (rd_kafka_conf_t *conf, size_t *cntp) {
-	return rd_kafka_anyconf_dump(_RK_GLOBAL, conf, cntp);
-}
-
-const char **rd_kafka_topic_conf_dump (rd_kafka_topic_conf_t *conf,
-				       size_t *cntp) {
-	return rd_kafka_anyconf_dump(_RK_TOPIC, conf, cntp);
-}
-
-void rd_kafka_conf_dump_free (const char **arr, size_t cnt) {
-	char **_arr = (char **)arr;
-	unsigned int i;
-
-	for (i = 0 ; i < cnt ; i++)
-		if (_arr[i])
-			rd_free(_arr[i]);
-
-	rd_free(_arr);
-}
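The dump API above returns a flat array of name/value string pairs that must be released with rd_kafka_conf_dump_free(). Sketch (the function name dump_conf() is illustrative):

#include <stdio.h>
#include <librdkafka/rdkafka.h>

void dump_conf (rd_kafka_conf_t *conf) {
        size_t cnt, i;
        const char **arr = rd_kafka_conf_dump(conf, &cnt);

        /* arr holds cnt strings laid out as name, value, name, value, ... */
        for (i = 0 ; i + 1 < cnt ; i += 2)
                printf("%s = %s\n", arr[i], arr[i + 1]);

        rd_kafka_conf_dump_free(arr, cnt);
}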
-
-void rd_kafka_conf_properties_show (FILE *fp) {
-	const struct rd_kafka_property *prop;
-	int last = 0;
-	int j;
-	char tmp[512];
-	const char *dash80 = "----------------------------------------"
-		"----------------------------------------";
-
-	for (prop = rd_kafka_properties; prop->name ; prop++) {
-		const char *typeinfo = "";
-
-                /* Skip invalid properties. */
-                if (prop->type == _RK_C_INVALID)
-                        continue;
-
-		if (!(prop->scope & last)) {
-			fprintf(fp,
-				"%s## %s configuration properties\n\n",
-				last ? "\n\n":"",
-				prop->scope == _RK_GLOBAL ? "Global": "Topic");
-
-			fprintf(fp,
-				"%-40s | %3s | %-15s | %13s | %-25s\n"
-				"%.*s-|-%.*s-|-%.*s-|-%.*s:|-%.*s\n",
-				"Property", "C/P", "Range",
-				"Default", "Description",
-				40, dash80, 3, dash80, 15, dash80,
-				13, dash80, 25, dash80);
-
-			last = prop->scope & (_RK_GLOBAL|_RK_TOPIC);
-
-		}
-
-		fprintf(fp, "%-40s | %3s | ", prop->name,
-                        (!(prop->scope & _RK_PRODUCER) ==
-                         !(prop->scope & _RK_CONSUMER) ? " * " :
-                         ((prop->scope & _RK_PRODUCER) ? " P " :
-                          (prop->scope & _RK_CONSUMER) ? " C " : "")));
-
-		switch (prop->type)
-		{
-		case _RK_C_STR:
-                case _RK_C_KSTR:
-			typeinfo = "string";
-                case _RK_C_PATLIST:
-			if (prop->type == _RK_C_PATLIST)
-				typeinfo = "pattern list";
-			if (prop->s2i[0].str) {
-				rd_kafka_conf_flags2str(tmp, sizeof(tmp), ", ",
-							prop, -1);
-				fprintf(fp, "%-15s | %13s",
-					tmp, prop->sdef ? prop->sdef : "");
-			} else {
-				fprintf(fp, "%-15s | %13s",
-					"", prop->sdef ? prop->sdef : "");
-			}
-			break;
-		case _RK_C_BOOL:
-			typeinfo = "boolean";
-			fprintf(fp, "%-15s | %13s", "true, false",
-				prop->vdef ? "true" : "false");
-			break;
-		case _RK_C_INT:
-			typeinfo = "integer";
-			rd_snprintf(tmp, sizeof(tmp),
-				    "%d .. %d", prop->vmin, prop->vmax);
-			fprintf(fp, "%-15s | %13i", tmp, prop->vdef);
-			break;
-		case _RK_C_S2I:
-			typeinfo = "enum value";
-			rd_kafka_conf_flags2str(tmp, sizeof(tmp), ", ",
-						prop, -1);
-			fprintf(fp, "%-15s | ", tmp);
-
-			for (j = 0 ; j < (int)RD_ARRAYSIZE(prop->s2i); j++) {
-				if (prop->s2i[j].val == prop->vdef) {
-					fprintf(fp, "%13s", prop->s2i[j].str);
-					break;
-				}
-			}
-			if (j == RD_ARRAYSIZE(prop->s2i))
-				fprintf(fp, "%13s", " ");
-			break;
-
-		case _RK_C_S2F:
-			typeinfo = "CSV flags";
-			/* Don't duplicate the builtin.features value in
-			 * both the Range and Default columns */
-			if (!strcmp(prop->name, "builtin.features"))
-				*tmp = '\0';
-			else
-				rd_kafka_conf_flags2str(tmp, sizeof(tmp), ", ",
-							prop, -1);
-			fprintf(fp, "%-15s | ", tmp);
-			rd_kafka_conf_flags2str(tmp, sizeof(tmp), ", ",
-						prop, prop->vdef);
-			fprintf(fp, "%13s", tmp);
-
-			break;
-		case _RK_C_PTR:
-			typeinfo = "pointer";
-			/* FALLTHRU */
-		default:
-			fprintf(fp, "%-15s | %-13s", "", " ");
-			break;
-		}
-
-		if (prop->type == _RK_C_ALIAS)
-			fprintf(fp, " | Alias for `%s`\n", prop->sdef);
-		else
-			fprintf(fp, " | %s <br>*Type: %s*\n", prop->desc,
-				typeinfo);
-	}
-	fprintf(fp, "\n");
-        fprintf(fp, "### C/P legend: C = Consumer, P = Producer, * = both\n");
-}