You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@nifi.apache.org by al...@apache.org on 2019/06/14 20:32:52 UTC

[nifi-minifi-cpp] branch master updated: MINIFICPP-923: remove librdkafka, and deal with some bugs

This is an automated email from the ASF dual-hosted git repository.

aldrin pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/nifi-minifi-cpp.git


The following commit(s) were added to refs/heads/master by this push:
     new 23b30f7  MINIFICPP-923: remove librdkafka, and deal with some bugs
23b30f7 is described below

commit 23b30f7e5521efe0ec488057036a1b52f19132fe
Author: Marc Parisi <ph...@apache.org>
AuthorDate: Fri Jun 14 11:41:23 2019 -0400

    MINIFICPP-923: remove librdkafka, and deal with some bugs
    
    This closes #594.
    
    Signed-off-by: Aldrin Piri <al...@apache.org>
---
 CMakeLists.txt                                     |    2 +-
 cmake/ssl/FindOpenSSL.cmake                        |    3 +-
 extensions/librdkafka/CMakeLists.txt               |   45 +-
 extensions/librdkafka/PublishKafka.cpp             |   22 +-
 extensions/librdkafka/PublishKafka.h               |    2 +-
 thirdparty/librdkafka-0.11.4/.appveyor.yml         |   88 -
 thirdparty/librdkafka-0.11.4/.dir-locals.el        |    3 -
 thirdparty/librdkafka-0.11.4/.doozer.json          |  110 -
 .../librdkafka-0.11.4/.github/ISSUE_TEMPLATE       |   32 -
 thirdparty/librdkafka-0.11.4/.gitignore            |   28 -
 thirdparty/librdkafka-0.11.4/.travis.yml           |   42 -
 thirdparty/librdkafka-0.11.4/CMakeLists.txt        |  183 -
 thirdparty/librdkafka-0.11.4/CODE_OF_CONDUCT.md    |   46 -
 thirdparty/librdkafka-0.11.4/CONFIGURATION.md      |  138 -
 thirdparty/librdkafka-0.11.4/CONTRIBUTING.md       |  271 --
 thirdparty/librdkafka-0.11.4/Doxyfile              | 2385 -----------
 thirdparty/librdkafka-0.11.4/INTRODUCTION.md       |  735 ----
 thirdparty/librdkafka-0.11.4/LICENSE               |   25 -
 thirdparty/librdkafka-0.11.4/LICENSE.crc32c        |   28 -
 thirdparty/librdkafka-0.11.4/LICENSE.lz4           |   26 -
 thirdparty/librdkafka-0.11.4/LICENSE.murmur2       |   25 -
 thirdparty/librdkafka-0.11.4/LICENSE.pycrc         |   23 -
 thirdparty/librdkafka-0.11.4/LICENSE.queue         |   31 -
 thirdparty/librdkafka-0.11.4/LICENSE.regexp        |    5 -
 thirdparty/librdkafka-0.11.4/LICENSE.snappy        |   36 -
 thirdparty/librdkafka-0.11.4/LICENSE.tinycthread   |   26 -
 thirdparty/librdkafka-0.11.4/LICENSE.wingetopt     |   49 -
 thirdparty/librdkafka-0.11.4/LICENSES.txt          |  313 --
 thirdparty/librdkafka-0.11.4/Makefile              |   68 -
 thirdparty/librdkafka-0.11.4/README.md             |  168 -
 thirdparty/librdkafka-0.11.4/README.win32          |   28 -
 thirdparty/librdkafka-0.11.4/configure             |  214 -
 thirdparty/librdkafka-0.11.4/configure.librdkafka  |  215 -
 thirdparty/librdkafka-0.11.4/dev-conf.sh           |   45 -
 thirdparty/librdkafka-0.11.4/examples/.gitignore   |    8 -
 .../librdkafka-0.11.4/examples/CMakeLists.txt      |   30 -
 thirdparty/librdkafka-0.11.4/examples/Makefile     |   96 -
 thirdparty/librdkafka-0.11.4/examples/globals.json |   11 -
 .../examples/kafkatest_verifiable_client.cpp       |  960 -----
 .../examples/rdkafka_consume_batch.cpp             |  260 --
 .../examples/rdkafka_consumer_example.c            |  624 ---
 .../examples/rdkafka_consumer_example.cpp          |  485 ---
 .../librdkafka-0.11.4/examples/rdkafka_example.c   |  885 ----
 .../librdkafka-0.11.4/examples/rdkafka_example.cpp |  645 ---
 .../examples/rdkafka_performance.c                 | 1651 --------
 .../examples/rdkafka_simple_producer.c             |  260 --
 .../examples/rdkafka_zookeeper_example.c           |  728 ----
 thirdparty/librdkafka-0.11.4/lds-gen.py            |   38 -
 thirdparty/librdkafka-0.11.4/mainpage.doxy         |   35 -
 thirdparty/librdkafka-0.11.4/mklove/Makefile.base  |  215 -
 .../mklove/modules/configure.atomics               |  144 -
 .../mklove/modules/configure.base                  | 1771 --------
 .../mklove/modules/configure.builtin               |   62 -
 .../librdkafka-0.11.4/mklove/modules/configure.cc  |  178 -
 .../librdkafka-0.11.4/mklove/modules/configure.cxx |    8 -
 .../mklove/modules/configure.fileversion           |   65 -
 .../mklove/modules/configure.gitversion            |   19 -
 .../mklove/modules/configure.good_cflags           |   18 -
 .../mklove/modules/configure.host                  |  110 -
 .../librdkafka-0.11.4/mklove/modules/configure.lib |   49 -
 .../mklove/modules/configure.parseversion          |   95 -
 .../librdkafka-0.11.4/mklove/modules/configure.pic |   16 -
 .../mklove/modules/configure.socket                |   20 -
 thirdparty/librdkafka-0.11.4/packaging/RELEASE.md  |  137 -
 .../librdkafka-0.11.4/packaging/archlinux/PKGBUILD |    5 -
 .../packaging/cmake/Config.cmake.in                |   20 -
 .../librdkafka-0.11.4/packaging/cmake/README.md    |   38 -
 .../librdkafka-0.11.4/packaging/cmake/config.h.in  |   40 -
 .../packaging/cmake/try_compile/atomic_32_test.c   |    8 -
 .../packaging/cmake/try_compile/atomic_64_test.c   |    8 -
 .../packaging/cmake/try_compile/dlopen_test.c      |   11 -
 .../packaging/cmake/try_compile/libsasl2_test.c    |    7 -
 .../cmake/try_compile/rdkafka_setup.cmake          |   76 -
 .../packaging/cmake/try_compile/regex_test.c       |   10 -
 .../packaging/cmake/try_compile/strndup_test.c     |    5 -
 .../packaging/cmake/try_compile/sync_32_test.c     |    8 -
 .../packaging/cmake/try_compile/sync_64_test.c     |    8 -
 .../librdkafka-0.11.4/packaging/debian/.gitignore  |    6 -
 .../librdkafka-0.11.4/packaging/debian/changelog   |   66 -
 .../librdkafka-0.11.4/packaging/debian/compat      |    1 -
 .../librdkafka-0.11.4/packaging/debian/control     |   49 -
 .../librdkafka-0.11.4/packaging/debian/copyright   |   84 -
 thirdparty/librdkafka-0.11.4/packaging/debian/docs |    3 -
 .../librdkafka-0.11.4/packaging/debian/gbp.conf    |    9 -
 .../packaging/debian/librdkafka-dev.dirs           |    2 -
 .../packaging/debian/librdkafka-dev.examples       |    2 -
 .../packaging/debian/librdkafka-dev.install        |    6 -
 .../packaging/debian/librdkafka-dev.substvars      |    1 -
 .../packaging/debian/librdkafka.dsc                |   16 -
 .../packaging/debian/librdkafka1-dbg.substvars     |    1 -
 .../packaging/debian/librdkafka1.dirs              |    1 -
 .../packaging/debian/librdkafka1.install           |    2 -
 .../debian/librdkafka1.postinst.debhelper          |    5 -
 .../packaging/debian/librdkafka1.postrm.debhelper  |    5 -
 .../packaging/debian/librdkafka1.symbols           |   64 -
 .../librdkafka-0.11.4/packaging/debian/rules       |   19 -
 .../packaging/debian/source/format                 |    1 -
 .../librdkafka-0.11.4/packaging/debian/watch       |    2 -
 .../librdkafka-0.11.4/packaging/get_version.py     |   21 -
 .../librdkafka-0.11.4/packaging/homebrew/README.md |   15 -
 .../packaging/homebrew/brew-update-pr.sh           |   31 -
 .../librdkafka-0.11.4/packaging/nuget/.gitignore   |    5 -
 .../librdkafka-0.11.4/packaging/nuget/README.md    |   50 -
 .../librdkafka-0.11.4/packaging/nuget/artifact.py  |  173 -
 .../msvcr120.zip                                   |  Bin 520101 -> 0 bytes
 .../msvcr120.zip                                   |  Bin 461473 -> 0 bytes
 .../librdkafka-0.11.4/packaging/nuget/nuget.sh     |   21 -
 .../librdkafka-0.11.4/packaging/nuget/packaging.py |  421 --
 .../librdkafka-0.11.4/packaging/nuget/release.py   |   83 -
 .../packaging/nuget/requirements.txt               |    2 -
 .../nuget/templates/librdkafka.redist.nuspec       |   21 -
 .../nuget/templates/librdkafka.redist.props        |   18 -
 .../nuget/templates/librdkafka.redist.targets      |   19 -
 .../packaging/nuget/zfile/__init__.py              |    0
 .../packaging/nuget/zfile/zfile.py                 |  100 -
 .../librdkafka-0.11.4/packaging/rpm/.gitignore     |    3 -
 .../librdkafka-0.11.4/packaging/rpm/Makefile       |   81 -
 .../librdkafka-0.11.4/packaging/rpm/el7-x86_64.cfg |   40 -
 .../packaging/rpm/librdkafka.spec                  |  104 -
 .../packaging/tools/build-debian.sh                |   53 -
 .../librdkafka-0.11.4/src-cpp/CMakeLists.txt       |   35 -
 thirdparty/librdkafka-0.11.4/src-cpp/ConfImpl.cpp  |   89 -
 .../librdkafka-0.11.4/src-cpp/ConsumerImpl.cpp     |  233 --
 .../librdkafka-0.11.4/src-cpp/HandleImpl.cpp       |  365 --
 .../src-cpp/KafkaConsumerImpl.cpp                  |  257 --
 thirdparty/librdkafka-0.11.4/src-cpp/Makefile      |   49 -
 .../librdkafka-0.11.4/src-cpp/MessageImpl.cpp      |   38 -
 .../librdkafka-0.11.4/src-cpp/MetadataImpl.cpp     |  151 -
 .../librdkafka-0.11.4/src-cpp/ProducerImpl.cpp     |  167 -
 thirdparty/librdkafka-0.11.4/src-cpp/QueueImpl.cpp |   71 -
 thirdparty/librdkafka-0.11.4/src-cpp/README.md     |   16 -
 thirdparty/librdkafka-0.11.4/src-cpp/RdKafka.cpp   |   52 -
 thirdparty/librdkafka-0.11.4/src-cpp/TopicImpl.cpp |  128 -
 .../src-cpp/TopicPartitionImpl.cpp                 |   55 -
 thirdparty/librdkafka-0.11.4/src-cpp/rdkafkacpp.h  | 2284 -----------
 .../librdkafka-0.11.4/src-cpp/rdkafkacpp_int.h     |  910 -----
 thirdparty/librdkafka-0.11.4/src/CMakeLists.txt    |  186 -
 thirdparty/librdkafka-0.11.4/src/Makefile          |   82 -
 thirdparty/librdkafka-0.11.4/src/crc32c.c          |  438 --
 thirdparty/librdkafka-0.11.4/src/crc32c.h          |   38 -
 .../src/librdkafka_cgrp_synch.png                  |  Bin 93796 -> 0 bytes
 thirdparty/librdkafka-0.11.4/src/lz4.c             | 1462 -------
 thirdparty/librdkafka-0.11.4/src/lz4.h             |  463 ---
 thirdparty/librdkafka-0.11.4/src/lz4frame.c        | 1440 -------
 thirdparty/librdkafka-0.11.4/src/lz4frame.h        |  367 --
 thirdparty/librdkafka-0.11.4/src/lz4frame_static.h |   98 -
 thirdparty/librdkafka-0.11.4/src/lz4hc.c           |  786 ----
 thirdparty/librdkafka-0.11.4/src/lz4hc.h           |  269 --
 thirdparty/librdkafka-0.11.4/src/lz4opt.h          |  360 --
 thirdparty/librdkafka-0.11.4/src/queue.h           |  850 ----
 thirdparty/librdkafka-0.11.4/src/rd.h              |  457 ---
 thirdparty/librdkafka-0.11.4/src/rdaddr.c          |  220 -
 thirdparty/librdkafka-0.11.4/src/rdaddr.h          |  187 -
 thirdparty/librdkafka-0.11.4/src/rdatomic.h        |  191 -
 thirdparty/librdkafka-0.11.4/src/rdavg.h           |   97 -
 thirdparty/librdkafka-0.11.4/src/rdavl.c           |  214 -
 thirdparty/librdkafka-0.11.4/src/rdavl.h           |  256 --
 thirdparty/librdkafka-0.11.4/src/rdbuf.c           | 1550 -------
 thirdparty/librdkafka-0.11.4/src/rdbuf.h           |  325 --
 thirdparty/librdkafka-0.11.4/src/rdcrc32.c         |  113 -
 thirdparty/librdkafka-0.11.4/src/rdcrc32.h         |  146 -
 thirdparty/librdkafka-0.11.4/src/rddl.c            |  179 -
 thirdparty/librdkafka-0.11.4/src/rddl.h            |   41 -
 thirdparty/librdkafka-0.11.4/src/rdendian.h        |  169 -
 thirdparty/librdkafka-0.11.4/src/rdgz.c            |  124 -
 thirdparty/librdkafka-0.11.4/src/rdgz.h            |   45 -
 thirdparty/librdkafka-0.11.4/src/rdinterval.h      |  117 -
 thirdparty/librdkafka-0.11.4/src/rdkafka.c         | 3518 ----------------
 thirdparty/librdkafka-0.11.4/src/rdkafka.h         | 4211 --------------------
 .../librdkafka-0.11.4/src/rdkafka_assignor.c       |  551 ---
 .../librdkafka-0.11.4/src/rdkafka_assignor.h       |  159 -
 thirdparty/librdkafka-0.11.4/src/rdkafka_broker.c  | 4038 -------------------
 thirdparty/librdkafka-0.11.4/src/rdkafka_broker.h  |  361 --
 thirdparty/librdkafka-0.11.4/src/rdkafka_buf.c     |  451 ---
 thirdparty/librdkafka-0.11.4/src/rdkafka_buf.h     |  946 -----
 thirdparty/librdkafka-0.11.4/src/rdkafka_cgrp.c    | 3262 ---------------
 thirdparty/librdkafka-0.11.4/src/rdkafka_cgrp.h    |  278 --
 thirdparty/librdkafka-0.11.4/src/rdkafka_conf.c    | 2248 -----------
 thirdparty/librdkafka-0.11.4/src/rdkafka_conf.h    |  350 --
 thirdparty/librdkafka-0.11.4/src/rdkafka_event.c   |  232 --
 thirdparty/librdkafka-0.11.4/src/rdkafka_event.h   |   81 -
 thirdparty/librdkafka-0.11.4/src/rdkafka_feature.c |  444 ---
 thirdparty/librdkafka-0.11.4/src/rdkafka_feature.h |   82 -
 thirdparty/librdkafka-0.11.4/src/rdkafka_header.c  |  222 --
 thirdparty/librdkafka-0.11.4/src/rdkafka_header.h  |   76 -
 thirdparty/librdkafka-0.11.4/src/rdkafka_int.h     |  446 ---
 .../librdkafka-0.11.4/src/rdkafka_interceptor.c    |  675 ----
 .../librdkafka-0.11.4/src/rdkafka_interceptor.h    |   80 -
 thirdparty/librdkafka-0.11.4/src/rdkafka_lz4.c     |  436 --
 thirdparty/librdkafka-0.11.4/src/rdkafka_lz4.h     |   43 -
 .../librdkafka-0.11.4/src/rdkafka_metadata.c       | 1031 -----
 .../librdkafka-0.11.4/src/rdkafka_metadata.h       |  160 -
 .../librdkafka-0.11.4/src/rdkafka_metadata_cache.c |  732 ----
 thirdparty/librdkafka-0.11.4/src/rdkafka_msg.c     | 1277 ------
 thirdparty/librdkafka-0.11.4/src/rdkafka_msg.h     |  381 --
 thirdparty/librdkafka-0.11.4/src/rdkafka_msgset.h  |   50 -
 .../librdkafka-0.11.4/src/rdkafka_msgset_reader.c  | 1137 ------
 .../librdkafka-0.11.4/src/rdkafka_msgset_writer.c  | 1226 ------
 thirdparty/librdkafka-0.11.4/src/rdkafka_offset.c  | 1145 ------
 thirdparty/librdkafka-0.11.4/src/rdkafka_offset.h  |   74 -
 thirdparty/librdkafka-0.11.4/src/rdkafka_op.c      |  660 ---
 thirdparty/librdkafka-0.11.4/src/rdkafka_op.h      |  403 --
 .../librdkafka-0.11.4/src/rdkafka_partition.c      | 3363 ----------------
 .../librdkafka-0.11.4/src/rdkafka_partition.h      |  641 ---
 thirdparty/librdkafka-0.11.4/src/rdkafka_pattern.c |  224 --
 thirdparty/librdkafka-0.11.4/src/rdkafka_pattern.h |   68 -
 thirdparty/librdkafka-0.11.4/src/rdkafka_plugin.c  |  209 -
 thirdparty/librdkafka-0.11.4/src/rdkafka_plugin.h  |   37 -
 thirdparty/librdkafka-0.11.4/src/rdkafka_proto.h   |  502 ---
 thirdparty/librdkafka-0.11.4/src/rdkafka_queue.c   |  866 ----
 thirdparty/librdkafka-0.11.4/src/rdkafka_queue.h   |  769 ----
 .../librdkafka-0.11.4/src/rdkafka_range_assignor.c |  125 -
 thirdparty/librdkafka-0.11.4/src/rdkafka_request.c | 1997 ----------
 thirdparty/librdkafka-0.11.4/src/rdkafka_request.h |  198 -
 .../src/rdkafka_roundrobin_assignor.c              |  114 -
 thirdparty/librdkafka-0.11.4/src/rdkafka_sasl.c    |  343 --
 thirdparty/librdkafka-0.11.4/src/rdkafka_sasl.h    |   49 -
 .../librdkafka-0.11.4/src/rdkafka_sasl_cyrus.c     |  623 ---
 .../librdkafka-0.11.4/src/rdkafka_sasl_int.h       |   72 -
 .../librdkafka-0.11.4/src/rdkafka_sasl_plain.c     |  128 -
 .../librdkafka-0.11.4/src/rdkafka_sasl_scram.c     |  901 -----
 .../librdkafka-0.11.4/src/rdkafka_sasl_win32.c     |  526 ---
 .../librdkafka-0.11.4/src/rdkafka_subscription.c   |  186 -
 thirdparty/librdkafka-0.11.4/src/rdkafka_timer.c   |  292 --
 thirdparty/librdkafka-0.11.4/src/rdkafka_timer.h   |   80 -
 thirdparty/librdkafka-0.11.4/src/rdkafka_topic.c   | 1310 ------
 thirdparty/librdkafka-0.11.4/src/rdkafka_topic.h   |  188 -
 .../librdkafka-0.11.4/src/rdkafka_transport.c      | 1607 --------
 .../librdkafka-0.11.4/src/rdkafka_transport.h      |   79 -
 .../librdkafka-0.11.4/src/rdkafka_transport_int.h  |   87 -
 thirdparty/librdkafka-0.11.4/src/rdlist.c          |  333 --
 thirdparty/librdkafka-0.11.4/src/rdlist.h          |  269 --
 thirdparty/librdkafka-0.11.4/src/rdlog.c           |   89 -
 thirdparty/librdkafka-0.11.4/src/rdlog.h           |   40 -
 thirdparty/librdkafka-0.11.4/src/rdmurmur2.c       |  159 -
 thirdparty/librdkafka-0.11.4/src/rdmurmur2.h       |    7 -
 thirdparty/librdkafka-0.11.4/src/rdports.c         |   60 -
 thirdparty/librdkafka-0.11.4/src/rdports.h         |   36 -
 thirdparty/librdkafka-0.11.4/src/rdposix.h         |  184 -
 thirdparty/librdkafka-0.11.4/src/rdrand.c          |   50 -
 thirdparty/librdkafka-0.11.4/src/rdrand.h          |   48 -
 thirdparty/librdkafka-0.11.4/src/rdregex.c         |  157 -
 thirdparty/librdkafka-0.11.4/src/rdregex.h         |   40 -
 thirdparty/librdkafka-0.11.4/src/rdsignal.h        |   57 -
 thirdparty/librdkafka-0.11.4/src/rdstring.c        |  204 -
 thirdparty/librdkafka-0.11.4/src/rdstring.h        |   59 -
 thirdparty/librdkafka-0.11.4/src/rdsysqueue.h      |  348 --
 thirdparty/librdkafka-0.11.4/src/rdtime.h          |  184 -
 thirdparty/librdkafka-0.11.4/src/rdtypes.h         |   45 -
 thirdparty/librdkafka-0.11.4/src/rdunittest.c      |   63 -
 thirdparty/librdkafka-0.11.4/src/rdunittest.h      |   83 -
 thirdparty/librdkafka-0.11.4/src/rdvarint.c        |  126 -
 thirdparty/librdkafka-0.11.4/src/rdvarint.h        |  169 -
 thirdparty/librdkafka-0.11.4/src/rdwin32.h         |  265 --
 thirdparty/librdkafka-0.11.4/src/regexp.c          | 1156 ------
 thirdparty/librdkafka-0.11.4/src/regexp.h          |   31 -
 thirdparty/librdkafka-0.11.4/src/snappy.c          | 1838 ---------
 thirdparty/librdkafka-0.11.4/src/snappy.h          |   34 -
 thirdparty/librdkafka-0.11.4/src/snappy_compat.h   |  169 -
 thirdparty/librdkafka-0.11.4/src/tinycthread.c     | 1039 -----
 thirdparty/librdkafka-0.11.4/src/tinycthread.h     |  528 ---
 thirdparty/librdkafka-0.11.4/src/win32_config.h    |   45 -
 thirdparty/librdkafka-0.11.4/src/xxhash.c          |  889 -----
 thirdparty/librdkafka-0.11.4/src/xxhash.h          |  293 --
 264 files changed, 46 insertions(+), 89204 deletions(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 7e34237..91657e8 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -531,7 +531,7 @@ endif()
 ## Create LibRdKafka Extension
 option(ENABLE_LIBRDKAFKA "Enables the librdkafka extension." OFF)
 if (ENABLE_ALL OR ENABLE_LIBRDKAFKA)
-	createExtension(RDKAFKA-EXTENSIONS "RDKAFKA EXTENSIONS" "This Enables librdkafka functionality including PublishKafka" "extensions/librdkafka" "${TEST_DIR}/kafka-tests" "TRUE" "thirdparty/librdkafka-0.11.4")
+	createExtension(RDKAFKA-EXTENSIONS "RDKAFKA EXTENSIONS" "This Enables librdkafka functionality including PublishKafka" "extensions/librdkafka" "${TEST_DIR}/kafka-tests")
 endif()
 
 ## Scripting extensions
diff --git a/cmake/ssl/FindOpenSSL.cmake b/cmake/ssl/FindOpenSSL.cmake
index bbce9ec..0ecdf2e 100644
--- a/cmake/ssl/FindOpenSSL.cmake
+++ b/cmake/ssl/FindOpenSSL.cmake
@@ -22,6 +22,7 @@ set(OPENSSL_INCLUDE_DIR "${LIBRESSL_SRC_DIR}/include" CACHE STRING "" FORCE)
 set(OPENSSL_CRYPTO_LIBRARY "${LIBRESSL_BIN_DIR}/lib/${BYPRODUCT_PREFIX}crypto${BYPRODUCT_SUFFIX}" CACHE STRING "" FORCE)
 set(OPENSSL_SSL_LIBRARY "${LIBRESSL_BIN_DIR}/lib/${BYPRODUCT_PREFIX}ssl${BYPRODUCT_SUFFIX}" CACHE STRING "" FORCE)
 set(OPENSSL_LIBRARIES "${LIBRESSL_BIN_DIR}/lib/${BYPRODUCT_PREFIX}tls${BYPRODUCT_SUFFIX}" ${OPENSSL_SSL_LIBRARY} ${OPENSSL_CRYPTO_LIBRARY} CACHE STRING "" FORCE)
+set(OPENSSL_VERSION "1.0.2" CACHE STRING "" FORCE)
 
  if(NOT TARGET OpenSSL::Crypto )
     add_library(OpenSSL::Crypto UNKNOWN IMPORTED)
@@ -43,4 +44,4 @@ set(OPENSSL_LIBRARIES "${LIBRESSL_BIN_DIR}/lib/${BYPRODUCT_PREFIX}tls${BYPRODUCT
         IMPORTED_LINK_INTERFACE_LANGUAGES "C"
         IMPORTED_LOCATION "${OPENSSL_SSL_LIBRARY}")
     
-  endif()
\ No newline at end of file
+  endif()
diff --git a/extensions/librdkafka/CMakeLists.txt b/extensions/librdkafka/CMakeLists.txt
index aaa4683..e1f4247 100644
--- a/extensions/librdkafka/CMakeLists.txt
+++ b/extensions/librdkafka/CMakeLists.txt
@@ -19,8 +19,6 @@
 
 include(${CMAKE_SOURCE_DIR}/extensions/ExtensionHeader.txt)
 
-include_directories(../../thirdparty/librdkafka-0.11.1/src ./../thirdparty/librdkafka-0.11.1/src-cpp)
-
 file(GLOB SOURCES  "*.cpp")
 
 add_library(minifi-rdkafka-extensions STATIC ${SOURCES})
@@ -32,18 +30,42 @@ if(CMAKE_THREAD_LIBS_INIT)
   target_link_libraries(minifi-rdkafka-extensions "${CMAKE_THREAD_LIBS_INIT}")
 endif()
 
+  set(BASE_DIR "${CMAKE_CURRENT_BINARY_DIR}/thirdparty/kafka")
+  if (WIN32)
+  	set(BYPRODUCT "${BASE_DIR}/install/lib/librdkafka.lib")
+  else()
+  	set(BYPRODUCT "${BASE_DIR}/install/lib/librdkafka.a")
+  endif()
 
-# Include UUID
-find_package(UUID REQUIRED)
-target_link_libraries(minifi-rdkafka-extensions ${LIBMINIFI} ${UUID_LIBRARIES})
-target_link_libraries(minifi-rdkafka-extensions ${CMAKE_DL_LIBS} )
-if (LibRdKafka_FOUND AND NOT BUILD_LIBRDKAFKA)
-	target_link_libraries(minifi-rdkafka-extensions ${LibRdKafka_LIBRARIES} )
-else()
-	target_link_libraries(minifi-rdkafka-extensions rdkafka )
+list(APPEND CMAKE_MODULE_PATH_PASSTHROUGH_LIST "${CMAKE_SOURCE_DIR}/cmake/ssl")
+list(APPEND CMAKE_MODULE_PATH_PASSTHROUGH_LIST "${CMAKE_SOURCE_DIR}/cmake/zlib/dummy")
+    
+ExternalProject_Add(
+    kafka-external
+    GIT_REPOSITORY "https://github.com/edenhill/librdkafka.git"
+    GIT_TAG "v1.0.1" 
+    PREFIX "${BASE_DIR}"
+    CMAKE_ARGS ${PASSTHROUGH_CMAKE_ARGS}
+               "-DCMAKE_INSTALL_PREFIX=${BASE_DIR}/install"
+               "-DWITH_SASL=OFF"
+               "-DOPENSSL_VERSION=1.0.2"
+               "-DRDKAFKA_BUILD_STATIC=ON"
+               "-DRDKAFKA_BUILD_EXAMPLES=OFF"
+               "-DRDKAFKA_BUILD_TESTS=OFF"
+               "-DENABLE_LZ4_EXT=OFF"
+               "-DCMAKE_MODULE_PATH=${CMAKE_MODULE_PATH_PASSTHROUGH_LIST}"
+               "-DCMAKE_C_FLAGS=${CURL_C_FLAGS}"
+               "-DCMAKE_CXX_FLAGS=${CURL_CXX_FLAGS}"
+    EXCLUDE_FROM_ALL TRUE
+  )
+if ( NOT USE_SYSTEM_OPENSSL )
+	add_dependencies(kafka-external libressl-portable)
 endif()
+set(KAFKA_INCLUDE "${BASE_DIR}/install/include/librdkafka/")
+add_dependencies(minifi-rdkafka-extensions kafka-external)
 include_directories(${ZLIB_INCLUDE_DIRS})
-target_link_libraries (minifi-rdkafka-extensions ${ZLIB_LIBRARIES})
+include_directories(${KAFKA_INCLUDE})
+target_link_libraries (minifi-rdkafka-extensions ${BYPRODUCT})
 if (WIN32)
     set_target_properties(minifi-rdkafka-extensions PROPERTIES
         LINK_FLAGS "/WHOLERDKAFKA"
@@ -63,3 +85,4 @@ SET (RDKAFKA-EXTENSIONS minifi-rdkafka-extensions PARENT_SCOPE)
 
 register_extension(minifi-rdkafka-extensions)
 
+register_extension_linter(minifi-rdkafka-extensions-linter)
diff --git a/extensions/librdkafka/PublishKafka.cpp b/extensions/librdkafka/PublishKafka.cpp
index 731645e..db4b84b 100644
--- a/extensions/librdkafka/PublishKafka.cpp
+++ b/extensions/librdkafka/PublishKafka.cpp
@@ -42,21 +42,25 @@ core::Property PublishKafka::SeedBrokers(
 core::Property PublishKafka::Topic(core::PropertyBuilder::createProperty("Topic Name")->withDescription("The Kafka Topic of interest")->isRequired(true)->supportsExpressionLanguage(true)->build());
 
 core::Property PublishKafka::DeliveryGuarantee(
-    core::PropertyBuilder::createProperty("Delivery Guarantee")->withDescription("TSpecifies the requirement for guaranteeing that a message is sent to Kafka")->isRequired(false)
+    core::PropertyBuilder::createProperty("Delivery Guarantee")->withDescription("Specifies the requirement for guaranteeing that a message is sent to Kafka")->isRequired(false)
         ->supportsExpressionLanguage(true)->withDefaultValue("DELIVERY_ONE_NODE")->build());
 
 core::Property PublishKafka::MaxMessageSize(core::PropertyBuilder::createProperty("Max Request Size")->withDescription("Maximum Kafka protocol request message size")->isRequired(false)->build());
 
 core::Property PublishKafka::RequestTimeOut(
-    core::PropertyBuilder::createProperty("Request Timeout")->withDescription("The ack timeout of the producer request in milliseconds")->isRequired(false)->supportsExpressionLanguage(true)->build());
+    core::PropertyBuilder::createProperty("Request Timeout")->withDescription("The ack timeout of the producer request in milliseconds")->isRequired(false)->withDefaultValue<core::TimePeriodValue>(
+        "10 sec")->supportsExpressionLanguage(true)->build());
 
 core::Property PublishKafka::ClientName(
     core::PropertyBuilder::createProperty("Client Name")->withDescription("Client Name to use when communicating with Kafka")->isRequired(true)->supportsExpressionLanguage(true)->build());
 
 /**
- * These needn's have EL support.
+ * These don't appear to need EL support
  */
-core::Property PublishKafka::BatchSize("Batch Size", "Maximum number of messages batched in one MessageSet", "");
+
+core::Property PublishKafka::BatchSize(
+    core::PropertyBuilder::createProperty("Batch Size")->withDescription("Maximum number of messages batched in one MessageSet")->isRequired(false)->withDefaultValue<uint32_t>(10)->build());
+
 core::Property PublishKafka::AttributeNameRegex("Attributes to Send as Headers", "Any attribute whose name matches the regex will be added to the Kafka messages as a Header", "");
 core::Property PublishKafka::QueueBufferMaxTime("Queue Buffering Max Time", "Delay to wait for messages in the producer queue to accumulate before constructing message batches", "");
 core::Property PublishKafka::QueueBufferMaxSize("Queue Max Buffer Size", "Maximum total message size sum allowed on the producer queue", "");
@@ -112,7 +116,6 @@ void PublishKafka::initialize() {
 }
 
 void PublishKafka::onSchedule(const std::shared_ptr<core::ProcessContext> &context, const std::shared_ptr<core::ProcessSessionFactory> &sessionFactory) {
-
 }
 
 bool PublishKafka::configureNewConnection(const std::shared_ptr<KafkaConnection> &conn, const std::shared_ptr<core::ProcessContext> &context, const std::shared_ptr<core::FlowFile> &ff) {
@@ -124,8 +127,6 @@ bool PublishKafka::configureNewConnection(const std::shared_ptr<KafkaConnection>
 
   auto conf_ = rd_kafka_conf_new();
 
-  //auto topic_conf_ = rd_kafka_topic_conf_new();
-
   auto key = conn->getKey();
 
   if (!key->brokers_.empty()) {
@@ -317,7 +318,6 @@ void PublishKafka::onTrigger(const std::shared_ptr<core::ProcessContext> &contex
     conn = connection_pool_.getOrCreateConnection(key);
 
     if (!conn->initialized()) {
-
       logger_->log_trace("Connection not initialized to %s, %s, %s", client_id, brokers, topic);
       // get the ownership so we can configure this connection
       KafkaLease lease = conn->obtainOwnership();
@@ -332,7 +332,6 @@ void PublishKafka::onTrigger(const std::shared_ptr<core::ProcessContext> &contex
     }
 
     if (!conn->hasTopic(topic)) {
-
       auto topic_conf_ = rd_kafka_topic_conf_new();
       auto topic_reference = rd_kafka_topic_new(conn->getConnection(), topic.c_str(), topic_conf_);
       rd_kafka_conf_res_t result;
@@ -363,11 +362,6 @@ void PublishKafka::onTrigger(const std::shared_ptr<core::ProcessContext> &contex
 
       conn->putTopic(topic, kafkaTopicref);
     }
-    /*rd_kafka_conf_set(conf_, "client.id", client_id.c_str(), errstr, sizeof(errstr));
-     logger_->log_debug("PublishKafka: client.id [%s]", value);
-     if (result != RD_KAFKA_CONF_OK)
-     logger_->log_error("PublishKafka: configure error result [%s]", errstr);
-     */
   } else {
     logger_->log_error("Do not have required properties");
     session->transfer(flowFile, Failure);
diff --git a/extensions/librdkafka/PublishKafka.h b/extensions/librdkafka/PublishKafka.h
index de96d17..726632b 100644
--- a/extensions/librdkafka/PublishKafka.h
+++ b/extensions/librdkafka/PublishKafka.h
@@ -172,7 +172,7 @@ class KafkaConnection {
     return nullptr;
   }
 
-  KafkaConnectionKey const *getKey() const {
+  KafkaConnectionKey const * const getKey() const {
     return &key_;
   }
 
diff --git a/thirdparty/librdkafka-0.11.4/.appveyor.yml b/thirdparty/librdkafka-0.11.4/.appveyor.yml
deleted file mode 100644
index 2cb8722..0000000
--- a/thirdparty/librdkafka-0.11.4/.appveyor.yml
+++ /dev/null
@@ -1,88 +0,0 @@
-version: 0.11.4-R-pre{build}
-pull_requests:
-  do_not_increment_build_number: true
-image: Visual Studio 2013
-configuration: Release
-environment:
-  matrix:
-  - platform: x64
-  - platform: win32
-install:
-- ps: "$OpenSSLVersion = \"1_0_2o\"\n$OpenSSLExe = \"OpenSSL-$OpenSSLVersion.exe\"\n\nRemove-Item C:\\OpenSSL-Win32 -recurse\nRemove-Item C:\\OpenSSL-Win64 -recurse\n\nWrite-Host \"Installing OpenSSL v1.0 32-bit ...\" -ForegroundColor Cyan\nWrite-Host \"Downloading...\"\n$exePath = \"$($env:USERPROFILE)\\Win32OpenSSL-1_0_2o.exe\"\n(New-Object Net.WebClient).DownloadFile('https://slproweb.com/download/Win32OpenSSL-1_0_2o.exe', $exePath)\nWrite-Host \"Installing...\"\ncmd /c start /wait $e [...]
-cache:
-- c:\OpenSSL-Win32
-- c:\OpenSSL-Win64
-nuget:
-  account_feed: true
-  project_feed: true
-  disable_publish_on_pr: true
-before_build:
-- cmd: nuget restore win32/librdkafka.sln
-build:
-  project: win32/librdkafka.sln
-  publish_nuget: true
-  publish_nuget_symbols: true
-  include_nuget_references: true
-  parallel: true
-  verbosity: normal
-test_script:
-- cmd: if exist DISABLED\win32\outdir\v140 ( win32\outdir\v140\%PLATFORM%\%CONFIGURATION%\tests.exe -l -p1 ) else ( win32\outdir\v120\%PLATFORM%\%CONFIGURATION%\tests.exe -l -p1 )
-artifacts:
-- path: test_report*.json
-  name: Test report
-- path: '*.nupkg'
-  name: Packages
-- path: '**\*.dll'
-  name: Libraries
-- path: '**\*.lib'
-  name: Libraries
-- path: '**\*.pdb'
-  name: Libraries
-- path: '**\*.exe'
-  name: Executables
-before_deploy:
-- ps: >-
-    # FIXME: Add to Deployment condition above:
-
-    # APPVEYOR_REPO_TAG = true
-
-
-
-    # This is the CoApp .autopkg file to create.
-
-    $autopkgFile = "win32/librdkafka.autopkg"
-
-
-    # Get the ".autopkg.template" file, replace "@version" with the Appveyor version number, then save to the ".autopkg" file.
-
-    cat ($autopkgFile + ".template") | % { $_ -replace "@version", $env:appveyor_build_version } > $autopkgFile
-
-
-    # Use the CoApp tools to create NuGet native packages from the .autopkg.
-
-    Write-NuGetPackage $autopkgFile
-
-
-    # Push all newly created .nupkg files as Appveyor artifacts for later deployment.
-
-    Get-ChildItem .\*.nupkg | % { Push-AppveyorArtifact $_.FullName -FileName $_.Name }
-deploy:
-- provider: S3
-  access_key_id:
-    secure: t+Xo4x1mYVbqzvUDlnuMgFGp8LjQJNOfsDUAMxBsVH4=
-  secret_access_key:
-    secure: SNziQPPJs4poCHM7dk6OxufUYcGQhMWiNPx6Y1y6DYuWGjPc3K0APGeousLHsbLv
-  region: us-west-1
-  bucket: librdkafka-ci-packages
-  folder: librdkafka/p-librdkafka__bld-appveyor__plat-windows__arch-$(platform)__bldtype-$(configuration)__tag-$(APPVEYOR_REPO_TAG_NAME)__sha-$(APPVEYOR_REPO_COMMIT)__bid-$(APPVEYOR_BUILD_ID)
-  artifact: /.*\.(nupkg)/
-  max_error_retry: 3
-  on:
-    APPVEYOR_REPO_TAG: true
-notifications:
-- provider: Email
-  to:
-  - magnus@edenhill.se
-  on_build_success: false
-  on_build_failure: true
-  on_build_status_changed: true
diff --git a/thirdparty/librdkafka-0.11.4/.dir-locals.el b/thirdparty/librdkafka-0.11.4/.dir-locals.el
deleted file mode 100644
index 22ca922..0000000
--- a/thirdparty/librdkafka-0.11.4/.dir-locals.el
+++ /dev/null
@@ -1,3 +0,0 @@
-( (c-mode . ((c-file-style . "linux"))) )
-((nil . ((compile-command . "LC_ALL=C make -C $(git rev-parse --show-toplevels) -k"))))
-
diff --git a/thirdparty/librdkafka-0.11.4/.doozer.json b/thirdparty/librdkafka-0.11.4/.doozer.json
deleted file mode 100644
index 27252da..0000000
--- a/thirdparty/librdkafka-0.11.4/.doozer.json
+++ /dev/null
@@ -1,110 +0,0 @@
-{
-  "targets": {
-    "xenial-amd64": {
-
-      "buildenv": "xenial-amd64",
-      "builddeps": [
-        "build-essential",
-        "python",
-        "zlib1g-dev",
-        "libssl-dev",
-        "libsasl2-dev"
-      ],
-      "buildcmd": [
-        "./configure",
-          "make -j ${PARALLEL}",
-          "make -C tests build"
-      ],
-      "testcmd": [
-          "make -C tests run_local"
-      ],
-    },
-
-    "xenial-i386": {
-      "_comment": "including liblz4-dev here to verify that WITH_LZ4_EXT works",
-      "buildenv": "xenial-i386",
-      "builddeps": [
-        "build-essential",
-        "python",
-        "zlib1g-dev",
-        "libssl-dev",
-        "libsasl2-dev",
-        "liblz4-dev"
-      ],
-      "buildcmd": [
-        "./configure",
-        "make -j ${PARALLEL}",
-        "make -C tests build"
-      ],
-      "testcmd": [
-        "make -C tests run_local"
-      ],
-    },
-
-    "xenial-armhf": {
-
-      "buildenv": "xenial-armhf",
-      "builddeps": [
-        "build-essential",
-        "python",
-        "zlib1g-dev",
-        "libssl-dev",
-        "libsasl2-dev"
-      ],
-      "buildcmd": [
-        "./configure",
-        "make -j ${PARALLEL}",
-          "make -j ${PARALLEL} -C tests build",
-      ],
-      "testcmd": [
-        "cd tests",
-        "./run-test.sh -p1 -l ./merged",
-        "cd .."
-      ],
-    },
-
-    "stretch-mips": {
-
-      "buildenv": "stretch-mips",
-      "builddeps": [
-        "build-essential",
-        "python",
-        "zlib1g-dev",
-        "libssl-dev",
-        "libsasl2-dev"
-      ],
-      "buildcmd": [
-        "./configure",
-        "make -j ${PARALLEL}",
-          "make -j ${PARALLEL} -C tests build",
-      ],
-      "testcmd": [
-        "cd tests",
-        "./run-test.sh -p1 -l ./merged",
-        "cd .."
-      ],
-    },
-
-    "cmake-xenial-amd64": {
-
-      "buildenv": "xenial-amd64",
-      "builddeps": [
-        "build-essential",
-        "python",
-        "zlib1g-dev",
-        "libssl-dev",
-        "libsasl2-dev",
-        "cmake"
-      ],
-      "buildcmd": [
-        "cmake -H. -B_builds -DCMAKE_VERBOSE_MAKEFILE=ON -DCMAKE_BUILD_TYPE=Debug",
-        "cmake --build _builds",
-      ],
-      "testcmd": [
-        "cd _builds",
-        "ctest -VV -R RdKafkaTestBrokerLess"
-      ],
-    }
-  },
-  "artifacts": ["config.log", "Makefile.config", "config.h"]
-}
diff --git a/thirdparty/librdkafka-0.11.4/.github/ISSUE_TEMPLATE b/thirdparty/librdkafka-0.11.4/.github/ISSUE_TEMPLATE
deleted file mode 100644
index eb538b3..0000000
--- a/thirdparty/librdkafka-0.11.4/.github/ISSUE_TEMPLATE
+++ /dev/null
@@ -1,32 +0,0 @@
-Read the FAQ first: https://github.com/edenhill/librdkafka/wiki/FAQ
-
-
-
-Description
-===========
-<your issue description goes here>
-
-
-How to reproduce
-================
-<your steps how to reproduce goes here, or remove section if not relevant>
-
-
-**IMPORTANT**: Always try to reproduce the issue on the latest released version (see https://github.com/edenhill/librdkafka/releases), if it can't be reproduced on the latest version the issue has been fixed.
-
-
-Checklist
-=========
-
-**IMPORTANT**: We will close issues where the checklist has not been completed.
-
-Please provide the following information:
-
- - [x] librdkafka version (release number or git tag): `<REPLACE with e.g., v0.10.5 or a git sha. NOT "latest" or "current">`
- - [ ] Apache Kafka version: `<REPLACE with e.g., 0.10.2.3>`
- - [ ] librdkafka client configuration: `<REPLACE with e.g., message.timeout.ms=123, auto.reset.offset=earliest, ..>`
- - [ ] Operating system: `<REPLACE with e.g., Centos 5 (x64)>`
- - [ ] Provide logs (with `debug=..` as necessary) from librdkafka
- - [ ] Provide broker log excerpts
- - [ ] Critical issue
-
diff --git a/thirdparty/librdkafka-0.11.4/.gitignore b/thirdparty/librdkafka-0.11.4/.gitignore
deleted file mode 100644
index 0598bca..0000000
--- a/thirdparty/librdkafka-0.11.4/.gitignore
+++ /dev/null
@@ -1,28 +0,0 @@
-config.h
-config.log*
-config.cache
-Makefile.config
-rdkafka*.pc
-*~
-\#*
-*.o
-*.so
-*.so.?
-*.dylib
-*.a
-*.d
-librdkafka*.lds
-core
-vgcore.*
-*dSYM/
-*.offset
-SOURCES
-gmon.out
-*.gz
-*.bz2
-*.deb
-*.rpm
-staging-docs
-tmp
-stats*.json
-test_report*.json
diff --git a/thirdparty/librdkafka-0.11.4/.travis.yml b/thirdparty/librdkafka-0.11.4/.travis.yml
deleted file mode 100644
index 4154de5..0000000
--- a/thirdparty/librdkafka-0.11.4/.travis.yml
+++ /dev/null
@@ -1,42 +0,0 @@
-language: c
-cache: ccache
-env:
-- ARCH=x64
-compiler:
-- gcc
-- clang
-os:
-- linux
-- osx
-dist: trusty
-sudo: false
-before_install:
-  - if [[ "${TRAVIS_OS_NAME}_$CC" == "linux_gcc" ]]; then sudo make -C packaging/rpm MOCK_CONFIG=el7-x86_64 prepare_ubuntu ; fi
-before_script:
- - ccache -s || echo "CCache is not available."
-script:
-- rm -rf artifacts dest
-- mkdir dest artifacts
-- if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then CPPFLAGS="-I/usr/local/opt/openssl/include
-  -L/usr/local/opt/openssl/lib" ./configure --enable-static --disable-lz4 --prefix="$PWD/dest" ; else ./configure --enable-static --disable-lz4 --prefix="$PWD/dest" ; fi
-- make -j2 all examples check && make -C tests run_local
-- make install
-- (cd dest && tar cvzf ../artifacts/librdkafka.tar.gz .)
-- if [[ "${TRAVIS_OS_NAME}_$CC" == "linux_gcc" ]]; then sudo make -C packaging/rpm MOCK_CONFIG=el7-x86_64 all copy-artifacts ; fi
-- if [[ "${TRAVIS_OS_NAME}_$CC" == "linux_gcc" ]]; then docker run -it -v $PWD:/v microsoft/dotnet:2-sdk /v/packaging/tools/build-debian.sh /v /v/artifacts/librdkafka-debian9.tgz; fi
-deploy:
-  provider: s3
-  access_key_id:
-    secure: "nGcknL5JZ5XYCEJ96UeDtnLOOidWsfXrk2x91Z9Ip2AyrUtdfZBc8BX16C7SAQbBeb4PQu/OjRBQWTIRqU64ZEQU1Z0lHjxCiGEt5HO0YlXWvZ8OJGAQ0wSmrQED850lWjGW2z5MpDqqxbZyATE8VksW5dtGiHgNuITinVW8Lok="
-  secret_access_key:
-    secure: "J+LygNeoXQImN9E7EARNmcgLpqm6hoRjxwHJaen9opeuSDowKDpZxP7ixSml3BEn2pJJ4kpsdj5A8t5uius+qC4nu9mqSAZcmdKeSmliCbH7kj4J9MR7LBcXk3Uf515QGm7y4nzw+c1PmpteYL5S06Kgqp+KkPRLKTS2NevVZuY="
-  bucket: librdkafka-ci-packages
-  region: us-west-1
-  skip_cleanup: true
-  local-dir: artifacts
-  upload-dir: librdkafka/p-librdkafka__bld-travis__plat-${TRAVIS_OS_NAME}__arch-${ARCH}__tag-${TRAVIS_TAG}__sha-${TRAVIS_COMMIT}__bid-${TRAVIS_JOB_NUMBER}
-  on:
-    condition: "$CC = gcc"
-    repo: edenhill/librdkafka
-    all_branches: true
-    tags: true
diff --git a/thirdparty/librdkafka-0.11.4/CMakeLists.txt b/thirdparty/librdkafka-0.11.4/CMakeLists.txt
deleted file mode 100644
index b5dd530..0000000
--- a/thirdparty/librdkafka-0.11.4/CMakeLists.txt
+++ /dev/null
@@ -1,183 +0,0 @@
-cmake_minimum_required(VERSION 3.2)
-project(RdKafka)
-
-# Options. No 'RDKAFKA_' prefix to match old C++ code. {
-
-# This option doesn't affect build in fact, only C code
-# (see 'rd_kafka_version_str'). In CMake the build type feature usually used
-# (like Debug, Release, etc.).
-option(WITHOUT_OPTIMIZATION "Disable optimization" OFF)
-
-option(ENABLE_DEVEL "Enable development asserts, checks, etc" OFF)
-option(ENABLE_REFCNT_DEBUG "Enable refcnt debugging" OFF)
-option(ENABLE_SHAREDPTR_DEBUG "Enable sharedptr debugging" OFF)
-
-set(TRYCOMPILE_SRC_DIR "${CMAKE_CURRENT_LIST_DIR}/packaging/cmake/try_compile")
-
-# ZLIB {
-if(ZLIB_FOUND)
-message("ZLIB FOUND")
-  set(with_zlib_default ON)
-else()
-message("ZLIB FOUND")
-  set(with_zlib_default OFF)
-endif()
-option(WITH_ZLIB "With ZLIB" ${with_zlib_default})
-# }
-
-# LibDL {
-try_compile(
-    WITH_LIBDL
-    "${CMAKE_CURRENT_BINARY_DIR}/try_compile"
-    "${TRYCOMPILE_SRC_DIR}/dlopen_test.c"
-    LINK_LIBRARIES "${CMAKE_DL_LIBS}"
-)
-# }
-
-# WITH_PLUGINS {
-if(WITH_LIBDL)
-  set(with_plugins_default ON)
-else()
-  set(with_plugins_default OFF)
-endif()
-option(WITH_PLUGINS "With plugin support" ${with_plugins_default})
-# }
-
-# OpenSSL {
-if(WITH_BUNDLED_SSL) # option from 'h2o' parent project
-  set(with_ssl_default ON)
-else()
-  find_package(OpenSSL QUIET)
-  if(OpenSSL_FOUND)
-    set(with_ssl_default ON)
-  else()
-    set(with_ssl_default OFF)
-  endif()
-endif()
-option(WITH_SSL "With SSL" ${with_ssl_default})
-# }
-
-# SASL {
-if(WIN32)
-  set(with_sasl_default ON)
-else()
-  include(FindPkgConfig)
-  pkg_check_modules(SASL libsasl2)
-  if(SASL_FOUND)
-    set(with_sasl_default ON)
-  else()
-    try_compile(
-        WITH_SASL_CYRUS_BOOL
-        "${CMAKE_CURRENT_BINARY_DIR}/try_compile"
-        "${TRYCOMPILE_SRC_DIR}/libsasl2_test.c"
-        LINK_LIBRARIES "-lsasl2"
-        )
-     if(WITH_SASL_CYRUS_BOOL)
-        set(with_sasl_default ON)
-        set(SASL_LIBRARIES "-lsasl2")
-     else()
-        set(with_sasl_default OFF)
-     endif()
-  endif()
-endif()
-option(WITH_SASL "With SASL" ${with_sasl_default})
-if(WITH_SASL)
-  if(WITH_SSL)
-    set(WITH_SASL_SCRAM ON)
-  endif()
-  if(NOT WIN32)
-    set(WITH_SASL_CYRUS ON)
-  endif()
-endif()
-# }
-
-# }
-
-option(RDKAFKA_BUILD_EXAMPLES "Build examples" OFF)
-option(RDKAFKA_BUILD_TESTS "Build tests" OFF)
-if(WIN32)
-    option(WITHOUT_WIN32_CONFIG "Avoid including win32_config.h on cmake builds" ON)
-endif(WIN32)
-
-# In:
-# * TRYCOMPILE_SRC_DIR
-# Out:
-# * HAVE_ATOMICS_32
-# * HAVE_ATOMICS_32_SYNC
-# * HAVE_ATOMICS_64
-# * HAVE_ATOMICS_64_SYNC
-# * HAVE_REGEX
-# * HAVE_STRNDUP
-# * LINK_ATOMIC
-include("packaging/cmake/try_compile/rdkafka_setup.cmake")
-
-set(GENERATED_DIR "${CMAKE_CURRENT_BINARY_DIR}/generated")
-
-# In:
-# * WITHOUT_OPTIMIZATION
-# * ENABLE_DEVEL
-# * ENABLE_REFCNT_DEBUG
-# * ENABLE_SHAREDPTR_DEBUG
-# * HAVE_ATOMICS_32
-# * HAVE_ATOMICS_32_SYNC
-# * HAVE_ATOMICS_64
-# * HAVE_ATOMICS_64_SYNC
-# * WITH_ZLIB
-# * WITH_SSL
-# * WITH_SASL
-# * HAVE_REGEX
-# * HAVE_STRNDUP
-configure_file("packaging/cmake/config.h.in" "${GENERATED_DIR}/config.h")
-
-# Installation (https://github.com/forexample/package-example) {
-
-include(GNUInstallDirs)
-
-set(config_install_dir "lib/cmake/${PROJECT_NAME}")
-
-set(generated_dir "${CMAKE_CURRENT_BINARY_DIR}/generated")
-
-set(project_config "${generated_dir}/${PROJECT_NAME}Config.cmake")
-set(targets_export_name "${PROJECT_NAME}Targets")
-set(namespace "${PROJECT_NAME}::")
-
-include(CMakePackageConfigHelpers)
-
-# In:
-#   * targets_export_name
-#   * PROJECT_NAME
-configure_package_config_file(
-    "packaging/cmake/Config.cmake.in"
-    "${project_config}"
-    INSTALL_DESTINATION "${config_install_dir}"
-)
-
-install(
-    FILES "${project_config}"
-    DESTINATION "${config_install_dir}"
-)
-
-install(
-    EXPORT "${targets_export_name}"
-    NAMESPACE "${namespace}"
-    DESTINATION "${config_install_dir}"
-)
-
-install(
-    FILES LICENSES.txt
-    DESTINATION "share/licenses/librdkafka"
-)
-
-# }
-
-add_subdirectory(src)
-add_subdirectory(src-cpp)
-
-if(RDKAFKA_BUILD_EXAMPLES)
-  add_subdirectory(examples)
-endif()
-
-if(RDKAFKA_BUILD_TESTS)
-  enable_testing()
-  add_subdirectory(tests)
-endif()
diff --git a/thirdparty/librdkafka-0.11.4/CODE_OF_CONDUCT.md b/thirdparty/librdkafka-0.11.4/CODE_OF_CONDUCT.md
deleted file mode 100644
index dbbde19..0000000
--- a/thirdparty/librdkafka-0.11.4/CODE_OF_CONDUCT.md
+++ /dev/null
@@ -1,46 +0,0 @@
-# Contributor Covenant Code of Conduct
-
-## Our Pledge
-
-In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation.
-
-## Our Standards
-
-Examples of behavior that contributes to creating a positive environment include:
-
-* Using welcoming and inclusive language
-* Being respectful of differing viewpoints and experiences
-* Gracefully accepting constructive criticism
-* Focusing on what is best for the community
-* Showing empathy towards other community members
-
-Examples of unacceptable behavior by participants include:
-
-* The use of sexualized language or imagery and unwelcome sexual attention or advances
-* Trolling, insulting/derogatory comments, and personal or political attacks
-* Public or private harassment
-* Publishing others' private information, such as a physical or electronic address, without explicit permission
-* Other conduct which could reasonably be considered inappropriate in a professional setting
-
-## Our Responsibilities
-
-Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.
-
-Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.
-
-## Scope
-
-This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.
-
-## Enforcement
-
-Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at rdkafka@edenhill.se. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
-
-Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.
-
-## Attribution
-
-This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]
-
-[homepage]: http://contributor-covenant.org
-[version]: http://contributor-covenant.org/version/1/4/
diff --git a/thirdparty/librdkafka-0.11.4/CONFIGURATION.md b/thirdparty/librdkafka-0.11.4/CONFIGURATION.md
deleted file mode 100644
index 7bc060f..0000000
--- a/thirdparty/librdkafka-0.11.4/CONFIGURATION.md
+++ /dev/null
@@ -1,138 +0,0 @@
-//@file
-## Global configuration properties
-
-Property                                 | C/P | Range           |       Default | Description              
------------------------------------------|-----|-----------------|--------------:|--------------------------
-builtin.features                         |  *  |                 | gzip, snappy, ssl, sasl, regex, lz4, sasl_gssapi, sasl_plain, sasl_scram, plugins | Indicates the builtin features for this build of librdkafka. An application can either query this value or attempt to set it with its list of required features to check for library support. <br>*Type: CSV flags*
-client.id                                |  *  |                 |       rdkafka | Client identifier. <br>*Type: string*
-metadata.broker.list                     |  *  |                 |               | Initial list of brokers as a CSV list of broker host or host:port. The application may also use `rd_kafka_brokers_add()` to add brokers during runtime. <br>*Type: string*
-bootstrap.servers                        |  *  |                 |               | Alias for `metadata.broker.list`
-message.max.bytes                        |  *  | 1000 .. 1000000000 |       1000000 | Maximum Kafka protocol request message size. <br>*Type: integer*
-message.copy.max.bytes                   |  *  | 0 .. 1000000000 |         65535 | Maximum size for message to be copied to buffer. Messages larger than this will be passed by reference (zero-copy) at the expense of larger iovecs. <br>*Type: integer*
-receive.message.max.bytes                |  *  | 1000 .. 2147483647 |     100000000 | Maximum Kafka protocol response message size. This serves as a safety precaution to avoid memory exhaustion in case of protocol hickups. This value is automatically adjusted upwards to be at least `fetch.max.bytes` + 512 to allow for protocol overhead. <br>*Type: integer*
-max.in.flight.requests.per.connection    |  *  | 1 .. 1000000    |       1000000 | Maximum number of in-flight requests per broker connection. This is a generic property applied to all broker communication, however it is primarily relevant to produce requests. In particular, note that other mechanisms limit the number of outstanding consumer fetch request per broker to one. <br>*Type: integer*
-max.in.flight                            |  *  |                 |               | Alias for `max.in.flight.requests.per.connection`
-metadata.request.timeout.ms              |  *  | 10 .. 900000    |         60000 | Non-topic request timeout in milliseconds. This is for metadata requests, etc. <br>*Type: integer*
-topic.metadata.refresh.interval.ms       |  *  | -1 .. 3600000   |        300000 | Topic metadata refresh interval in milliseconds. The metadata is automatically refreshed on error and connect. Use -1 to disable the intervalled refresh. <br>*Type: integer*
-metadata.max.age.ms                      |  *  | 1 .. 86400000   |            -1 | Metadata cache max age. Defaults to metadata.refresh.interval.ms * 3 <br>*Type: integer*
-topic.metadata.refresh.fast.interval.ms  |  *  | 1 .. 60000      |           250 | When a topic loses its leader a new metadata request will be enqueued with this initial interval, exponentially increasing until the topic metadata has been refreshed. This is used to recover quickly from transitioning leader brokers. <br>*Type: integer*
-topic.metadata.refresh.fast.cnt          |  *  | 0 .. 1000       |            10 | *Deprecated: No longer used.* <br>*Type: integer*
-topic.metadata.refresh.sparse            |  *  | true, false     |          true | Sparse metadata requests (consumes less network bandwidth) <br>*Type: boolean*
-topic.blacklist                          |  *  |                 |               | Topic blacklist, a comma-separated list of regular expressions for matching topic names that should be ignored in broker metadata information as if the topics did not exist. <br>*Type: pattern list*
-debug                                    |  *  | generic, broker, topic, metadata, feature, queue, msg, protocol, cgrp, security, fetch, interceptor, plugin, consumer, all |               | A comma-separated list of debug contexts to enable. Detailed Producer debugging: broker,topic,msg. Consumer: consumer,cgrp,topic,fetch <br>*Type: CSV flags*
-socket.timeout.ms                        |  *  | 10 .. 300000    |         60000 | Default timeout for network requests. Producer: ProduceRequests will use the lesser value of socket.timeout.ms and remaining message.timeout.ms for the first message in the batch. Consumer: FetchRequests will use fetch.wait.max.ms + socket.timeout.ms.  <br>*Type: integer*
-socket.blocking.max.ms                   |  *  | 1 .. 60000      |          1000 | Maximum time a broker socket operation may block. A lower value improves responsiveness at the expense of slightly higher CPU usage. **Deprecated** <br>*Type: integer*
-socket.send.buffer.bytes                 |  *  | 0 .. 100000000  |             0 | Broker socket send buffer size. System default is used if 0. <br>*Type: integer*
-socket.receive.buffer.bytes              |  *  | 0 .. 100000000  |             0 | Broker socket receive buffer size. System default is used if 0. <br>*Type: integer*
-socket.keepalive.enable                  |  *  | true, false     |         false | Enable TCP keep-alives (SO_KEEPALIVE) on broker sockets <br>*Type: boolean*
-socket.nagle.disable                     |  *  | true, false     |         false | Disable the Nagle algorithm (TCP_NODELAY). <br>*Type: boolean*
-socket.max.fails                         |  *  | 0 .. 1000000    |             1 | Disconnect from broker when this number of send failures (e.g., timed out requests) is reached. Disable with 0. NOTE: The connection is automatically re-established. <br>*Type: integer*
-broker.address.ttl                       |  *  | 0 .. 86400000   |          1000 | How long to cache the broker address resolving results (milliseconds). <br>*Type: integer*
-broker.address.family                    |  *  | any, v4, v6     |           any | Allowed broker IP address families: any, v4, v6 <br>*Type: enum value*
-reconnect.backoff.jitter.ms              |  *  | 0 .. 3600000    |           500 | Throttle broker reconnection attempts by this value +-50%. <br>*Type: integer*
-statistics.interval.ms                   |  *  | 0 .. 86400000   |             0 | librdkafka statistics emit interval. The application also needs to register a stats callback using `rd_kafka_conf_set_stats_cb()`. The granularity is 1000ms. A value of 0 disables statistics. <br>*Type: integer*
-enabled_events                           |  *  | 0 .. 2147483647 |             0 | See `rd_kafka_conf_set_events()` <br>*Type: integer*
-error_cb                                 |  *  |                 |               | Error callback (set with rd_kafka_conf_set_error_cb()) <br>*Type: pointer*
-throttle_cb                              |  *  |                 |               | Throttle callback (set with rd_kafka_conf_set_throttle_cb()) <br>*Type: pointer*
-stats_cb                                 |  *  |                 |               | Statistics callback (set with rd_kafka_conf_set_stats_cb()) <br>*Type: pointer*
-log_cb                                   |  *  |                 |               | Log callback (set with rd_kafka_conf_set_log_cb()) <br>*Type: pointer*
-log_level                                |  *  | 0 .. 7          |             6 | Logging level (syslog(3) levels) <br>*Type: integer*
-log.queue                                |  *  | true, false     |         false | Disable spontaneous log_cb from internal librdkafka threads, instead enqueue log messages on queue set with `rd_kafka_set_log_queue()` and serve log callbacks or events through the standard poll APIs. **NOTE**: Log messages will linger in a temporary queue until the log queue has been set. <br>*Type: boolean*
-log.thread.name                          |  *  | true, false     |          true | Print internal thread name in log messages (useful for debugging librdkafka internals) <br>*Type: boolean*
-log.connection.close                     |  *  | true, false     |          true | Log broker disconnects. It might be useful to turn this off when interacting with 0.9 brokers with an aggressive `connection.max.idle.ms` value. <br>*Type: boolean*
-socket_cb                                |  *  |                 |               | Socket creation callback to provide race-free CLOEXEC <br>*Type: pointer*
-connect_cb                               |  *  |                 |               | Socket connect callback <br>*Type: pointer*
-closesocket_cb                           |  *  |                 |               | Socket close callback <br>*Type: pointer*
-open_cb                                  |  *  |                 |               | File open callback to provide race-free CLOEXEC <br>*Type: pointer*
-opaque                                   |  *  |                 |               | Application opaque (set with rd_kafka_conf_set_opaque()) <br>*Type: pointer*
-default_topic_conf                       |  *  |                 |               | Default topic configuration for automatically subscribed topics <br>*Type: pointer*
-internal.termination.signal              |  *  | 0 .. 128        |             0 | Signal that librdkafka will use to quickly terminate on rd_kafka_destroy(). If this signal is not set then there will be a delay before rd_kafka_wait_destroyed() returns true as internal threads are timing out their system calls. If this signal is set however the delay will be minimal. The application should mask this signal as an internal signal handler is installed. <br>*Type: integer*
-api.version.request                      |  *  | true, false     |          true | Request broker's supported API versions to adjust functionality to available protocol features. If set to false, or the ApiVersionRequest fails, the fallback version `broker.version.fallback` will be used. **NOTE**: Depends on broker version >=0.10.0. If the request is not supported by (an older) broker the `broker.version.fallback` fallback is used. <br>*Type: boolean*
-api.version.request.timeout.ms           |  *  | 1 .. 300000     |         10000 | Timeout for broker API version requests. <br>*Type: integer*
-api.version.fallback.ms                  |  *  | 0 .. 604800000  |       1200000 | Dictates how long the `broker.version.fallback` fallback is used in the case the ApiVersionRequest fails. **NOTE**: The ApiVersionRequest is only issued when a new connection to the broker is made (such as after an upgrade). <br>*Type: integer*
-broker.version.fallback                  |  *  |                 |         0.9.0 | Older broker versions (<0.10.0) provides no way for a client to query for supported protocol features (ApiVersionRequest, see `api.version.request`) making it impossible for the client to know what features it may use. As a workaround a user may set this property to the expected broker version and the client will automatically adjust its feature set accordingly if the ApiVersionRequest fails (or is disable [...]
-security.protocol                        |  *  | plaintext, ssl, sasl_plaintext, sasl_ssl |     plaintext | Protocol used to communicate with brokers. <br>*Type: enum value*
-ssl.cipher.suites                        |  *  |                 |               | A cipher suite is a named combination of authentication, encryption, MAC and key exchange algorithm used to negotiate the security settings for a network connection using TLS or SSL network protocol. See manual page for `ciphers(1)` and `SSL_CTX_set_cipher_list(3). <br>*Type: string*
-ssl.key.location                         |  *  |                 |               | Path to client's private key (PEM) used for authentication. <br>*Type: string*
-ssl.key.password                         |  *  |                 |               | Private key passphrase <br>*Type: string*
-ssl.certificate.location                 |  *  |                 |               | Path to client's public key (PEM) used for authentication. <br>*Type: string*
-ssl.ca.location                          |  *  |                 |               | File or directory path to CA certificate(s) for verifying the broker's key. <br>*Type: string*
-ssl.crl.location                         |  *  |                 |               | Path to CRL for verifying broker's certificate validity. <br>*Type: string*
-ssl.keystore.location                    |  *  |                 |               | Path to client's keystore (PKCS#12) used for authentication. <br>*Type: string*
-ssl.keystore.password                    |  *  |                 |               | Client's keystore (PKCS#12) password. <br>*Type: string*
-sasl.mechanisms                          |  *  |                 |        GSSAPI | SASL mechanism to use for authentication. Supported: GSSAPI, PLAIN, SCRAM-SHA-256, SCRAM-SHA-512. **NOTE**: Despite the name only one mechanism must be configured. <br>*Type: string*
-sasl.mechanism                           |  *  |                 |               | Alias for `sasl.mechanisms`
-sasl.kerberos.service.name               |  *  |                 |         kafka | Kerberos principal name that Kafka runs as, not including /hostname@REALM <br>*Type: string*
-sasl.kerberos.principal                  |  *  |                 |   kafkaclient | This client's Kerberos principal name. (Not supported on Windows, will use the logon user's principal). <br>*Type: string*
-sasl.kerberos.kinit.cmd                  |  *  |                 | kinit -S "%{sasl.kerberos.service.name}/%{broker.name}" -k -t "%{sasl.kerberos.keytab}" %{sasl.kerberos.principal} | Full kerberos kinit command string, %{config.prop.name} is replaced by corresponding config object value, %{broker.name} returns the broker's hostname. <br>*Type: string*
-sasl.kerberos.keytab                     |  *  |                 |               | Path to Kerberos keytab file. Uses system default if not set.**NOTE**: This is not automatically used but must be added to the template in sasl.kerberos.kinit.cmd as ` ... -t %{sasl.kerberos.keytab}`. <br>*Type: string*
-sasl.kerberos.min.time.before.relogin    |  *  | 1 .. 86400000   |         60000 | Minimum time in milliseconds between key refresh attempts. <br>*Type: integer*
-sasl.username                            |  *  |                 |               | SASL username for use with the PLAIN and SASL-SCRAM-.. mechanisms <br>*Type: string*
-sasl.password                            |  *  |                 |               | SASL password for use with the PLAIN and SASL-SCRAM-.. mechanism <br>*Type: string*
-plugin.library.paths                     |  *  |                 |               | List of plugin libaries to load (; separated). The library search path is platform dependent (see dlopen(3) for Unix and LoadLibrary() for Windows). If no filename extension is specified the platform-specific extension (such as .dll or .so) will be appended automatically. <br>*Type: string*
-interceptors                             |  *  |                 |               | Interceptors added through rd_kafka_conf_interceptor_add_..() and any configuration handled by interceptors. <br>*Type: *
-group.id                                 |  *  |                 |               | Client group id string. All clients sharing the same group.id belong to the same group. <br>*Type: string*
-partition.assignment.strategy            |  *  |                 | range,roundrobin | Name of partition assignment strategy to use when elected group leader assigns partitions to group members. <br>*Type: string*
-session.timeout.ms                       |  *  | 1 .. 3600000    |         30000 | Client group session and failure detection timeout. <br>*Type: integer*
-heartbeat.interval.ms                    |  *  | 1 .. 3600000    |          1000 | Group session keepalive heartbeat interval. <br>*Type: integer*
-group.protocol.type                      |  *  |                 |      consumer | Group protocol type <br>*Type: string*
-coordinator.query.interval.ms            |  *  | 1 .. 3600000    |        600000 | How often to query for the current client group coordinator. If the currently assigned coordinator is down the configured query interval will be divided by ten to more quickly recover in case of coordinator reassignment. <br>*Type: integer*
-enable.auto.commit                       |  C  | true, false     |          true | Automatically and periodically commit offsets in the background. Note: setting this to false does not prevent the consumer from fetching previously committed start offsets. To circumvent this behaviour set specific start offsets per partition in the call to assign(). <br>*Type: boolean*
-auto.commit.interval.ms                  |  C  | 0 .. 86400000   |          5000 | The frequency in milliseconds that the consumer offsets are committed (written) to offset storage. (0 = disable). This setting is used by the high-level consumer. <br>*Type: integer*
-enable.auto.offset.store                 |  C  | true, false     |          true | Automatically store offset of last message provided to application. <br>*Type: boolean*
-queued.min.messages                      |  C  | 1 .. 10000000   |        100000 | Minimum number of messages per topic+partition librdkafka tries to maintain in the local consumer queue. <br>*Type: integer*
-queued.max.messages.kbytes               |  C  | 1 .. 2097151    |       1048576 | Maximum number of kilobytes per topic+partition in the local consumer queue. This value may be overshot by fetch.message.max.bytes. This property has higher priority than queued.min.messages. <br>*Type: integer*
-fetch.wait.max.ms                        |  C  | 0 .. 300000     |           100 | Maximum time the broker may wait to fill the response with fetch.min.bytes. <br>*Type: integer*
-fetch.message.max.bytes                  |  C  | 1 .. 1000000000 |       1048576 | Initial maximum number of bytes per topic+partition to request when fetching messages from the broker. If the client encounters a message larger than this value it will gradually try to increase it until the entire message can be fetched. <br>*Type: integer*
-max.partition.fetch.bytes                |  C  |                 |               | Alias for `fetch.message.max.bytes`
-fetch.max.bytes                          |  C  | 0 .. 2147483135 |      52428800 | Maximum amount of data the broker shall return for a Fetch request. Messages are fetched in batches by the consumer and if the first message batch in the first non-empty partition of the Fetch request is larger than this value, then the message batch will still be returned to ensure the consumer can make progress. The maximum message batch size accepted by the broker is defined via `message.max.bytes` (bro [...]
-fetch.min.bytes                          |  C  | 1 .. 100000000  |             1 | Minimum number of bytes the broker responds with. If fetch.wait.max.ms expires the accumulated data will be sent to the client regardless of this setting. <br>*Type: integer*
-fetch.error.backoff.ms                   |  C  | 0 .. 300000     |           500 | How long to postpone the next fetch request for a topic+partition in case of a fetch error. <br>*Type: integer*
-offset.store.method                      |  C  | none, file, broker |        broker | Offset commit store method: 'file' - local file store (offset.store.path, et.al), 'broker' - broker commit store (requires Apache Kafka 0.8.2 or later on the broker). <br>*Type: enum value*
-consume_cb                               |  C  |                 |               | Message consume callback (set with rd_kafka_conf_set_consume_cb()) <br>*Type: pointer*
-rebalance_cb                             |  C  |                 |               | Called after consumer group has been rebalanced (set with rd_kafka_conf_set_rebalance_cb()) <br>*Type: pointer*
-offset_commit_cb                         |  C  |                 |               | Offset commit result propagation callback. (set with rd_kafka_conf_set_offset_commit_cb()) <br>*Type: pointer*
-enable.partition.eof                     |  C  | true, false     |          true | Emit RD_KAFKA_RESP_ERR__PARTITION_EOF event whenever the consumer reaches the end of a partition. <br>*Type: boolean*
-check.crcs                               |  C  | true, false     |         false | Verify CRC32 of consumed messages, ensuring no on-the-wire or on-disk corruption to the messages occurred. This check comes at slightly increased CPU usage. <br>*Type: boolean*
-queue.buffering.max.messages             |  P  | 1 .. 10000000   |        100000 | Maximum number of messages allowed on the producer queue. <br>*Type: integer*
-queue.buffering.max.kbytes               |  P  | 1 .. 2097151    |       1048576 | Maximum total message size sum allowed on the producer queue. This property has higher priority than queue.buffering.max.messages. <br>*Type: integer*
-queue.buffering.max.ms                   |  P  | 0 .. 900000     |             0 | Delay in milliseconds to wait for messages in the producer queue to accumulate before constructing message batches (MessageSets) to transmit to brokers. A higher value allows larger and more effective (less overhead, improved compression) batches of messages to accumulate at the expense of increased message delivery latency. <br>*Type: integer*
-linger.ms                                |  P  |                 |               | Alias for `queue.buffering.max.ms`
-message.send.max.retries                 |  P  | 0 .. 10000000   |             2 | How many times to retry sending a failing MessageSet. **Note:** retrying may cause reordering. <br>*Type: integer*
-retries                                  |  P  |                 |               | Alias for `message.send.max.retries`
-retry.backoff.ms                         |  P  | 1 .. 300000     |           100 | The backoff time in milliseconds before retrying a protocol request. <br>*Type: integer*
-queue.buffering.backpressure.threshold   |  P  | 0 .. 1000000    |            10 | The threshold of outstanding not yet transmitted requests needed to backpressure the producer's message accumulator. A lower number yields larger and more effective batches. <br>*Type: integer*
-compression.codec                        |  P  | none, gzip, snappy, lz4 |          none | compression codec to use for compressing message sets. This is the default value for all topics, may be overriden by the topic configuration property `compression.codec`.  <br>*Type: enum value*
-compression.type                         |  P  |                 |               | Alias for `compression.codec`
-batch.num.messages                       |  P  | 1 .. 1000000    |         10000 | Maximum number of messages batched in one MessageSet. The total MessageSet size is also limited by message.max.bytes. <br>*Type: integer*
-delivery.report.only.error               |  P  | true, false     |         false | Only provide delivery reports for failed messages. <br>*Type: boolean*
-dr_cb                                    |  P  |                 |               | Delivery report callback (set with rd_kafka_conf_set_dr_cb()) <br>*Type: pointer*
-dr_msg_cb                                |  P  |                 |               | Delivery report callback (set with rd_kafka_conf_set_dr_msg_cb()) <br>*Type: pointer*
-
-
-## Topic configuration properties
-
-Property                                 | C/P | Range           |       Default | Description              
------------------------------------------|-----|-----------------|--------------:|--------------------------
-request.required.acks                    |  P  | -1 .. 1000      |             1 | This field indicates how many acknowledgements the leader broker must receive from ISR brokers before responding to the request: *0*=Broker does not send any response/ack to client, *1*=Only the leader broker will need to ack the message, *-1* or *all*=broker will block until message is committed by all in sync replicas (ISRs) or broker's `min.insync.replicas` setting before sending response.  <br>*Type: integer*
-acks                                     |  P  |                 |               | Alias for `request.required.acks`
-request.timeout.ms                       |  P  | 1 .. 900000     |          5000 | The ack timeout of the producer request in milliseconds. This value is only enforced by the broker and relies on `request.required.acks` being != 0. <br>*Type: integer*
-message.timeout.ms                       |  P  | 0 .. 900000     |        300000 | Local message timeout. This value is only enforced locally and limits the time a produced message waits for successful delivery. A time of 0 is infinite. <br>*Type: integer*
-queuing.strategy                         |  P  | fifo, lifo      |          fifo | Producer queuing strategy. FIFO preserves produce ordering, while LIFO prioritizes new messages. WARNING: `lifo` is experimental and subject to change or removal. <br>*Type: enum value*
-produce.offset.report                    |  P  | true, false     |         false | Report offset of produced message back to application. The application must be use the `dr_msg_cb` to retrieve the offset from `rd_kafka_message_t.offset`. <br>*Type: boolean*
-partitioner                              |  P  |                 | consistent_random | Partitioner: `random` - random distribution, `consistent` - CRC32 hash of key (Empty and NULL keys are mapped to single partition), `consistent_random` - CRC32 hash of key (Empty and NULL keys are randomly partitioned), `murmur2` - Java Producer compatible Murmur2 hash of key (NULL keys are mapped to single partition), `murmur2_random` - Java Producer compatible Murmur2 hash of key (NULL keys are rando [...]
-partitioner_cb                           |  P  |                 |               | Custom partitioner callback (set with rd_kafka_topic_conf_set_partitioner_cb()) <br>*Type: pointer*
-msg_order_cmp                            |  P  |                 |               | Message queue ordering comparator (set with rd_kafka_topic_conf_set_msg_order_cmp()). Also see `queuing.strategy`. <br>*Type: pointer*
-opaque                                   |  *  |                 |               | Application opaque (set with rd_kafka_topic_conf_set_opaque()) <br>*Type: pointer*
-compression.codec                        |  P  | none, gzip, snappy, lz4, inherit |       inherit | Compression codec to use for compressing message sets. inherit = inherit global compression.codec configuration. <br>*Type: enum value*
-compression.type                         |  P  |                 |               | Alias for `compression.codec`
-auto.commit.enable                       |  C  | true, false     |          true | If true, periodically commit offset of the last message handed to the application. This committed offset will be used when the process restarts to pick up where it left off. If false, the application will have to call `rd_kafka_offset_store()` to store an offset (optional). **NOTE:** This property should only be used with the simple legacy consumer, when using the high-level KafkaConsumer the global `enabl [...]
-enable.auto.commit                       |  C  |                 |               | Alias for `auto.commit.enable`
-auto.commit.interval.ms                  |  C  | 10 .. 86400000  |         60000 | The frequency in milliseconds that the consumer offsets are committed (written) to offset storage. This setting is used by the low-level legacy consumer. <br>*Type: integer*
-auto.offset.reset                        |  C  | smallest, earliest, beginning, largest, latest, end, error |       largest | Action to take when there is no initial offset in offset store or the desired offset is out of range: 'smallest','earliest' - automatically reset the offset to the smallest offset, 'largest','latest' - automatically reset the offset to the largest offset, 'error' - trigger an error which is retrieved by consuming messages and checking 'message->err'. <br>*Type: en [...]
-offset.store.path                        |  C  |                 |             . | Path to local file for storing offsets. If the path is a directory a filename will be automatically generated in that directory based on the topic and partition. <br>*Type: string*
-offset.store.sync.interval.ms            |  C  | -1 .. 86400000  |            -1 | fsync() interval for the offset file, in milliseconds. Use -1 to disable syncing, and 0 for immediate sync after each write. <br>*Type: integer*
-offset.store.method                      |  C  | file, broker    |        broker | Offset commit store method: 'file' - local file store (offset.store.path, et.al), 'broker' - broker commit store (requires "group.id" to be configured and Apache Kafka 0.8.2 or later on the broker.). <br>*Type: enum value*
-consume.callback.max.messages            |  C  | 0 .. 1000000    |             0 | Maximum number of messages to dispatch in one `rd_kafka_consume_callback*()` call (0 = unlimited) <br>*Type: integer*
-
-### C/P legend: C = Consumer, P = Producer, * = both
diff --git a/thirdparty/librdkafka-0.11.4/CONTRIBUTING.md b/thirdparty/librdkafka-0.11.4/CONTRIBUTING.md
deleted file mode 100644
index 5da7c77..0000000
--- a/thirdparty/librdkafka-0.11.4/CONTRIBUTING.md
+++ /dev/null
@@ -1,271 +0,0 @@
-# Contributing to librdkafka
-
-(This document is based on [curl's CONTRIBUTE.md](https://github.com/curl/curl/blob/master/docs/CONTRIBUTE.md) - thank you!)
-
-This document is intended to offer guidelines on how to best contribute to the
-librdkafka project. This concerns new features as well as bug fixes and
-general improvements.
-
-### License and copyright
-
-When contributing with code, you agree to put your changes and new code under
-the same license librdkafka is already using unless stated and agreed
-otherwise.
-
-When changing existing source code, you do not alter the copyright of the
-original file(s). The copyright will still be owned by the original creator(s)
-or those who have been assigned copyright by the original author(s).
-
-By submitting a patch to the librdkafka, you are assumed to have the right
-to the code and to be allowed by your employer or whatever to hand over that
-patch/code to us. We will credit you for your changes as far as possible, to
-give credit but also to keep a trace back to who made what changes. Please
-always provide us with your full real name when contributing!
-
-Official librdkafka project maintainer(s) assume ownership of all accepted
-submissions.
-
-## Write a good patch
-
-### Follow code style
-
-When writing C code, follow the code style already established in
-the project. Consistent style makes code easier to read and mistakes less
-likely to happen.
-
-See the end of this document for the C style guide to use in librdkafka.
-
-
-### Write Separate Changes
-
-It is annoying when you get a huge patch from someone that is said to fix 511
-odd problems, but discussions and opinions don't agree with 510 of them - or
-509 of them were already fixed in a different way. Then the person merging
-this change needs to extract the single interesting patch from somewhere
-within the huge pile of source, and that gives a lot of extra work.
-
-Preferably, each fix that correct a problem should be in its own patch/commit
-with its own description/commit message stating exactly what they correct so
-that all changes can be selectively applied by the maintainer or other
-interested parties.
-
-Also, separate changes enable bisecting much better when we track problems
-and regression in the future.
-
-### Patch Against Recent Sources
-
-Please try to make your patches against latest master branch.
-
-### Test Cases
-
-Bugfixes should also include a new test case in the regression test suite
-that verifies the bug is fixed.
-Create a new tests/00<freenumber>-<short_bug_description>.c file and
-try to reproduce the issue in its most simple form.
-Verify that the test case fails for earlier versions and passes with your
-bugfix in-place.
-
-New features and APIs should also result in an added test case.
-
-Submitted patches must pass all existing tests.
-For more information on the test suite see [tests/README]
-
-
-
-## How to get your changes into the main sources
-
-File a [pull request on github](https://github.com/edenhill/librdkafka/pulls)
-
-Your change will be reviewed and discussed there and you will be
-expected to correct flaws pointed out and update accordingly, or the change
-risk stalling and eventually just get deleted without action. As a submitter
-of a change, you are the owner of that change until it has been merged.
-
-Make sure to monitor your PR on github and answer questions and/or
-fix nits/flaws. This is very important. We will take lack of replies as a
-sign that you're not very anxious to get your patch accepted and we tend to
-simply drop such changes.
-
-When you adjust your pull requests after review, please squash the
-commits so that we can review the full updated version more easily
-and keep history cleaner.
-
-For example:
-
-    # Interactive rebase to let you squash/fixup commits
-    $ git rebase -i master
-
-    # Mark fixes-on-fixes commits as 'fixup' (or just 'f') in the
-    # first column. These will be silently integrated into the
-    # previous commit, so make sure to move the fixup-commit to
-    # the line beneath the parent commit.
-
-    # Since this probably rewrote the history of previously pushed
-    # commits you will need to make a force push, which is usually
-    # a bad idea but works good for pull requests.
-    $ git push --force origin your_feature_branch
-
-
-### Write good commit messages
-
-A short guide to how to write commit messages in the curl project.
-
-    ---- start ----
-    [area]: [short line describing the main effect] [(#issuenumber)]
-           -- empty line --
-    [full description, no wider than 72 columns that describe as much as
-    possible as to why this change is made, and possibly what things
-    it fixes and everything else that is related]
-    ---- stop ----
-
-Example:
-
-    cgrp: restart query timer on all heartbeat failures (#10023)
-    
-    If unhandled errors were received in HeartbeatResponse
-    the cgrp could get stuck in a state where it would not
-    refresh its coordinator.
-
-
-
-# librdkafka C style guide
-
-## Function and globals naming
-
-Use self-explanatory hierarchical snake-case naming.
-Pretty much all symbols should start with `rd_kafka_`, followed by
-their subsystem (e.g., `cgrp`, `broker`, `buf`, etc..), followed by an
-action (e.g, `find`, `get`, `clear`, ..).
-
-
-## Variable naming
-
-For existing types use the type prefix as variable name.
-The type prefix is typically the first part of struct member fields.
-Example:
-
-  * `rd_kafka_broker_t` has field names starting with `rkb_..`, thus broker
-     variable names should be named `rkb`
-
-
-For other types use reasonably concise but descriptive names.
-`i` and `j` are typical int iterators.
-
-## Variable declaration
-
-Variables must be declared at the head of a scope, no in-line variable
-declarations are allowed.
-
-## Indenting
-
-Use 8 spaces indent, same as the Linux kernel.
-In emacs, use `c-set-style "linux`.
-For C++, use Google's C++ style.
-
-## Comments
-
-Use `/* .. */` comments, not `// ..`
-
-For functions, use doxygen syntax, e.g.:
-
-    /**
-     * @brief <short description>
-     * ..
-     * @returns <something..>
-     */
-
-
-Make sure to comment non-obvious code and situations where the full
-context of an operation is not easily graspable.
-
-Also make sure to update existing comments when the code changes.
-
-
-## Line length
-
-Try hard to keep line length below 80 characters, when this is not possible
-exceed it with reason.
-
-
-## Braces
-
-Braces go on the same line as their enveloping statement:
-
-    int some_func (..) {
-      while (1) {
-        if (1) {
-          do something;
-          ..
-        } else {
-          do something else;
-          ..
-        }
-      }
- 
-      /* Single line scopes should not have braces */
-      if (1)
-        hi();
-      else if (2)
-        /* Say hello */
-        hello();
-      else
-        bye();
-
-
-## Spaces
-
-All expression parentheses should be prefixed and suffixed with a single space:
-
-    int some_func (int a) {
-
-        if (1)
-          ....;
-
-        for (i = 0 ; i < 19 ; i++) {
-
-
-        }
-    }
-
-
-Use space around operators:
-
-    int a = 2;
-  
-    if (b >= 3)
-       c += 2;
-
-Except for these:
-  
-    d++;
-    --e;
-
-
-## New block on new line
-
-New blocks should be on a new line:
-
-    if (1)
-      new();
-    else
-      old();
-
-
-## Parentheses
-
-Don't assume the reader knows C operator precedence by heart for complex
-statements, add parentheses to ease readability.
-
-
-## ifdef hell
-
-Avoid ifdef's as much as possible.
-Platform support checking should be performed in configure.librdkafka.
-
-
-
-
-
-# librdkafka C++ style guide
-
-Follow [Google's C++ style guide](https://google.github.io/styleguide/cppguide.html)
diff --git a/thirdparty/librdkafka-0.11.4/Doxyfile b/thirdparty/librdkafka-0.11.4/Doxyfile
deleted file mode 100644
index 8e94e12..0000000
--- a/thirdparty/librdkafka-0.11.4/Doxyfile
+++ /dev/null
@@ -1,2385 +0,0 @@
-# Doxyfile 1.8.9.1
-
-# This file describes the settings to be used by the documentation system
-# doxygen (www.doxygen.org) for a project.
-#
-# All text after a double hash (##) is considered a comment and is placed in
-# front of the TAG it is preceding.
-#
-# All text after a single hash (#) is considered a comment and will be ignored.
-# The format is:
-# TAG = value [value, ...]
-# For lists, items can also be appended using:
-# TAG += value [value, ...]
-# Values that contain spaces should be placed between quotes (\" \").
-
-#---------------------------------------------------------------------------
-# Project related configuration options
-#---------------------------------------------------------------------------
-
-# This tag specifies the encoding used for all characters in the config file
-# that follow. The default is UTF-8 which is also the encoding used for all text
-# before the first occurrence of this tag. Doxygen uses libiconv (or the iconv
-# built into libc) for the transcoding. See http://www.gnu.org/software/libiconv
-# for the list of possible encodings.
-# The default value is: UTF-8.
-
-DOXYFILE_ENCODING      = UTF-8
-
-# The PROJECT_NAME tag is a single word (or a sequence of words surrounded by
-# double-quotes, unless you are using Doxywizard) that should identify the
-# project for which the documentation is generated. This name is used in the
-# title of most generated pages and in a few other places.
-# The default value is: My Project.
-
-PROJECT_NAME           = "librdkafka"
-
-# The PROJECT_NUMBER tag can be used to enter a project or revision number. This
-# could be handy for archiving the generated documentation or if some version
-# control system is used.
-
-PROJECT_NUMBER         =
-
-# Using the PROJECT_BRIEF tag one can provide an optional one line description
-# for a project that appears at the top of each page and should give viewer a
-# quick idea about the purpose of the project. Keep the description short.
-
-PROJECT_BRIEF          = "The Apache Kafka C/C++ client library"
-
-# With the PROJECT_LOGO tag one can specify a logo or an icon that is included
-# in the documentation. The maximum height of the logo should not exceed 55
-# pixels and the maximum width should not exceed 200 pixels. Doxygen will copy
-# the logo to the output directory.
-
-#PROJECT_LOGO           = kafka_logo.png
-
-# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path
-# into which the generated documentation will be written. If a relative path is
-# entered, it will be relative to the location where doxygen was started. If
-# left blank the current directory will be used.
-
-OUTPUT_DIRECTORY       = staging-docs
-
-# If the CREATE_SUBDIRS tag is set to YES then doxygen will create 4096 sub-
-# directories (in 2 levels) under the output directory of each output format and
-# will distribute the generated files over these directories. Enabling this
-# option can be useful when feeding doxygen a huge amount of source files, where
-# putting all generated files in the same directory would otherwise causes
-# performance problems for the file system.
-# The default value is: NO.
-
-CREATE_SUBDIRS         = NO
-
-# If the ALLOW_UNICODE_NAMES tag is set to YES, doxygen will allow non-ASCII
-# characters to appear in the names of generated files. If set to NO, non-ASCII
-# characters will be escaped, for example _xE3_x81_x84 will be used for Unicode
-# U+3044.
-# The default value is: NO.
-
-ALLOW_UNICODE_NAMES    = NO
-
-# The OUTPUT_LANGUAGE tag is used to specify the language in which all
-# documentation generated by doxygen is written. Doxygen will use this
-# information to generate all constant output in the proper language.
-# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Catalan, Chinese,
-# Chinese-Traditional, Croatian, Czech, Danish, Dutch, English (United States),
-# Esperanto, Farsi (Persian), Finnish, French, German, Greek, Hungarian,
-# Indonesian, Italian, Japanese, Japanese-en (Japanese with English messages),
-# Korean, Korean-en (Korean with English messages), Latvian, Lithuanian,
-# Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, Romanian, Russian,
-# Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, Swedish, Turkish,
-# Ukrainian and Vietnamese.
-# The default value is: English.
-
-OUTPUT_LANGUAGE        = English
-
-# If the BRIEF_MEMBER_DESC tag is set to YES, doxygen will include brief member
-# descriptions after the members that are listed in the file and class
-# documentation (similar to Javadoc). Set to NO to disable this.
-# The default value is: YES.
-
-BRIEF_MEMBER_DESC      = YES
-
-# If the REPEAT_BRIEF tag is set to YES, doxygen will prepend the brief
-# description of a member or function before the detailed description
-#
-# Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
-# brief descriptions will be completely suppressed.
-# The default value is: YES.
-
-REPEAT_BRIEF           = YES
-
-# This tag implements a quasi-intelligent brief description abbreviator that is
-# used to form the text in various listings. Each string in this list, if found
-# as the leading text of the brief description, will be stripped from the text
-# and the result, after processing the whole list, is used as the annotated
-# text. Otherwise, the brief description is used as-is. If left blank, the
-# following values are used ($name is automatically replaced with the name of
-# the entity):The $name class, The $name widget, The $name file, is, provides,
-# specifies, contains, represents, a, an and the.
-
-ABBREVIATE_BRIEF       =
-
-# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
-# doxygen will generate a detailed section even if there is only a brief
-# description.
-# The default value is: NO.
-
-ALWAYS_DETAILED_SEC    = NO
-
-# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all
-# inherited members of a class in the documentation of that class as if those
-# members were ordinary class members. Constructors, destructors and assignment
-# operators of the base classes will not be shown.
-# The default value is: NO.
-
-INLINE_INHERITED_MEMB  = NO
-
-# If the FULL_PATH_NAMES tag is set to YES, doxygen will prepend the full path
-# before files name in the file list and in the header files. If set to NO the
-# shortest path that makes the file name unique will be used
-# The default value is: YES.
-
-FULL_PATH_NAMES        = YES
-
-# The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path.
-# Stripping is only done if one of the specified strings matches the left-hand
-# part of the path. The tag can be used to show relative paths in the file list.
-# If left blank the directory from which doxygen is run is used as the path to
-# strip.
-#
-# Note that you can specify absolute paths here, but also relative paths, which
-# will be relative from the directory where doxygen is started.
-# This tag requires that the tag FULL_PATH_NAMES is set to YES.
-
-STRIP_FROM_PATH        =
-
-# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the
-# path mentioned in the documentation of a class, which tells the reader which
-# header file to include in order to use a class. If left blank only the name of
-# the header file containing the class definition is used. Otherwise one should
-# specify the list of include paths that are normally passed to the compiler
-# using the -I flag.
-
-STRIP_FROM_INC_PATH    =
-
-# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but
-# less readable) file names. This can be useful is your file systems doesn't
-# support long names like on DOS, Mac, or CD-ROM.
-# The default value is: NO.
-
-SHORT_NAMES            = NO
-
-# If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the
-# first line (until the first dot) of a Javadoc-style comment as the brief
-# description. If set to NO, the Javadoc-style will behave just like regular Qt-
-# style comments (thus requiring an explicit @brief command for a brief
-# description.)
-# The default value is: NO.
-
-JAVADOC_AUTOBRIEF      = NO
-
-# If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first
-# line (until the first dot) of a Qt-style comment as the brief description. If
-# set to NO, the Qt-style will behave just like regular Qt-style comments (thus
-# requiring an explicit \brief command for a brief description.)
-# The default value is: NO.
-
-QT_AUTOBRIEF           = NO
-
-# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a
-# multi-line C++ special comment block (i.e. a block of //! or /// comments) as
-# a brief description. This used to be the default behavior. The new default is
-# to treat a multi-line C++ comment block as a detailed description. Set this
-# tag to YES if you prefer the old behavior instead.
-#
-# Note that setting this tag to YES also means that rational rose comments are
-# not recognized any more.
-# The default value is: NO.
-
-MULTILINE_CPP_IS_BRIEF = NO
-
-# If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the
-# documentation from any documented member that it re-implements.
-# The default value is: YES.
-
-INHERIT_DOCS           = YES
-
-# If the SEPARATE_MEMBER_PAGES tag is set to YES then doxygen will produce a new
-# page for each member. If set to NO, the documentation of a member will be part
-# of the file/class/namespace that contains it.
-# The default value is: NO.
-
-SEPARATE_MEMBER_PAGES  = NO
-
-# The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen
-# uses this value to replace tabs by spaces in code fragments.
-# Minimum value: 1, maximum value: 16, default value: 4.
-
-TAB_SIZE               = 4
-
-# This tag can be used to specify a number of aliases that act as commands in
-# the documentation. An alias has the form:
-# name=value
-# For example adding
-# "sideeffect=@par Side Effects:\n"
-# will allow you to put the command \sideeffect (or @sideeffect) in the
-# documentation, which will result in a user-defined paragraph with heading
-# "Side Effects:". You can put \n's in the value part of an alias to insert
-# newlines.
-
-ALIASES                = "locality=@par Thread restriction:"
-ALIASES               += "locks=@par Lock restriction:"
-
-# This tag can be used to specify a number of word-keyword mappings (TCL only).
-# A mapping has the form "name=value". For example adding "class=itcl::class"
-# will allow you to use the command class in the itcl::class meaning.
-
-TCL_SUBST              =
-
-# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources
-# only. Doxygen will then generate output that is more tailored for C. For
-# instance, some of the names that are used will be different. The list of all
-# members will be omitted, etc.
-# The default value is: NO.
-
-OPTIMIZE_OUTPUT_FOR_C  = YES
-
-# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or
-# Python sources only. Doxygen will then generate output that is more tailored
-# for that language. For instance, namespaces will be presented as packages,
-# qualified scopes will look different, etc.
-# The default value is: NO.
-
-OPTIMIZE_OUTPUT_JAVA   = NO
-
-# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran
-# sources. Doxygen will then generate output that is tailored for Fortran.
-# The default value is: NO.
-
-OPTIMIZE_FOR_FORTRAN   = NO
-
-# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL
-# sources. Doxygen will then generate output that is tailored for VHDL.
-# The default value is: NO.
-
-OPTIMIZE_OUTPUT_VHDL   = NO
-
-# Doxygen selects the parser to use depending on the extension of the files it
-# parses. With this tag you can assign which parser to use for a given
-# extension. Doxygen has a built-in mapping, but you can override or extend it
-# using this tag. The format is ext=language, where ext is a file extension, and
-# language is one of the parsers supported by doxygen: IDL, Java, Javascript,
-# C#, C, C++, D, PHP, Objective-C, Python, Fortran (fixed format Fortran:
-# FortranFixed, free formatted Fortran: FortranFree, unknown formatted Fortran:
-# Fortran. In the later case the parser tries to guess whether the code is fixed
-# or free formatted code, this is the default for Fortran type files), VHDL. For
-# instance to make doxygen treat .inc files as Fortran files (default is PHP),
-# and .f files as C (default is Fortran), use: inc=Fortran f=C.
-#
-# Note: For files without extension you can use no_extension as a placeholder.
-#
-# Note that for custom extensions you also need to set FILE_PATTERNS otherwise
-# the files are not read by doxygen.
-
-EXTENSION_MAPPING      =
-
-# If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments
-# according to the Markdown format, which allows for more readable
-# documentation. See http://daringfireball.net/projects/markdown/ for details.
-# The output of markdown processing is further processed by doxygen, so you can
-# mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in
-# case of backward compatibilities issues.
-# The default value is: YES.
-
-MARKDOWN_SUPPORT       = YES
-
-# When enabled doxygen tries to link words that correspond to documented
-# classes, or namespaces to their corresponding documentation. Such a link can
-# be prevented in individual cases by putting a % sign in front of the word or
-# globally by setting AUTOLINK_SUPPORT to NO.
-# The default value is: YES.
-
-AUTOLINK_SUPPORT       = YES
-
-# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
-# to include (a tag file for) the STL sources as input, then you should set this
-# tag to YES in order to let doxygen match functions declarations and
-# definitions whose arguments contain STL classes (e.g. func(std::string);
-# versus func(std::string) {}). This also make the inheritance and collaboration
-# diagrams that involve STL classes more complete and accurate.
-# The default value is: NO.
-
-BUILTIN_STL_SUPPORT    = NO
-
-# If you use Microsoft's C++/CLI language, you should set this option to YES to
-# enable parsing support.
-# The default value is: NO.
-
-CPP_CLI_SUPPORT        = NO
-
-# Set the SIP_SUPPORT tag to YES if your project consists of sip (see:
-# http://www.riverbankcomputing.co.uk/software/sip/intro) sources only. Doxygen
-# will parse them like normal C++ but will assume all classes use public instead
-# of private inheritance when no explicit protection keyword is present.
-# The default value is: NO.
-
-SIP_SUPPORT            = NO
-
-# For Microsoft's IDL there are propget and propput attributes to indicate
-# getter and setter methods for a property. Setting this option to YES will make
-# doxygen to replace the get and set methods by a property in the documentation.
-# This will only work if the methods are indeed getting or setting a simple
-# type. If this is not the case, or you want to show the methods anyway, you
-# should set this option to NO.
-# The default value is: YES.
-
-IDL_PROPERTY_SUPPORT   = YES
-
-# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
-# tag is set to YES then doxygen will reuse the documentation of the first
-# member in the group (if any) for the other members of the group. By default
-# all members of a group must be documented explicitly.
-# The default value is: NO.
-
-DISTRIBUTE_GROUP_DOC   = NO
-
-# Set the SUBGROUPING tag to YES to allow class member groups of the same type
-# (for instance a group of public functions) to be put as a subgroup of that
-# type (e.g. under the Public Functions section). Set it to NO to prevent
-# subgrouping. Alternatively, this can be done per class using the
-# \nosubgrouping command.
-# The default value is: YES.
-
-SUBGROUPING            = YES
-
-# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions
-# are shown inside the group in which they are included (e.g. using \ingroup)
-# instead of on a separate page (for HTML and Man pages) or section (for LaTeX
-# and RTF).
-#
-# Note that this feature does not work in combination with
-# SEPARATE_MEMBER_PAGES.
-# The default value is: NO.
-
-INLINE_GROUPED_CLASSES = NO
-
-# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions
-# with only public data fields or simple typedef fields will be shown inline in
-# the documentation of the scope in which they are defined (i.e. file,
-# namespace, or group documentation), provided this scope is documented. If set
-# to NO, structs, classes, and unions are shown on a separate page (for HTML and
-# Man pages) or section (for LaTeX and RTF).
-# The default value is: NO.
-
-INLINE_SIMPLE_STRUCTS  = NO
-
-# When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or
-# enum is documented as struct, union, or enum with the name of the typedef. So
-# typedef struct TypeS {} TypeT, will appear in the documentation as a struct
-# with name TypeT. When disabled the typedef will appear as a member of a file,
-# namespace, or class. And the struct will be named TypeS. This can typically be
-# useful for C code in case the coding convention dictates that all compound
-# types are typedef'ed and only the typedef is referenced, never the tag name.
-# The default value is: NO.
-
-TYPEDEF_HIDES_STRUCT   = YES
-
-# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This
-# cache is used to resolve symbols given their name and scope. Since this can be
-# an expensive process and often the same symbol appears multiple times in the
-# code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small
-# doxygen will become slower. If the cache is too large, memory is wasted. The
-# cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range
-# is 0..9, the default is 0, corresponding to a cache size of 2^16=65536
-# symbols. At the end of a run doxygen will report the cache usage and suggest
-# the optimal cache size from a speed point of view.
-# Minimum value: 0, maximum value: 9, default value: 0.
-
-LOOKUP_CACHE_SIZE      = 0
-
-#---------------------------------------------------------------------------
-# Build related configuration options
-#---------------------------------------------------------------------------
-
-# If the EXTRACT_ALL tag is set to YES, doxygen will assume all entities in
-# documentation are documented, even if no documentation was available. Private
-# class members and static file members will be hidden unless the
-# EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES.
-# Note: This will also disable the warnings about undocumented members that are
-# normally produced when WARNINGS is set to YES.
-# The default value is: NO.
-
-EXTRACT_ALL            = NO
-
-# If the EXTRACT_PRIVATE tag is set to YES, all private members of a class will
-# be included in the documentation.
-# The default value is: NO.
-
-EXTRACT_PRIVATE        = NO
-
-# If the EXTRACT_PACKAGE tag is set to YES, all members with package or internal
-# scope will be included in the documentation.
-# The default value is: NO.
-
-EXTRACT_PACKAGE        = NO
-
-# If the EXTRACT_STATIC tag is set to YES, all static members of a file will be
-# included in the documentation.
-# The default value is: NO.
-
-EXTRACT_STATIC         = YES
-
-# If the EXTRACT_LOCAL_CLASSES tag is set to YES, classes (and structs) defined
-# locally in source files will be included in the documentation. If set to NO,
-# only classes defined in header files are included. Does not have any effect
-# for Java sources.
-# The default value is: YES.
-
-EXTRACT_LOCAL_CLASSES  = YES
-
-# This flag is only useful for Objective-C code. If set to YES, local methods,
-# which are defined in the implementation section but not in the interface are
-# included in the documentation. If set to NO, only methods in the interface are
-# included.
-# The default value is: NO.
-
-EXTRACT_LOCAL_METHODS  = NO
-
-# If this flag is set to YES, the members of anonymous namespaces will be
-# extracted and appear in the documentation as a namespace called
-# 'anonymous_namespace{file}', where file will be replaced with the base name of
-# the file that contains the anonymous namespace. By default anonymous namespace
-# are hidden.
-# The default value is: NO.
-
-EXTRACT_ANON_NSPACES   = NO
-
-# If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all
-# undocumented members inside documented classes or files. If set to NO these
-# members will be included in the various overviews, but no documentation
-# section is generated. This option has no effect if EXTRACT_ALL is enabled.
-# The default value is: NO.
-
-HIDE_UNDOC_MEMBERS     = NO
-
-# If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all
-# undocumented classes that are normally visible in the class hierarchy. If set
-# to NO, these classes will be included in the various overviews. This option
-# has no effect if EXTRACT_ALL is enabled.
-# The default value is: NO.
-
-HIDE_UNDOC_CLASSES     = NO
-
-# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend
-# (class|struct|union) declarations. If set to NO, these declarations will be
-# included in the documentation.
-# The default value is: NO.
-
-HIDE_FRIEND_COMPOUNDS  = NO
-
-# If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any
-# documentation blocks found inside the body of a function. If set to NO, these
-# blocks will be appended to the function's detailed documentation block.
-# The default value is: NO.
-
-HIDE_IN_BODY_DOCS      = NO
-
-# The INTERNAL_DOCS tag determines if documentation that is typed after a
-# \internal command is included. If the tag is set to NO then the documentation
-# will be excluded. Set it to YES to include the internal documentation.
-# The default value is: NO.
-
-INTERNAL_DOCS          = NO
-
-# If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file
-# names in lower-case letters. If set to YES, upper-case letters are also
-# allowed. This is useful if you have classes or files whose names only differ
-# in case and if your file system supports case sensitive file names. Windows
-# and Mac users are advised to set this option to NO.
-# The default value is: system dependent.
-
-CASE_SENSE_NAMES       = YES
-
-# If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with
-# their full class and namespace scopes in the documentation. If set to YES, the
-# scope will be hidden.
-# The default value is: NO.
-
-HIDE_SCOPE_NAMES       = NO
-
-# If the HIDE_COMPOUND_REFERENCE tag is set to NO (default) then doxygen will
-# append additional text to a page's title, such as Class Reference. If set to
-# YES the compound reference will be hidden.
-# The default value is: NO.
-
-HIDE_COMPOUND_REFERENCE= NO
-
-# If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of
-# the files that are included by a file in the documentation of that file.
-# The default value is: YES.
-
-SHOW_INCLUDE_FILES     = YES
-
-# If the SHOW_GROUPED_MEMB_INC tag is set to YES then Doxygen will add for each
-# grouped member an include statement to the documentation, telling the reader
-# which file to include in order to use the member.
-# The default value is: NO.
-
-SHOW_GROUPED_MEMB_INC  = NO
-
-# If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include
-# files with double quotes in the documentation rather than with sharp brackets.
-# The default value is: NO.
-
-FORCE_LOCAL_INCLUDES   = NO
-
-# If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the
-# documentation for inline members.
-# The default value is: YES.
-
-INLINE_INFO            = YES
-
-# If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the
-# (detailed) documentation of file and class members alphabetically by member
-# name. If set to NO, the members will appear in declaration order.
-# The default value is: YES.
-
-SORT_MEMBER_DOCS       = NO
-
-# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief
-# descriptions of file, namespace and class members alphabetically by member
-# name. If set to NO, the members will appear in declaration order. Note that
-# this will also influence the order of the classes in the class list.
-# The default value is: NO.
-
-SORT_BRIEF_DOCS        = NO
-
-# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the
-# (brief and detailed) documentation of class members so that constructors and
-# destructors are listed first. If set to NO the constructors will appear in the
-# respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS.
-# Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief
-# member documentation.
-# Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting
-# detailed member documentation.
-# The default value is: NO.
-
-SORT_MEMBERS_CTORS_1ST = NO
-
-# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy
-# of group names into alphabetical order. If set to NO the group names will
-# appear in their defined order.
-# The default value is: NO.
-
-SORT_GROUP_NAMES       = NO
-
-# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by
-# fully-qualified names, including namespaces. If set to NO, the class list will
-# be sorted only by class name, not including the namespace part.
-# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
-# Note: This option applies only to the class list, not to the alphabetical
-# list.
-# The default value is: NO.
-
-SORT_BY_SCOPE_NAME     = NO
-
-# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper
-# type resolution of all parameters of a function it will reject a match between
-# the prototype and the implementation of a member function even if there is
-# only one candidate or it is obvious which candidate to choose by doing a
-# simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still
-# accept a match between prototype and implementation in such cases.
-# The default value is: NO.
-
-STRICT_PROTO_MATCHING  = NO
-
-# The GENERATE_TODOLIST tag can be used to enable (YES) or disable (NO) the todo
-# list. This list is created by putting \todo commands in the documentation.
-# The default value is: YES.
-
-GENERATE_TODOLIST      = YES
-
-# The GENERATE_TESTLIST tag can be used to enable (YES) or disable (NO) the test
-# list. This list is created by putting \test commands in the documentation.
-# The default value is: YES.
-
-GENERATE_TESTLIST      = YES
-
-# The GENERATE_BUGLIST tag can be used to enable (YES) or disable (NO) the bug
-# list. This list is created by putting \bug commands in the documentation.
-# The default value is: YES.
-
-GENERATE_BUGLIST       = YES
-
-# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or disable (NO)
-# the deprecated list. This list is created by putting \deprecated commands in
-# the documentation.
-# The default value is: YES.
-
-GENERATE_DEPRECATEDLIST= YES
-
-# The ENABLED_SECTIONS tag can be used to enable conditional documentation
-# sections, marked by \if <section_label> ... \endif and \cond <section_label>
-# ... \endcond blocks.
-
-ENABLED_SECTIONS       =
-
-# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the
-# initial value of a variable or macro / define can have for it to appear in the
-# documentation. If the initializer consists of more lines than specified here
-# it will be hidden. Use a value of 0 to hide initializers completely. The
-# appearance of the value of individual variables and macros / defines can be
-# controlled using \showinitializer or \hideinitializer command in the
-# documentation regardless of this setting.
-# Minimum value: 0, maximum value: 10000, default value: 30.
-
-MAX_INITIALIZER_LINES  = 30
-
-# Set the SHOW_USED_FILES tag to NO to disable the list of files generated at
-# the bottom of the documentation of classes and structs. If set to YES, the
-# list will mention the files that were used to generate the documentation.
-# The default value is: YES.
-
-SHOW_USED_FILES        = YES
-
-# Set the SHOW_FILES tag to NO to disable the generation of the Files page. This
-# will remove the Files entry from the Quick Index and from the Folder Tree View
-# (if specified).
-# The default value is: YES.
-
-SHOW_FILES             = YES
-
-# Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces
-# page. This will remove the Namespaces entry from the Quick Index and from the
-# Folder Tree View (if specified).
-# The default value is: YES.
-
-SHOW_NAMESPACES        = NO
-
-# The FILE_VERSION_FILTER tag can be used to specify a program or script that
-# doxygen should invoke to get the current version for each file (typically from
-# the version control system). Doxygen will invoke the program by executing (via
-# popen()) the command command input-file, where command is the value of the
-# FILE_VERSION_FILTER tag, and input-file is the name of an input file provided
-# by doxygen. Whatever the program writes to standard output is used as the file
-# version. For an example see the documentation.
-
-FILE_VERSION_FILTER    =
-
-# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed
-# by doxygen. The layout file controls the global structure of the generated
-# output files in an output format independent way. To create the layout file
-# that represents doxygen's defaults, run doxygen with the -l option. You can
-# optionally specify a file name after the option, if omitted DoxygenLayout.xml
-# will be used as the name of the layout file.
-#
-# Note that if you run doxygen from a directory containing a file called
-# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE
-# tag is left empty.
-
-LAYOUT_FILE            =
-
-# The CITE_BIB_FILES tag can be used to specify one or more bib files containing
-# the reference definitions. This must be a list of .bib files. The .bib
-# extension is automatically appended if omitted. This requires the bibtex tool
-# to be installed. See also http://en.wikipedia.org/wiki/BibTeX for more info.
-# For LaTeX the style of the bibliography can be controlled using
-# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the
-# search path. See also \cite for info how to create references.
-
-CITE_BIB_FILES         =
-
-#---------------------------------------------------------------------------
-# Configuration options related to warning and progress messages
-#---------------------------------------------------------------------------
-
-# The QUIET tag can be used to turn on/off the messages that are generated to
-# standard output by doxygen. If QUIET is set to YES this implies that the
-# messages are off.
-# The default value is: NO.
-
-QUIET                  = NO
-
-# The WARNINGS tag can be used to turn on/off the warning messages that are
-# generated to standard error (stderr) by doxygen. If WARNINGS is set to YES
-# this implies that the warnings are on.
-#
-# Tip: Turn warnings on while writing the documentation.
-# The default value is: YES.
-
-WARNINGS               = YES
-
-# If the WARN_IF_UNDOCUMENTED tag is set to YES then doxygen will generate
-# warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag
-# will automatically be disabled.
-# The default value is: YES.
-
-WARN_IF_UNDOCUMENTED   = YES
-
-# If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for
-# potential errors in the documentation, such as not documenting some parameters
-# in a documented function, or documenting parameters that don't exist or using
-# markup commands wrongly.
-# The default value is: YES.
-
-WARN_IF_DOC_ERROR      = YES
-
-# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that
-# are documented, but have no documentation for their parameters or return
-# value. If set to NO, doxygen will only warn about wrong or incomplete
-# parameter documentation, but not about the absence of documentation.
-# The default value is: NO.
-
-WARN_NO_PARAMDOC       = NO
-
-# The WARN_FORMAT tag determines the format of the warning messages that doxygen
-# can produce. The string should contain the $file, $line, and $text tags, which
-# will be replaced by the file and line number from which the warning originated
-# and the warning text. Optionally the format may contain $version, which will
-# be replaced by the version of the file (if it could be obtained via
-# FILE_VERSION_FILTER)
-# The default value is: $file:$line: $text.
-
-WARN_FORMAT            = "$file:$line: $text"
-
-# The WARN_LOGFILE tag can be used to specify a file to which warning and error
-# messages should be written. If left blank the output is written to standard
-# error (stderr).
-
-WARN_LOGFILE           =
-
-#---------------------------------------------------------------------------
-# Configuration options related to the input files
-#---------------------------------------------------------------------------
-
-# The INPUT tag is used to specify the files and/or directories that contain
-# documented source files. You may enter file names like myfile.cpp or
-# directories like /usr/src/myproject. Separate the files or directories with
-# spaces.
-# Note: If this tag is empty the current directory is searched.
-
-INPUT                  = mainpage.doxy INTRODUCTION.md CONFIGURATION.md src/rdkafka.h src-cpp/rdkafkacpp.h
-
-# This tag can be used to specify the character encoding of the source files
-# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses
-# libiconv (or the iconv built into libc) for the transcoding. See the libiconv
-# documentation (see: http://www.gnu.org/software/libiconv) for the list of
-# possible encodings.
-# The default value is: UTF-8.
-
-INPUT_ENCODING         = UTF-8
-
-# If the value of the INPUT tag contains directories, you can use the
-# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and
-# *.h) to filter out the source-files in the directories. If left blank the
-# following patterns are tested:*.c, *.cc, *.cxx, *.cpp, *.c++, *.java, *.ii,
-# *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, *.hh, *.hxx, *.hpp,
-# *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, *.m, *.markdown,
-# *.md, *.mm, *.dox, *.py, *.f90, *.f, *.for, *.tcl, *.vhd, *.vhdl, *.ucf,
-# *.qsf, *.as and *.js.
-
-FILE_PATTERNS          =
-
-# The RECURSIVE tag can be used to specify whether or not subdirectories should
-# be searched for input files as well.
-# The default value is: NO.
-
-RECURSIVE              = NO
-
-# The EXCLUDE tag can be used to specify files and/or directories that should be
-# excluded from the INPUT source files. This way you can easily exclude a
-# subdirectory from a directory tree whose root is specified with the INPUT tag.
-#
-# Note that relative paths are relative to the directory from which doxygen is
-# run.
-
-EXCLUDE                =
-
-# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
-# directories that are symbolic links (a Unix file system feature) are excluded
-# from the input.
-# The default value is: NO.
-
-EXCLUDE_SYMLINKS       = NO
-
-# If the value of the INPUT tag contains directories, you can use the
-# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
-# certain files from those directories.
-#
-# Note that the wildcards are matched against the file with absolute path, so to
-# exclude all test directories for example use the pattern */test/*
-
-EXCLUDE_PATTERNS       =
-
-# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
-# (namespaces, classes, functions, etc.) that should be excluded from the
-# output. The symbol name can be a fully qualified name, a word, or if the
-# wildcard * is used, a substring. Examples: ANamespace, AClass,
-# AClass::ANamespace, ANamespace::*Test
-#
-# Note that the wildcards are matched against the file with absolute path, so to
-# exclude all test directories use the pattern */test/*
-
-EXCLUDE_SYMBOLS        =
-
-# The EXAMPLE_PATH tag can be used to specify one or more files or directories
-# that contain example code fragments that are included (see the \include
-# command).
-
-EXAMPLE_PATH           =
-
-# If the value of the EXAMPLE_PATH tag contains directories, you can use the
-# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and
-# *.h) to filter out the source-files in the directories. If left blank all
-# files are included.
-
-EXAMPLE_PATTERNS       =
-
-# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
-# searched for input files to be used with the \include or \dontinclude commands
-# irrespective of the value of the RECURSIVE tag.
-# The default value is: NO.
-
-EXAMPLE_RECURSIVE      = NO
-
-# The IMAGE_PATH tag can be used to specify one or more files or directories
-# that contain images that are to be included in the documentation (see the
-# \image command).
-
-IMAGE_PATH             =
-
-# The INPUT_FILTER tag can be used to specify a program that doxygen should
-# invoke to filter for each input file. Doxygen will invoke the filter program
-# by executing (via popen()) the command:
-#
-# <filter> <input-file>
-#
-# where <filter> is the value of the INPUT_FILTER tag, and <input-file> is the
-# name of an input file. Doxygen will then use the output that the filter
-# program writes to standard output. If FILTER_PATTERNS is specified, this tag
-# will be ignored.
-#
-# Note that the filter must not add or remove lines; it is applied before the
-# code is scanned, but not when the output code is generated. If lines are added
-# or removed, the anchors will not be placed correctly.
-
-INPUT_FILTER           =
-
-# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
-# basis. Doxygen will compare the file name with each pattern and apply the
-# filter if there is a match. The filters are a list of the form: pattern=filter
-# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how
-# filters are used. If the FILTER_PATTERNS tag is empty or if none of the
-# patterns match the file name, INPUT_FILTER is applied.
-
-FILTER_PATTERNS        =
-
-# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
-# INPUT_FILTER) will also be used to filter the input files that are used for
-# producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES).
-# The default value is: NO.
-
-FILTER_SOURCE_FILES    = NO
-
-# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file
-# pattern. A pattern will override the setting for FILTER_PATTERN (if any) and
-# it is also possible to disable source filtering for a specific pattern using
-# *.ext= (so without naming a filter).
-# This tag requires that the tag FILTER_SOURCE_FILES is set to YES.
-
-FILTER_SOURCE_PATTERNS =
-
-# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that
-# is part of the input, its contents will be placed on the main page
-# (index.html). This can be useful if you have a project on for instance GitHub
-# and want to reuse the introduction page also for the doxygen output.
-
-USE_MDFILE_AS_MAINPAGE =
-
-#---------------------------------------------------------------------------
-# Configuration options related to source browsing
-#---------------------------------------------------------------------------
-
-# If the SOURCE_BROWSER tag is set to YES then a list of source files will be
-# generated. Documented entities will be cross-referenced with these sources.
-#
-# Note: To get rid of all source code in the generated output, make sure that
-# also VERBATIM_HEADERS is set to NO.
-# The default value is: NO.
-
-SOURCE_BROWSER         = NO
-
-# Setting the INLINE_SOURCES tag to YES will include the body of functions,
-# classes and enums directly into the documentation.
-# The default value is: NO.
-
-INLINE_SOURCES         = NO
-
-# Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any
-# special comment blocks from generated source code fragments. Normal C, C++ and
-# Fortran comments will always remain visible.
-# The default value is: YES.
-
-STRIP_CODE_COMMENTS    = YES
-
-# If the REFERENCED_BY_RELATION tag is set to YES then for each documented
-# function all documented functions referencing it will be listed.
-# The default value is: NO.
-
-REFERENCED_BY_RELATION = NO
-
-# If the REFERENCES_RELATION tag is set to YES then for each documented function
-# all documented entities called/used by that function will be listed.
-# The default value is: NO.
-
-REFERENCES_RELATION    = NO
-
-# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set
-# to YES then the hyperlinks from functions in REFERENCES_RELATION and
-# REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will
-# link to the documentation.
-# The default value is: YES.
-
-REFERENCES_LINK_SOURCE = YES
-
-# If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the
-# source code will show a tooltip with additional information such as prototype,
-# brief description and links to the definition and documentation. Since this
-# will make the HTML file larger and loading of large files a bit slower, you
-# can opt to disable this feature.
-# The default value is: YES.
-# This tag requires that the tag SOURCE_BROWSER is set to YES.
-
-SOURCE_TOOLTIPS        = YES
-
-# If the USE_HTAGS tag is set to YES then the references to source code will
-# point to the HTML generated by the htags(1) tool instead of doxygen built-in
-# source browser. The htags tool is part of GNU's global source tagging system
-# (see http://www.gnu.org/software/global/global.html). You will need version
-# 4.8.6 or higher.
-#
-# To use it do the following:
-# - Install the latest version of global
-# - Enable SOURCE_BROWSER and USE_HTAGS in the config file
-# - Make sure the INPUT points to the root of the source tree
-# - Run doxygen as normal
-#
-# Doxygen will invoke htags (and that will in turn invoke gtags), so these
-# tools must be available from the command line (i.e. in the search path).
-#
-# The result: instead of the source browser generated by doxygen, the links to
-# source code will now point to the output of htags.
-# The default value is: NO.
-# This tag requires that the tag SOURCE_BROWSER is set to YES.
-
-USE_HTAGS              = NO
-
-# If the VERBATIM_HEADERS tag is set the YES then doxygen will generate a
-# verbatim copy of the header file for each class for which an include is
-# specified. Set to NO to disable this.
-# See also: Section \class.
-# The default value is: YES.
-
-VERBATIM_HEADERS       = YES
-
-# If the CLANG_ASSISTED_PARSING tag is set to YES then doxygen will use the
-# clang parser (see: http://clang.llvm.org/) for more accurate parsing at the
-# cost of reduced performance. This can be particularly helpful with template
-# rich C++ code for which doxygen's built-in parser lacks the necessary type
-# information.
-# Note: The availability of this option depends on whether or not doxygen was
-# compiled with the --with-libclang option.
-# The default value is: NO.
-
-CLANG_ASSISTED_PARSING = NO
-
-# If clang assisted parsing is enabled you can provide the compiler with command
-# line options that you would normally use when invoking the compiler. Note that
-# the include paths will already be set by doxygen for the files and directories
-# specified with INPUT and INCLUDE_PATH.
-# This tag requires that the tag CLANG_ASSISTED_PARSING is set to YES.
-
-CLANG_OPTIONS          =
-
-#---------------------------------------------------------------------------
-# Configuration options related to the alphabetical class index
-#---------------------------------------------------------------------------
-
-# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all
-# compounds will be generated. Enable this if the project contains a lot of
-# classes, structs, unions or interfaces.
-# The default value is: YES.
-
-ALPHABETICAL_INDEX     = YES
-
-# The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in
-# which the alphabetical index list will be split.
-# Minimum value: 1, maximum value: 20, default value: 5.
-# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
-
-COLS_IN_ALPHA_INDEX    = 5
-
-# In case all classes in a project start with a common prefix, all classes will
-# be put under the same header in the alphabetical index. The IGNORE_PREFIX tag
-# can be used to specify a prefix (or a list of prefixes) that should be ignored
-# while generating the index headers.
-# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
-
-IGNORE_PREFIX          =
-
-#---------------------------------------------------------------------------
-# Configuration options related to the HTML output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_HTML tag is set to YES, doxygen will generate HTML output
-# The default value is: YES.
-
-GENERATE_HTML          = YES
-
-# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a
-# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
-# it.
-# The default directory is: html.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_OUTPUT            = html
-
-# The HTML_FILE_EXTENSION tag can be used to specify the file extension for each
-# generated HTML page (for example: .htm, .php, .asp).
-# The default value is: .html.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_FILE_EXTENSION    = .html
-
-# The HTML_HEADER tag can be used to specify a user-defined HTML header file for
-# each generated HTML page. If the tag is left blank doxygen will generate a
-# standard header.
-#
-# To get valid HTML the header file that includes any scripts and style sheets
-# that doxygen needs, which is dependent on the configuration options used (e.g.
-# the setting GENERATE_TREEVIEW). It is highly recommended to start with a
-# default header using
-# doxygen -w html new_header.html new_footer.html new_stylesheet.css
-# YourConfigFile
-# and then modify the file new_header.html. See also section "Doxygen usage"
-# for information on how to generate the default header that doxygen normally
-# uses.
-# Note: The header is subject to change so you typically have to regenerate the
-# default header when upgrading to a newer version of doxygen. For a description
-# of the possible markers and block names see the documentation.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_HEADER            =
-
-# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each
-# generated HTML page. If the tag is left blank doxygen will generate a standard
-# footer. See HTML_HEADER for more information on how to generate a default
-# footer and what special commands can be used inside the footer. See also
-# section "Doxygen usage" for information on how to generate the default footer
-# that doxygen normally uses.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_FOOTER            =
-
-# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style
-# sheet that is used by each HTML page. It can be used to fine-tune the look of
-# the HTML output. If left blank doxygen will generate a default style sheet.
-# See also section "Doxygen usage" for information on how to generate the style
-# sheet that doxygen normally uses.
-# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as
-# it is more robust and this tag (HTML_STYLESHEET) will in the future become
-# obsolete.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_STYLESHEET        =
-
-# The HTML_EXTRA_STYLESHEET tag can be used to specify additional user-defined
-# cascading style sheets that are included after the standard style sheets
-# created by doxygen. Using this option one can overrule certain style aspects.
-# This is preferred over using HTML_STYLESHEET since it does not replace the
-# standard style sheet and is therefore more robust against future updates.
-# Doxygen will copy the style sheet files to the output directory.
-# Note: The order of the extra style sheet files is of importance (e.g. the last
-# style sheet in the list overrules the setting of the previous ones in the
-# list). For an example see the documentation.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_EXTRA_STYLESHEET  =
-
-# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or
-# other source files which should be copied to the HTML output directory. Note
-# that these files will be copied to the base HTML output directory. Use the
-# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these
-# files. In the HTML_STYLESHEET file, use the file name only. Also note that the
-# files will be copied as-is; there are no commands or markers available.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_EXTRA_FILES       =
-
-# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen
-# will adjust the colors in the style sheet and background images according to
-# this color. Hue is specified as an angle on a colorwheel, see
-# http://en.wikipedia.org/wiki/Hue for more information. For instance the value
-# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300
-# purple, and 360 is red again.
-# Minimum value: 0, maximum value: 359, default value: 220.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_COLORSTYLE_HUE    = 220
-
-# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors
-# in the HTML output. For a value of 0 the output will use grayscales only. A
-# value of 255 will produce the most vivid colors.
-# Minimum value: 0, maximum value: 255, default value: 100.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_COLORSTYLE_SAT    = 100
-
-# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the
-# luminance component of the colors in the HTML output. Values below 100
-# gradually make the output lighter, whereas values above 100 make the output
-# darker. The value divided by 100 is the actual gamma applied, so 80 represents
-# a gamma of 0.8, The value 220 represents a gamma of 2.2, and 100 does not
-# change the gamma.
-# Minimum value: 40, maximum value: 240, default value: 80.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_COLORSTYLE_GAMMA  = 80
-
-# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML
-# page will contain the date and time when the page was generated. Setting this
-# to YES can help to show when doxygen was last run and thus if the
-# documentation is up to date.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_TIMESTAMP         = YES
-
-# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
-# documentation will contain sections that can be hidden and shown after the
-# page has loaded.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_DYNAMIC_SECTIONS  = NO
-
-# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries
-# shown in the various tree structured indices initially; the user can expand
-# and collapse entries dynamically later on. Doxygen will expand the tree to
-# such a level that at most the specified number of entries are visible (unless
-# a fully collapsed tree already exceeds this amount). So setting the number of
-# entries 1 will produce a full collapsed tree by default. 0 is a special value
-# representing an infinite number of entries and will result in a full expanded
-# tree by default.
-# Minimum value: 0, maximum value: 9999, default value: 100.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_INDEX_NUM_ENTRIES = 100
-
-# If the GENERATE_DOCSET tag is set to YES, additional index files will be
-# generated that can be used as input for Apple's Xcode 3 integrated development
-# environment (see: http://developer.apple.com/tools/xcode/), introduced with
-# OSX 10.5 (Leopard). To create a documentation set, doxygen will generate a
-# Makefile in the HTML output directory. Running make will produce the docset in
-# that directory and running make install will install the docset in
-# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at
-# startup. See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html
-# for more information.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-GENERATE_DOCSET        = NO
-
-# This tag determines the name of the docset feed. A documentation feed provides
-# an umbrella under which multiple documentation sets from a single provider
-# (such as a company or product suite) can be grouped.
-# The default value is: Doxygen generated docs.
-# This tag requires that the tag GENERATE_DOCSET is set to YES.
-
-DOCSET_FEEDNAME        = "librdkafka documentation"
-
-# This tag specifies a string that should uniquely identify the documentation
-# set bundle. This should be a reverse domain-name style string, e.g.
-# com.mycompany.MyDocSet. Doxygen will append .docset to the name.
-# The default value is: org.doxygen.Project.
-# This tag requires that the tag GENERATE_DOCSET is set to YES.
-
-DOCSET_BUNDLE_ID       = se.edenhill.librdkafka
-
-# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify
-# the documentation publisher. This should be a reverse domain-name style
-# string, e.g. com.mycompany.MyDocSet.documentation.
-# The default value is: org.doxygen.Publisher.
-# This tag requires that the tag GENERATE_DOCSET is set to YES.
-
-DOCSET_PUBLISHER_ID    = se.edenhill
-
-# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher.
-# The default value is: Publisher.
-# This tag requires that the tag GENERATE_DOCSET is set to YES.
-
-DOCSET_PUBLISHER_NAME  = Magnus Edenhill
-
-# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three
-# additional HTML index files: index.hhp, index.hhc, and index.hhk. The
-# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop
-# (see: http://www.microsoft.com/en-us/download/details.aspx?id=21138) on
-# Windows.
-#
-# The HTML Help Workshop contains a compiler that can convert all HTML output
-# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML
-# files are now used as the Windows 98 help format, and will replace the old
-# Windows help format (.hlp) on all Windows platforms in the future. Compressed
-# HTML files also contain an index, a table of contents, and you can search for
-# words in the documentation. The HTML workshop also contains a viewer for
-# compressed HTML files.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-GENERATE_HTMLHELP      = NO
-
-# The CHM_FILE tag can be used to specify the file name of the resulting .chm
-# file. You can add a path in front of the file if the result should not be
-# written to the html output directory.
-# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
-
-CHM_FILE               =
-
-# The HHC_LOCATION tag can be used to specify the location (absolute path
-# including file name) of the HTML help compiler (hhc.exe). If non-empty,
-# doxygen will try to run the HTML help compiler on the generated index.hhp.
-# The file has to be specified with full path.
-# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
-
-HHC_LOCATION           =
-
-# The GENERATE_CHI flag controls if a separate .chi index file is generated
-# (YES) or that it should be included in the master .chm file (NO).
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
-
-GENERATE_CHI           = NO
-
-# The CHM_INDEX_ENCODING is used to encode HtmlHelp index (hhk), content (hhc)
-# and project file content.
-# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
-
-CHM_INDEX_ENCODING     =
-
-# The BINARY_TOC flag controls whether a binary table of contents is generated
-# (YES) or a normal table of contents (NO) in the .chm file. Furthermore it
-# enables the Previous and Next buttons.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
-
-BINARY_TOC             = NO
-
-# The TOC_EXPAND flag can be set to YES to add extra items for group members to
-# the table of contents of the HTML help documentation and to the tree view.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
-
-TOC_EXPAND             = NO
-
-# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and
-# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that
-# can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help
-# (.qch) of the generated HTML documentation.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-GENERATE_QHP           = NO
-
-# If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify
-# the file name of the resulting .qch file. The path specified is relative to
-# the HTML output folder.
-# This tag requires that the tag GENERATE_QHP is set to YES.
-
-QCH_FILE               =
-
-# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help
-# Project output. For more information please see Qt Help Project / Namespace
-# (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#namespace).
-# The default value is: org.doxygen.Project.
-# This tag requires that the tag GENERATE_QHP is set to YES.
-
-QHP_NAMESPACE          = se.edenhill.librdkafka
-
-# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt
-# Help Project output. For more information please see Qt Help Project / Virtual
-# Folders (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#virtual-
-# folders).
-# The default value is: doc.
-# This tag requires that the tag GENERATE_QHP is set to YES.
-
-QHP_VIRTUAL_FOLDER     = doc
-
-# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom
-# filter to add. For more information please see Qt Help Project / Custom
-# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom-
-# filters).
-# This tag requires that the tag GENERATE_QHP is set to YES.
-
-QHP_CUST_FILTER_NAME   =
-
-# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the
-# custom filter to add. For more information please see Qt Help Project / Custom
-# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom-
-# filters).
-# This tag requires that the tag GENERATE_QHP is set to YES.
-
-QHP_CUST_FILTER_ATTRS  =
-
-# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this
-# project's filter section matches. Qt Help Project / Filter Attributes (see:
-# http://qt-project.org/doc/qt-4.8/qthelpproject.html#filter-attributes).
-# This tag requires that the tag GENERATE_QHP is set to YES.
-
-QHP_SECT_FILTER_ATTRS  =
-
-# The QHG_LOCATION tag can be used to specify the location of Qt's
-# qhelpgenerator. If non-empty doxygen will try to run qhelpgenerator on the
-# generated .qhp file.
-# This tag requires that the tag GENERATE_QHP is set to YES.
-
-QHG_LOCATION           =
-
-# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be
-# generated, together with the HTML files, they form an Eclipse help plugin. To
-# install this plugin and make it available under the help contents menu in
-# Eclipse, the contents of the directory containing the HTML and XML files needs
-# to be copied into the plugins directory of eclipse. The name of the directory
-# within the plugins directory should be the same as the ECLIPSE_DOC_ID value.
-# After copying Eclipse needs to be restarted before the help appears.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-GENERATE_ECLIPSEHELP   = NO
-
-# A unique identifier for the Eclipse help plugin. When installing the plugin
-# the directory name containing the HTML and XML files should also have this
-# name. Each documentation set should have its own identifier.
-# The default value is: org.doxygen.Project.
-# This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES.
-
-ECLIPSE_DOC_ID         = se.edenhill.librdkafka
-
-# If you want full control over the layout of the generated HTML pages it might
-# be necessary to disable the index and replace it with your own. The
-# DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top
-# of each HTML page. A value of NO enables the index and the value YES disables
-# it. Since the tabs in the index contain the same information as the navigation
-# tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-DISABLE_INDEX          = NO
-
-# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
-# structure should be generated to display hierarchical information. If the tag
-# value is set to YES, a side panel will be generated containing a tree-like
-# index structure (just like the one that is generated for HTML Help). For this
-# to work a browser that supports JavaScript, DHTML, CSS and frames is required
-# (i.e. any modern browser). Windows users are probably better off using the
-# HTML help feature. Via custom style sheets (see HTML_EXTRA_STYLESHEET) one can
-# further fine-tune the look of the index. As an example, the default style
-# sheet generated by doxygen has an example that shows how to put an image at
-# the root of the tree instead of the PROJECT_NAME. Since the tree basically has
-# the same information as the tab index, you could consider setting
-# DISABLE_INDEX to YES when enabling this option.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-GENERATE_TREEVIEW      = YES
-
-# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that
-# doxygen will group on one line in the generated HTML documentation.
-#
-# Note that a value of 0 will completely suppress the enum values from appearing
-# in the overview section.
-# Minimum value: 0, maximum value: 20, default value: 4.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-ENUM_VALUES_PER_LINE   = 1
-
-# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used
-# to set the initial width (in pixels) of the frame in which the tree is shown.
-# Minimum value: 0, maximum value: 1500, default value: 250.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-TREEVIEW_WIDTH         = 250
-
-# If the EXT_LINKS_IN_WINDOW option is set to YES, doxygen will open links to
-# external symbols imported via tag files in a separate window.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-EXT_LINKS_IN_WINDOW    = NO
-
-# Use this tag to change the font size of LaTeX formulas included as images in
-# the HTML documentation. When you change the font size after a successful
-# doxygen run you need to manually remove any form_*.png images from the HTML
-# output directory to force them to be regenerated.
-# Minimum value: 8, maximum value: 50, default value: 10.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-FORMULA_FONTSIZE       = 10
-
-# Use the FORMULA_TRANSPARENT tag to determine whether or not the images
-# generated for formulas are transparent PNGs. Transparent PNGs are not
-# supported properly for IE 6.0, but are supported on all modern browsers.
-#
-# Note that when changing this option you need to delete any form_*.png files in
-# the HTML output directory before the changes have effect.
-# The default value is: YES.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-FORMULA_TRANSPARENT    = YES
-
-# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see
-# http://www.mathjax.org) which uses client side Javascript for the rendering
-# instead of using pre-rendered bitmaps. Use this if you do not have LaTeX
-# installed or if you want the formulas to look prettier in the HTML output. When
-# enabled you may also need to install MathJax separately and configure the path
-# to it using the MATHJAX_RELPATH option.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-USE_MATHJAX            = NO
-
-# When MathJax is enabled you can set the default output format to be used for
-# the MathJax output. See the MathJax site (see:
-# http://docs.mathjax.org/en/latest/output.html) for more details.
-# Possible values are: HTML-CSS (which is slower, but has the best
-# compatibility), NativeMML (i.e. MathML) and SVG.
-# The default value is: HTML-CSS.
-# This tag requires that the tag USE_MATHJAX is set to YES.
-
-MATHJAX_FORMAT         = HTML-CSS
-
-# When MathJax is enabled you need to specify the location relative to the HTML
-# output directory using the MATHJAX_RELPATH option. The destination directory
-# should contain the MathJax.js script. For instance, if the mathjax directory
-# is located at the same level as the HTML output directory, then
-# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax
-# Content Delivery Network so you can quickly see the result without installing
-# MathJax. However, it is strongly recommended to install a local copy of
-# MathJax from http://www.mathjax.org before deployment.
-# The default value is: http://cdn.mathjax.org/mathjax/latest.
-# This tag requires that the tag USE_MATHJAX is set to YES.
-
-MATHJAX_RELPATH        = http://cdn.mathjax.org/mathjax/latest
-
-# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax
-# extension names that should be enabled during MathJax rendering. For example
-# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols
-# This tag requires that the tag USE_MATHJAX is set to YES.
-
-MATHJAX_EXTENSIONS     =
-
-# The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces
-# of code that will be used on startup of the MathJax code. See the MathJax site
-# (see: http://docs.mathjax.org/en/latest/output.html) for more details. For an
-# example see the documentation.
-# This tag requires that the tag USE_MATHJAX is set to YES.
-
-MATHJAX_CODEFILE       =
-
-# When the SEARCHENGINE tag is enabled doxygen will generate a search box for
-# the HTML output. The underlying search engine uses javascript and DHTML and
-# should work on any modern browser. Note that when using HTML help
-# (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET)
-# there is already a search function so this one should typically be disabled.
-# For large projects the javascript based search engine can be slow, then
-# enabling SERVER_BASED_SEARCH may provide a better solution. It is possible to
-# search using the keyboard; to jump to the search box use <access key> + S
-# (what the <access key> is depends on the OS and browser, but it is typically
-# <CTRL>, <ALT>/<option>, or both). Inside the search box use the <cursor down
-# key> to jump into the search results window, the results can be navigated
-# using the <cursor keys>. Press <Enter> to select an item or <escape> to cancel
-# the search. The filter options can be selected when the cursor is inside the
-# search box by pressing <Shift>+<cursor down>. Also here use the <cursor keys>
-# to select a filter and <Enter> or <escape> to activate or cancel the filter
-# option.
-# The default value is: YES.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-SEARCHENGINE           = YES
-
-# When the SERVER_BASED_SEARCH tag is enabled the search engine will be
-# implemented using a web server instead of a web client using Javascript. There
-# are two flavors of web server based searching depending on the EXTERNAL_SEARCH
-# setting. When disabled, doxygen will generate a PHP script for searching and
-# an index file used by the script. When EXTERNAL_SEARCH is enabled the indexing
-# and searching needs to be provided by external tools. See the section
-# "External Indexing and Searching" for details.
-# The default value is: NO.
-# This tag requires that the tag SEARCHENGINE is set to YES.
-
-SERVER_BASED_SEARCH    = NO
-
-# When EXTERNAL_SEARCH tag is enabled doxygen will no longer generate the PHP
-# script for searching. Instead the search results are written to an XML file
-# which needs to be processed by an external indexer. Doxygen will invoke an
-# external search engine pointed to by the SEARCHENGINE_URL option to obtain the
-# search results.
-#
-# Doxygen ships with an example indexer (doxyindexer) and search engine
-# (doxysearch.cgi) which are based on the open source search engine library
-# Xapian (see: http://xapian.org/).
-#
-# See the section "External Indexing and Searching" for details.
-# The default value is: NO.
-# This tag requires that the tag SEARCHENGINE is set to YES.
-
-EXTERNAL_SEARCH        = NO
-
-# The SEARCHENGINE_URL should point to a search engine hosted by a web server
-# which will return the search results when EXTERNAL_SEARCH is enabled.
-#
-# Doxygen ships with an example indexer (doxyindexer) and search engine
-# (doxysearch.cgi) which are based on the open source search engine library
-# Xapian (see: http://xapian.org/). See the section "External Indexing and
-# Searching" for details.
-# This tag requires that the tag SEARCHENGINE is set to YES.
-
-SEARCHENGINE_URL       =
-
-# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the unindexed
-# search data is written to a file for indexing by an external tool. With the
-# SEARCHDATA_FILE tag the name of this file can be specified.
-# The default file is: searchdata.xml.
-# This tag requires that the tag SEARCHENGINE is set to YES.
-
-SEARCHDATA_FILE        = searchdata.xml
-
-# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the
-# EXTERNAL_SEARCH_ID tag can be used as an identifier for the project. This is
-# useful in combination with EXTRA_SEARCH_MAPPINGS to search through multiple
-# projects and redirect the results back to the right project.
-# This tag requires that the tag SEARCHENGINE is set to YES.
-
-EXTERNAL_SEARCH_ID     =
-
-# The EXTRA_SEARCH_MAPPINGS tag can be used to enable searching through doxygen
-# projects other than the one defined by this configuration file, but that are
-# all added to the same external search index. Each project needs to have a
-# unique id set via EXTERNAL_SEARCH_ID. The search mapping then maps the id of
-# to a relative location where the documentation can be found. The format is:
-# EXTRA_SEARCH_MAPPINGS = tagname1=loc1 tagname2=loc2 ...
-# This tag requires that the tag SEARCHENGINE is set to YES.
-
-EXTRA_SEARCH_MAPPINGS  =
-
-#---------------------------------------------------------------------------
-# Configuration options related to the LaTeX output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_LATEX tag is set to YES, doxygen will generate LaTeX output.
-# The default value is: YES.
-
-GENERATE_LATEX         = YES
-
-# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. If a
-# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
-# it.
-# The default directory is: latex.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-LATEX_OUTPUT           = latex
-
-# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
-# invoked.
-#
-# Note that when enabling USE_PDFLATEX this option is only used for generating
-# bitmaps for formulas in the HTML output, but not in the Makefile that is
-# written to the output directory.
-# The default file is: latex.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-LATEX_CMD_NAME         = latex
-
-# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to generate
-# index for LaTeX.
-# The default file is: makeindex.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-MAKEINDEX_CMD_NAME     = makeindex
-
-# If the COMPACT_LATEX tag is set to YES, doxygen generates more compact LaTeX
-# documents. This may be useful for small projects and may help to save some
-# trees in general.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-COMPACT_LATEX          = NO
-
-# The PAPER_TYPE tag can be used to set the paper type that is used by the
-# printer.
-# Possible values are: a4 (210 x 297 mm), letter (8.5 x 11 inches), legal (8.5 x
-# 14 inches) and executive (7.25 x 10.5 inches).
-# The default value is: a4.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-PAPER_TYPE             = a4
-
-# The EXTRA_PACKAGES tag can be used to specify one or more LaTeX package names
-# that should be included in the LaTeX output. To get the times font for
-# instance you can specify
-# EXTRA_PACKAGES=times
-# If left blank no extra packages will be included.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-EXTRA_PACKAGES         =
-
-# The LATEX_HEADER tag can be used to specify a personal LaTeX header for the
-# generated LaTeX document. The header should contain everything until the first
-# chapter. If it is left blank doxygen will generate a standard header. See
-# section "Doxygen usage" for information on how to let doxygen write the
-# default header to a separate file.
-#
-# Note: Only use a user-defined header if you know what you are doing! The
-# following commands have a special meaning inside the header: $title,
-# $datetime, $date, $doxygenversion, $projectname, $projectnumber,
-# $projectbrief, $projectlogo. Doxygen will replace $title with the empty
-# string, for the replacement values of the other commands the user is referred
-# to HTML_HEADER.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-LATEX_HEADER           =
-
-# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for the
-# generated LaTeX document. The footer should contain everything after the last
-# chapter. If it is left blank doxygen will generate a standard footer. See
-# LATEX_HEADER for more information on how to generate a default footer and what
-# special commands can be used inside the footer.
-#
-# Note: Only use a user-defined footer if you know what you are doing!
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-LATEX_FOOTER           =
-
-# The LATEX_EXTRA_STYLESHEET tag can be used to specify additional user-defined
-# LaTeX style sheets that are included after the standard style sheets created
-# by doxygen. Using this option one can overrule certain style aspects. Doxygen
-# will copy the style sheet files to the output directory.
-# Note: The order of the extra style sheet files is of importance (e.g. the last
-# style sheet in the list overrules the setting of the previous ones in the
-# list).
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-LATEX_EXTRA_STYLESHEET =
-
-# The LATEX_EXTRA_FILES tag can be used to specify one or more extra images or
-# other source files which should be copied to the LATEX_OUTPUT output
-# directory. Note that the files will be copied as-is; there are no commands or
-# markers available.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-LATEX_EXTRA_FILES      =
-
-# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated is
-# prepared for conversion to PDF (using ps2pdf or pdflatex). The PDF file will
-# contain links (just like the HTML output) instead of page references. This
-# makes the output suitable for online browsing using a PDF viewer.
-# The default value is: YES.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-PDF_HYPERLINKS         = YES
-
-# If the USE_PDFLATEX tag is set to YES, doxygen will use pdflatex to generate
-# the PDF file directly from the LaTeX files. Set this option to YES, to get a
-# higher quality PDF documentation.
-# The default value is: YES.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-USE_PDFLATEX           = YES
-
-# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \batchmode
-# command to the generated LaTeX files. This will instruct LaTeX to keep running
-# if errors occur, instead of asking the user for help. This option is also used
-# when generating formulas in HTML.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-LATEX_BATCHMODE        = NO
-
-# If the LATEX_HIDE_INDICES tag is set to YES then doxygen will not include the
-# index chapters (such as File Index, Compound Index, etc.) in the output.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-LATEX_HIDE_INDICES     = NO
-
-# If the LATEX_SOURCE_CODE tag is set to YES then doxygen will include source
-# code with syntax highlighting in the LaTeX output.
-#
-# Note that which sources are shown also depends on other settings such as
-# SOURCE_BROWSER.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-LATEX_SOURCE_CODE      = NO
-
-# The LATEX_BIB_STYLE tag can be used to specify the style to use for the
-# bibliography, e.g. plainnat, or ieeetr. See
-# http://en.wikipedia.org/wiki/BibTeX and \cite for more info.
-# The default value is: plain.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-LATEX_BIB_STYLE        = plain
-
-#---------------------------------------------------------------------------
-# Configuration options related to the RTF output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_RTF tag is set to YES, doxygen will generate RTF output. The
-# RTF output is optimized for Word 97 and may not look too pretty with other RTF
-# readers/editors.
-# The default value is: NO.
-
-GENERATE_RTF           = NO
-
-# The RTF_OUTPUT tag is used to specify where the RTF docs will be put. If a
-# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
-# it.
-# The default directory is: rtf.
-# This tag requires that the tag GENERATE_RTF is set to YES.
-
-RTF_OUTPUT             = rtf
-
-# If the COMPACT_RTF tag is set to YES, doxygen generates more compact RTF
-# documents. This may be useful for small projects and may help to save some
-# trees in general.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_RTF is set to YES.
-
-COMPACT_RTF            = NO
-
-# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated will
-# contain hyperlink fields. The RTF file will contain links (just like the HTML
-# output) instead of page references. This makes the output suitable for online
-# browsing using Word or some other Word compatible readers that support those
-# fields.
-#
-# Note: WordPad (write) and others do not support links.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_RTF is set to YES.
-
-RTF_HYPERLINKS         = NO
-
-# Load stylesheet definitions from file. Syntax is similar to doxygen's config
-# file, i.e. a series of assignments. You only have to provide replacements,
-# missing definitions are set to their default value.
-#
-# See also section "Doxygen usage" for information on how to generate the
-# default style sheet that doxygen normally uses.
-# This tag requires that the tag GENERATE_RTF is set to YES.
-
-RTF_STYLESHEET_FILE    =
-
-# Set optional variables used in the generation of an RTF document. Syntax is
-# similar to doxygen's config file. A template extensions file can be generated
-# using doxygen -e rtf extensionFile.
-# This tag requires that the tag GENERATE_RTF is set to YES.
-
-RTF_EXTENSIONS_FILE    =
-
-# If the RTF_SOURCE_CODE tag is set to YES then doxygen will include source code
-# with syntax highlighting in the RTF output.
-#
-# Note that which sources are shown also depends on other settings such as
-# SOURCE_BROWSER.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_RTF is set to YES.
-
-RTF_SOURCE_CODE        = NO
-
-#---------------------------------------------------------------------------
-# Configuration options related to the man page output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_MAN tag is set to YES, doxygen will generate man pages for
-# classes and files.
-# The default value is: NO.
-
-GENERATE_MAN           = NO
-
-# The MAN_OUTPUT tag is used to specify where the man pages will be put. If a
-# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
-# it. A directory man3 will be created inside the directory specified by
-# MAN_OUTPUT.
-# The default directory is: man.
-# This tag requires that the tag GENERATE_MAN is set to YES.
-
-MAN_OUTPUT             = man
-
-# The MAN_EXTENSION tag determines the extension that is added to the generated
-# man pages. In case the manual section does not start with a number, the number
-# 3 is prepended. The dot (.) at the beginning of the MAN_EXTENSION tag is
-# optional.
-# The default value is: .3.
-# This tag requires that the tag GENERATE_MAN is set to YES.
-
-MAN_EXTENSION          = .3
-
-# The MAN_SUBDIR tag determines the name of the directory created within
-# MAN_OUTPUT in which the man pages are placed. If defaults to man followed by
-# MAN_EXTENSION with the initial . removed.
-# This tag requires that the tag GENERATE_MAN is set to YES.
-
-MAN_SUBDIR             =
-
-# If the MAN_LINKS tag is set to YES and doxygen generates man output, then it
-# will generate one additional man file for each entity documented in the real
-# man page(s). These additional files only source the real man page, but without
-# them the man command would be unable to find the correct page.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_MAN is set to YES.
-
-MAN_LINKS              = NO
-
-#---------------------------------------------------------------------------
-# Configuration options related to the XML output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_XML tag is set to YES, doxygen will generate an XML file that
-# captures the structure of the code including all documentation.
-# The default value is: NO.
-
-GENERATE_XML           = NO
-
-# The XML_OUTPUT tag is used to specify where the XML pages will be put. If a
-# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
-# it.
-# The default directory is: xml.
-# This tag requires that the tag GENERATE_XML is set to YES.
-
-XML_OUTPUT             = xml
-
-# If the XML_PROGRAMLISTING tag is set to YES, doxygen will dump the program
-# listings (including syntax highlighting and cross-referencing information) to
-# the XML output. Note that enabling this will significantly increase the size
-# of the XML output.
-# The default value is: YES.
-# This tag requires that the tag GENERATE_XML is set to YES.
-
-XML_PROGRAMLISTING     = YES
-
-#---------------------------------------------------------------------------
-# Configuration options related to the DOCBOOK output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_DOCBOOK tag is set to YES, doxygen will generate Docbook files
-# that can be used to generate PDF.
-# The default value is: NO.
-
-GENERATE_DOCBOOK       = NO
-
-# The DOCBOOK_OUTPUT tag is used to specify where the Docbook pages will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be put in
-# front of it.
-# The default directory is: docbook.
-# This tag requires that the tag GENERATE_DOCBOOK is set to YES.
-
-DOCBOOK_OUTPUT         = docbook
-
-# If the DOCBOOK_PROGRAMLISTING tag is set to YES, doxygen will include the
-# program listings (including syntax highlighting and cross-referencing
-# information) to the DOCBOOK output. Note that enabling this will significantly
-# increase the size of the DOCBOOK output.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_DOCBOOK is set to YES.
-
-DOCBOOK_PROGRAMLISTING = NO
-
-#---------------------------------------------------------------------------
-# Configuration options for the AutoGen Definitions output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_AUTOGEN_DEF tag is set to YES, doxygen will generate an
-# AutoGen Definitions (see http://autogen.sf.net) file that captures the
-# structure of the code including all documentation. Note that this feature is
-# still experimental and incomplete at the moment.
-# The default value is: NO.
-
-GENERATE_AUTOGEN_DEF   = NO
-
-#---------------------------------------------------------------------------
-# Configuration options related to the Perl module output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_PERLMOD tag is set to YES, doxygen will generate a Perl module
-# file that captures the structure of the code including all documentation.
-#
-# Note that this feature is still experimental and incomplete at the moment.
-# The default value is: NO.
-
-GENERATE_PERLMOD       = NO
-
-# If the PERLMOD_LATEX tag is set to YES, doxygen will generate the necessary
-# Makefile rules, Perl scripts and LaTeX code to be able to generate PDF and DVI
-# output from the Perl module output.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_PERLMOD is set to YES.
-
-PERLMOD_LATEX          = NO
-
-# If the PERLMOD_PRETTY tag is set to YES, the Perl module output will be nicely
-# formatted so it can be parsed by a human reader. This is useful if you want to
-# understand what is going on. On the other hand, if this tag is set to NO, the
-# size of the Perl module output will be much smaller and Perl will parse it
-# just the same.
-# The default value is: YES.
-# This tag requires that the tag GENERATE_PERLMOD is set to YES.
-
-PERLMOD_PRETTY         = YES
-
-# The names of the make variables in the generated doxyrules.make file are
-# prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. This is useful
-# so different doxyrules.make files included by the same Makefile don't
-# overwrite each other's variables.
-# This tag requires that the tag GENERATE_PERLMOD is set to YES.
-
-PERLMOD_MAKEVAR_PREFIX =
-
-#---------------------------------------------------------------------------
-# Configuration options related to the preprocessor
-#---------------------------------------------------------------------------
-
-# If the ENABLE_PREPROCESSING tag is set to YES, doxygen will evaluate all
-# C-preprocessor directives found in the sources and include files.
-# The default value is: YES.
-
-ENABLE_PREPROCESSING   = YES
-
-# If the MACRO_EXPANSION tag is set to YES, doxygen will expand all macro names
-# in the source code. If set to NO, only conditional compilation will be
-# performed. Macro expansion can be done in a controlled way by setting
-# EXPAND_ONLY_PREDEF to YES.
-# The default value is: NO.
-# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
-
-MACRO_EXPANSION        = NO
-
-# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES then
-# the macro expansion is limited to the macros specified with the PREDEFINED and
-# EXPAND_AS_DEFINED tags.
-# The default value is: NO.
-# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
-
-EXPAND_ONLY_PREDEF     = NO
-
-# If the SEARCH_INCLUDES tag is set to YES, the include files in the
-# INCLUDE_PATH will be searched if a #include is found.
-# The default value is: YES.
-# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
-
-SEARCH_INCLUDES        = YES
-
-# The INCLUDE_PATH tag can be used to specify one or more directories that
-# contain include files that are not input files but should be processed by the
-# preprocessor.
-# This tag requires that the tag SEARCH_INCLUDES is set to YES.
-
-INCLUDE_PATH           =
-
-# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
-# patterns (like *.h and *.hpp) to filter out the header-files in the
-# directories. If left blank, the patterns specified with FILE_PATTERNS will be
-# used.
-# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
-
-INCLUDE_FILE_PATTERNS  =
-
-# The PREDEFINED tag can be used to specify one or more macro names that are
-# defined before the preprocessor is started (similar to the -D option of e.g.
-# gcc). The argument of the tag is a list of macros of the form: name or
-# name=definition (no spaces). If the definition and the "=" are omitted, "=1"
-# is assumed. To prevent a macro definition from being undefined via #undef or
-# recursively expanded use the := operator instead of the = operator.
-# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
-
-PREDEFINED             =
-
-# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then this
-# tag can be used to specify a list of macro names that should be expanded. The
-# macro definition that is found in the sources will be used. Use the PREDEFINED
-# tag if you want to use a different macro definition that overrules the
-# definition found in the source code.
-# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
-
-EXPAND_AS_DEFINED      =
-
-# If the SKIP_FUNCTION_MACROS tag is set to YES then doxygen's preprocessor will
-# remove all references to function-like macros that are alone on a line, have
-# an all uppercase name, and do not end with a semicolon. Such function macros
-# are typically used for boiler-plate code, and will confuse the parser if not
-# removed.
-# The default value is: YES.
-# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
-
-SKIP_FUNCTION_MACROS   = NO
-
-#---------------------------------------------------------------------------
-# Configuration options related to external references
-#---------------------------------------------------------------------------
-
-# The TAGFILES tag can be used to specify one or more tag files. For each tag
-# file the location of the external documentation should be added. The format of
-# a tag file without this location is as follows:
-# TAGFILES = file1 file2 ...
-# Adding location for the tag files is done as follows:
-# TAGFILES = file1=loc1 "file2 = loc2" ...
-# where loc1 and loc2 can be relative or absolute paths or URLs. See the
-# section "Linking to external documentation" for more information about the use
-# of tag files.
-# Note: Each tag file must have a unique name (where the name does NOT include
-# the path). If a tag file is not located in the directory in which doxygen is
-# run, you must also specify the path to the tagfile here.
-
-TAGFILES               =
-
-# When a file name is specified after GENERATE_TAGFILE, doxygen will create a
-# tag file that is based on the input files it reads. See section "Linking to
-# external documentation" for more information about the usage of tag files.
-
-GENERATE_TAGFILE       =
-
-# If the ALLEXTERNALS tag is set to YES, all external class will be listed in
-# the class index. If set to NO, only the inherited external classes will be
-# listed.
-# The default value is: NO.
-
-ALLEXTERNALS           = NO
-
-# If the EXTERNAL_GROUPS tag is set to YES, all external groups will be listed
-# in the modules index. If set to NO, only the current project's groups will be
-# listed.
-# The default value is: YES.
-
-EXTERNAL_GROUPS        = YES
-
-# If the EXTERNAL_PAGES tag is set to YES, all external pages will be listed in
-# the related pages index. If set to NO, only the current project's pages will
-# be listed.
-# The default value is: YES.
-
-EXTERNAL_PAGES         = YES
-
-# The PERL_PATH should be the absolute path and name of the perl script
-# interpreter (i.e. the result of 'which perl').
-# The default file (with absolute path) is: /usr/bin/perl.
-
-PERL_PATH              = /usr/bin/perl
-
-#---------------------------------------------------------------------------
-# Configuration options related to the dot tool
-#---------------------------------------------------------------------------
-
-# If the CLASS_DIAGRAMS tag is set to YES, doxygen will generate a class diagram
-# (in HTML and LaTeX) for classes with base or super classes. Setting the tag to
-# NO turns the diagrams off. Note that this option also works with HAVE_DOT
-# disabled, but it is recommended to install and use dot, since it yields more
-# powerful graphs.
-# The default value is: YES.
-
-CLASS_DIAGRAMS         = YES
-
-# You can define message sequence charts within doxygen comments using the \msc
-# command. Doxygen will then run the mscgen tool (see:
-# http://www.mcternan.me.uk/mscgen/)) to produce the chart and insert it in the
-# documentation. The MSCGEN_PATH tag allows you to specify the directory where
-# the mscgen tool resides. If left empty the tool is assumed to be found in the
-# default search path.
-
-MSCGEN_PATH            =
-
-# You can include diagrams made with dia in doxygen documentation. Doxygen will
-# then run dia to produce the diagram and insert it in the documentation. The
-# DIA_PATH tag allows you to specify the directory where the dia binary resides.
-# If left empty dia is assumed to be found in the default search path.
-
-DIA_PATH               =
-
-# If set to YES the inheritance and collaboration graphs will hide inheritance
-# and usage relations if the target is undocumented or is not a class.
-# The default value is: YES.
-
-HIDE_UNDOC_RELATIONS   = YES
-
-# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
-# available from the path. This tool is part of Graphviz (see:
-# http://www.graphviz.org/), a graph visualization toolkit from AT&T and Lucent
-# Bell Labs. The other options in this section have no effect if this option is
-# set to NO
-# The default value is: YES.
-
-HAVE_DOT               = YES
-
-# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is allowed
-# to run in parallel. When set to 0 doxygen will base this on the number of
-# processors available in the system. You can set it explicitly to a value
-# larger than 0 to get control over the balance between CPU load and processing
-# speed.
-# Minimum value: 0, maximum value: 32, default value: 0.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-DOT_NUM_THREADS        = 0
-
-# When you want a differently looking font in the dot files that doxygen
-# generates you can specify the font name using DOT_FONTNAME. You need to make
-# sure dot is able to find the font, which can be done by putting it in a
-# standard location or by setting the DOTFONTPATH environment variable or by
-# setting DOT_FONTPATH to the directory containing the font.
-# The default value is: Helvetica.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-DOT_FONTNAME           = Helvetica
-
-# The DOT_FONTSIZE tag can be used to set the size (in points) of the font of
-# dot graphs.
-# Minimum value: 4, maximum value: 24, default value: 10.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-DOT_FONTSIZE           = 10
-
-# By default doxygen will tell dot to use the default font as specified with
-# DOT_FONTNAME. If you specify a different font using DOT_FONTNAME you can set
-# the path where dot can find it using this tag.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-DOT_FONTPATH           =
-
-# If the CLASS_GRAPH tag is set to YES then doxygen will generate a graph for
-# each documented class showing the direct and indirect inheritance relations.
-# Setting this tag to YES will force the CLASS_DIAGRAMS tag to NO.
-# The default value is: YES.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-CLASS_GRAPH            = YES
-
-# If the COLLABORATION_GRAPH tag is set to YES then doxygen will generate a
-# graph for each documented class showing the direct and indirect implementation
-# dependencies (inheritance, containment, and class references variables) of the
-# class with other documented classes.
-# The default value is: YES.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-COLLABORATION_GRAPH    = YES
-
-# If the GROUP_GRAPHS tag is set to YES then doxygen will generate a graph for
-# groups, showing the direct groups dependencies.
-# The default value is: YES.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-GROUP_GRAPHS           = YES
-
-# If the UML_LOOK tag is set to YES, doxygen will generate inheritance and
-# collaboration diagrams in a style similar to the OMG's Unified Modeling
-# Language.
-# The default value is: NO.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-UML_LOOK               = NO
-
-# If the UML_LOOK tag is enabled, the fields and methods are shown inside the
-# class node. If there are many fields or methods and many nodes the graph may
-# become too big to be useful. The UML_LIMIT_NUM_FIELDS threshold limits the
-# number of items for each type to make the size more manageable. Set this to 0
-# for no limit. Note that the threshold may be exceeded by 50% before the limit
-# is enforced. So when you set the threshold to 10, up to 15 fields may appear,
-# but if the number exceeds 15, the total amount of fields shown is limited to
-# 10.
-# Minimum value: 0, maximum value: 100, default value: 10.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-UML_LIMIT_NUM_FIELDS   = 10
-
-# If the TEMPLATE_RELATIONS tag is set to YES then the inheritance and
-# collaboration graphs will show the relations between templates and their
-# instances.
-# The default value is: NO.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-TEMPLATE_RELATIONS     = NO
-
-# If the INCLUDE_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are set to
-# YES then doxygen will generate a graph for each documented file showing the
-# direct and indirect include dependencies of the file with other documented
-# files.
-# The default value is: YES.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-INCLUDE_GRAPH          = YES
-
-# If the INCLUDED_BY_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are
-# set to YES then doxygen will generate a graph for each documented file showing
-# the direct and indirect include dependencies of the file with other documented
-# files.
-# The default value is: YES.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-INCLUDED_BY_GRAPH      = YES
-
-# If the CALL_GRAPH tag is set to YES then doxygen will generate a call
-# dependency graph for every global function or class method.
-#
-# Note that enabling this option will significantly increase the time of a run.
-# So in most cases it will be better to enable call graphs for selected
-# functions only using the \callgraph command.
-# The default value is: NO.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-CALL_GRAPH             = NO
-
-# If the CALLER_GRAPH tag is set to YES then doxygen will generate a caller
-# dependency graph for every global function or class method.
-#
-# Note that enabling this option will significantly increase the time of a run.
-# So in most cases it will be better to enable caller graphs for selected
-# functions only using the \callergraph command.
-# The default value is: NO.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-CALLER_GRAPH           = NO
-
-# If the GRAPHICAL_HIERARCHY tag is set to YES then doxygen will graphical
-# hierarchy of all classes instead of a textual one.
-# The default value is: YES.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-GRAPHICAL_HIERARCHY    = YES
-
-# If the DIRECTORY_GRAPH tag is set to YES then doxygen will show the
-# dependencies a directory has on other directories in a graphical way. The
-# dependency relations are determined by the #include relations between the
-# files in the directories.
-# The default value is: YES.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-DIRECTORY_GRAPH        = YES
-
-# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
-# generated by dot.
-# Note: If you choose svg you need to set HTML_FILE_EXTENSION to xhtml in order
-# to make the SVG files visible in IE 9+ (other browsers do not have this
-# requirement).
-# Possible values are: png, png:cairo, png:cairo:cairo, png:cairo:gd, png:gd,
-# png:gd:gd, jpg, jpg:cairo, jpg:cairo:gd, jpg:gd, jpg:gd:gd, gif, gif:cairo,
-# gif:cairo:gd, gif:gd, gif:gd:gd and svg.
-# The default value is: png.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-DOT_IMAGE_FORMAT       = png
-
-# If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to
-# enable generation of interactive SVG images that allow zooming and panning.
-#
-# Note that this requires a modern browser other than Internet Explorer. Tested
-# and working are Firefox, Chrome, Safari, and Opera.
-# Note: For IE 9+ you need to set HTML_FILE_EXTENSION to xhtml in order to make
-# the SVG files visible. Older versions of IE do not have SVG support.
-# The default value is: NO.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-INTERACTIVE_SVG        = NO
-
-# The DOT_PATH tag can be used to specify the path where the dot tool can be
-# found. If left blank, it is assumed the dot tool can be found in the path.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-DOT_PATH               =
-
-# The DOTFILE_DIRS tag can be used to specify one or more directories that
-# contain dot files that are included in the documentation (see the \dotfile
-# command).
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-DOTFILE_DIRS           =
-
-# The MSCFILE_DIRS tag can be used to specify one or more directories that
-# contain msc files that are included in the documentation (see the \mscfile
-# command).
-
-MSCFILE_DIRS           =
-
-# The DIAFILE_DIRS tag can be used to specify one or more directories that
-# contain dia files that are included in the documentation (see the \diafile
-# command).
-
-DIAFILE_DIRS           =
-
-# When using plantuml, the PLANTUML_JAR_PATH tag should be used to specify the
-# path where java can find the plantuml.jar file. If left blank, it is assumed
-# PlantUML is not used or called during a preprocessing step. Doxygen will
-# generate a warning when it encounters a \startuml command in this case and
-# will not generate output for the diagram.
-
-PLANTUML_JAR_PATH      =
-
-# When using plantuml, the specified paths are searched for files specified by
-# the !include statement in a plantuml block.
-
-PLANTUML_INCLUDE_PATH  =
-
-# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of nodes
-# that will be shown in the graph. If the number of nodes in a graph becomes
-# larger than this value, doxygen will truncate the graph, which is visualized
-# by representing a node as a red box. Note that doxygen if the number of direct
-# children of the root node in a graph is already larger than
-# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note that
-# the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH.
-# Minimum value: 0, maximum value: 10000, default value: 50.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-DOT_GRAPH_MAX_NODES    = 50
-
-# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the graphs
-# generated by dot. A depth value of 3 means that only nodes reachable from the
-# root by following a path via at most 3 edges will be shown. Nodes that lay
-# further from the root node will be omitted. Note that setting this option to 1
-# or 2 may greatly reduce the computation time needed for large code bases. Also
-# note that the size of a graph can be further restricted by
-# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction.
-# Minimum value: 0, maximum value: 1000, default value: 0.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-MAX_DOT_GRAPH_DEPTH    = 0
-
-# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent
-# background. This is disabled by default, because dot on Windows does not seem
-# to support this out of the box.
-#
-# Warning: Depending on the platform used, enabling this option may lead to
-# badly anti-aliased labels on the edges of a graph (i.e. they become hard to
-# read).
-# The default value is: NO.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-DOT_TRANSPARENT        = NO
-
-# Set the DOT_MULTI_TARGETS tag to YES to allow dot to generate multiple output
-# files in one run (i.e. multiple -o and -T options on the command line). This
-# makes dot run faster, but since only newer versions of dot (>1.8.10) support
-# this, this feature is disabled by default.
-# The default value is: NO.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-DOT_MULTI_TARGETS      = NO
-
-# If the GENERATE_LEGEND tag is set to YES doxygen will generate a legend page
-# explaining the meaning of the various boxes and arrows in the dot generated
-# graphs.
-# The default value is: YES.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-GENERATE_LEGEND        = YES
-
-# If the DOT_CLEANUP tag is set to YES, doxygen will remove the intermediate dot
-# files that are used to generate the various graphs.
-# The default value is: YES.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-DOT_CLEANUP            = YES
diff --git a/thirdparty/librdkafka-0.11.4/INTRODUCTION.md b/thirdparty/librdkafka-0.11.4/INTRODUCTION.md
deleted file mode 100644
index 9b712a5..0000000
--- a/thirdparty/librdkafka-0.11.4/INTRODUCTION.md
+++ /dev/null
@@ -1,735 +0,0 @@
-//@file INTRODUCTION.md
-# Introduction to librdkafka - the Apache Kafka C/C++ client library
-
-
-librdkafka is a high performance C implementation of the Apache
-Kafka client, providing a reliable and performant client for production use.
-librdkafka also provides a native C++ interface.
-
-## Contents
-
-The following chapters are available in this document
-
-  * Performance
-    * Performance numbers
-    * High throughput
-    * Low latency
-    * Compression
-  * Message reliability
-  * Usage
-    * Documentation
-    * Initialization
-    * Configuration
-    * Threads and callbacks
-    * Brokers
-    * Producer API
-    * Consumer API
-  * Appendix
-    * Test detailts
-  
-
-
-
-## Performance
-
-librdkafka is a multi-threaded library designed for use on modern hardware and
-it attempts to keep memory copying at a minimal. The payload of produced or
-consumed messages may pass through without any copying
-(if so desired by the application) putting no limit on message sizes.
-
-librdkafka allows you to decide if high throughput is the name of the game,
-or if a low latency service is required, all through the configuration
-property interface.
-
-The two most important configuration properties for performance tuning are:
-
-  * `batch.num.messages` - the minimum number of messages to wait for to
-	  accumulate in the local queue before sending off a message set.
-  * `queue.buffering.max.ms` - how long to wait for batch.num.messages to
-	  fill up in the local queue. A lower value improves latency at the
-          cost of lower throughput and higher per-message overhead.
-          A higher value improves throughput at the expense of latency.
-          The recommended value for high throughput is > 50ms.
-
-
-### Performance numbers
-
-The following performance numbers stem from tests using the following setup:
-
-  * Intel Quad Core i7 at 3.4GHz, 8GB of memory
-  * Disk performance has been shortcut by setting the brokers' flush
-	configuration properties as so:
-	* `log.flush.interval.messages=10000000`
-	* `log.flush.interval.ms=100000`
-  * Two brokers running on the same machine as librdkafka.
-  * One topic with two partitions.
-  * Each broker is leader for one partition each.
-  * Using `rdkafka_performance` program available in the `examples` subdir.
-
-
-
-	
-
-**Test results**
-
-  * **Test1**: 2 brokers, 2 partitions, required.acks=2, 100 byte messages: 
-	  **850000 messages/second**, **85 MB/second**
-
-  * **Test2**: 1 broker, 1 partition, required.acks=0, 100 byte messages: 
-	  **710000 messages/second**, **71 MB/second**
-	  
-  * **Test3**: 2 broker2, 2 partitions, required.acks=2, 100 byte messages,
-	  snappy compression:
-	  **300000 messages/second**, **30 MB/second**
-
-  * **Test4**: 2 broker2, 2 partitions, required.acks=2, 100 byte messages,
-	  gzip compression:
-	  **230000 messages/second**, **23 MB/second**
-
-
-
-**Note**: See the *Test details* chapter at the end of this document for
-	information about the commands executed, etc.
-
-**Note**: Consumer performance tests will be announced soon.
-
-
-### High throughput
-
-The key to high throughput is message batching - waiting for a certain amount
-of messages to accumulate in the local queue before sending them off in
-one large message set or batch to the peer. This amortizes the messaging
-overhead and eliminates the adverse effect of the round trip time (rtt).
-
-`queue.buffering.max.ms` (also called `linger.ms`) allows librdkafka to
-wait up to the specified amount of time to accumulate up to
-`batch.num.messages` in a single batch (MessageSet) before sending
-to the broker. The larger the batch the higher the throughput.
-Enabling `msg` debugging (set `debug` property to `msg`) will emit log
-messages for the accumulation process which lets you see what batch sizes
-are being produced.
-
-Example using `queue.buffering.max.ms=1`:
-
-```
-... test [0]: MessageSet with 1514 message(s) delivered
-... test [3]: MessageSet with 1690 message(s) delivered
-... test [0]: MessageSet with 1720 message(s) delivered
-... test [3]: MessageSet with 2 message(s) delivered
-... test [3]: MessageSet with 4 message(s) delivered
-... test [0]: MessageSet with 4 message(s) delivered
-... test [3]: MessageSet with 11 message(s) delivered
-```
-
-Example using `queue.buffering.max.ms=1000`:
-```
-... test [0]: MessageSet with 10000 message(s) delivered
-... test [0]: MessageSet with 10000 message(s) delivered
-... test [0]: MessageSet with 4667 message(s) delivered
-... test [3]: MessageSet with 10000 message(s) delivered
-... test [3]: MessageSet with 10000 message(s) delivered
-... test [3]: MessageSet with 4476 message(s) delivered
-
-```
-
-
-The default setting of `queue.buffering.max.ms=1` is not suitable for
-high throughput, it is recommended to set this value to >50ms, with
-throughput leveling out somewhere around 100-1000ms depending on
-message produce pattern and sizes.
-
-These setting are set globally (`rd_kafka_conf_t`) but applies on a
-per topic+partition basis.
-
-
-### Low latency
-
-When low latency messaging is required the `queue.buffering.max.ms` should be
-tuned to the maximum permitted producer-side latency.
-Setting queue.buffering.max.ms to 1 will make sure messages are sent as
-soon as possible. You could check out [How to decrease message latency](https://github.com/edenhill/librdkafka/wiki/How-to-decrease-message-latency)
-to find more details.
-Lower buffering time leads to smaller batches and larger per-message overheads,
-increasing network, memory and CPU usage for producers, brokers and consumers.
-
-
-### Compression
-
-Producer message compression is enabled through the `compression.codec`
-configuration property.
-
-Compression is performed on the batch of messages in the local queue, the
-larger the batch the higher likelyhood of a higher compression ratio.
-The local batch queue size is controlled through the `batch.num.messages` and
-`queue.buffering.max.ms` configuration properties as described in the
-**High throughput** chapter above.
-
-
-
-## Message reliability
-
-Message reliability is an important factor of librdkafka - an application
-can rely fully on librdkafka to deliver a message according to the specified
-configuration (`request.required.acks` and `message.send.max.retries`, etc).
-
-If the topic configuration property `request.required.acks` is set to wait
-for message commit acknowledgements from brokers (any value but 0, see
-[`CONFIGURATION.md`](https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md)
-for specifics) then librdkafka will hold on to the message until
-all expected acks have been received, gracefully handling the following events:
-
-  * Broker connection failure
-  * Topic leader change
-  * Produce errors signaled by the broker
-  * Network problems
-
-This is handled automatically by librdkafka and the application does not need
-to take any action at any of the above events.
-The message will be resent up to `message.send.max.retries` times before
-reporting a failure back to the application.
-
-The delivery report callback is used by librdkafka to signal the status of
-a message back to the application, it will be called once for each message
-to report the status of message delivery:
-
-  * If `error_code` is non-zero the message delivery failed and the error_code
-    indicates the nature of the failure (`rd_kafka_resp_err_t` enum).
-  * If `error_code` is zero the message has been successfully delivered.
-
-See Producer API chapter for more details on delivery report callback usage.
-
-The delivery report callback is optional but highly recommended.
-
-
-### Producer message delivery success
-
-When a ProduceRequest is successfully handled by the broker and a
-ProduceResponse is received (also called the ack) without an error code
-the messages from the ProduceRequest are enqueued on the delivery report
-queue (if a delivery report callback has been set) and will be passed to
-the application on the next invocation rd_kafka_poll().
-
-
-### Producer message delivery failure
-
-The following sub-chapters explains how different produce errors
-are handled.
-
-If the error is retryable and there are remaining retry attempts for
-the given message(s), an automatic retry will be scheduled by librdkafka,
-these retries are not visible to the application.
-
-Only permanent errors and temporary errors that have reached their maximum
-retry count will generate a delivery report event to the application with an
-error code set.
-
-The application should typically not attempt to retry producing the message
-on failure, but instead configure librdkafka to perform these retries
-using the `retries` and `retry.backoff.ms` configuration properties.
-
-
-#### Error: Timed out in transmission queue
-
-Internal error ERR__TIMED_OUT_QUEUE.
-
-The connectivity to the broker may be stalled due to networking contention,
-local or remote system issues, etc, and the request has not yet been sent.
-
-The producer can be certain that the message has not been sent to the broker.
-
-This is a retryable error, but is not counted as a retry attempt
-since the message was never actually transmitted.
-
-A retry by librdkafka at this point will not cause duplicate messages.
-
-
-#### Error: Timed out in flight to/from broker
-
-Internal error ERR__TIMED_OUT, ERR__TRANSPORT.
-
-Same reasons as for `Timed out in transmission queue` above, with the
-difference that the message may have been sent to the broker and might
-be stalling waiting for broker replicas to ack the message, or the response
-could be stalled due to networking issues.
-At this point the producer can't know if the message reached the broker,
-nor if the broker wrote the message to disk and replicas.
-
-This is a retryable error.
-
-A retry by librdkafka at this point may cause duplicate messages.
-
-
-#### Error: Temporary broker-side error
-
-Broker errors ERR_REQUEST_TIMED_OUT, ERR_NOT_ENOUGH_REPLICAS,
-ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND.
-
-These errors are considered temporary and librdkafka is will retry them
-if permitted by configuration.
-
-
-#### Error: Temporary errors due to stale metadata
-
-Broker errors ERR_LEADER_NOT_AVAILABLE, ERR_NOT_LEADER_FOR_PARTITION.
-
-These errors are considered temporary and a retry is warranted, a metadata
-request is automatically sent to find a new leader for the partition.
-
-A retry by librdkafka at this point will not cause duplicate messages.
-
-
-#### Error: Local time out
-
-Internal error ERR__MSG_TIMED_OUT.
-
-The message could not be successfully transmitted before `message.timeout.ms`
-expired, typically due to no leader being available or no broker connection.
-The message may have been retried due to other errors but
-those error messages are abstracted by the ERR__MSG_TIMED_OUT error code.
-
-Since the `message.timeout.ms` has passed there will be no more retries
-by librdkafka.
-
-
-#### Error: Permanent errors
-
-Any other error is considered a permanent error and the message
-will fail immediately, generating a delivery report event with the
-distinctive error code.
-
-The full list of permanent errors depend on the broker version and
-will likely grow in the future.
-
-Typical permanent broker errors are:
- * ERR_CORRUPT_MESSAGE
- * ERR_MSG_SIZE_TOO_LARGE  - adjust client's or broker's `message.max.bytes`.
- * ERR_UNKNOWN_TOPIC_OR_PART - topic or partition does not exist,
-                               automatic topic creation is disabled on the
-                               broker or the application is specifying a
-                               partition that does not exist.
- * ERR_RECORD_LIST_TOO_LARGE
- * ERR_INVALID_REQUIRED_ACKS
- * ERR_TOPIC_AUTHORIZATION_FAILED
- * ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT
- * ERR_CLUSTER_AUTHORIZATION_FAILED
-
-
-### Producer retries
-
-The ProduceRequest itself is not retried, instead the messages
-are put back on the internal partition queue by an insert sort
-that maintains their original position (the message order is defined
-at the time a message is initially appended to a partition queue, i.e., after
-partitioning).
-A backoff time (`retry.backoff.ms`) is set on the retried messages which
-effectively blocks retry attempts until the backoff time has expired.
-
-
-### Reordering
-
-As for all retries, if `max.in.flight` > 1 and `retries` > 0, retried messages
-may be produced out of order, since a sub-sequent message in a sub-sequent
-ProduceRequest may already be in-flight (and accepted by the broker)
-by the time the retry for the failing message is sent.
-
-
-
-
-## Usage
-
-### Documentation
-
-The librdkafka API is documented in the
-[`rdkafka.h`](https://github.com/edenhill/librdkafka/blob/master/src/rdkafka.h)
-header file, the configuration properties are documented in
-[`CONFIGURATION.md`](https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md)
-
-### Initialization
-
-The application needs to instantiate a top-level object `rd_kafka_t` which is
-the base container, providing global configuration and shared state.
-It is created by calling `rd_kafka_new()`.
-
-It also needs to instantiate one or more topics (`rd_kafka_topic_t`) to be used
-for producing to or consuming from. The topic object holds topic-specific
-configuration and will be internally populated with a mapping of all available
-partitions and their leader brokers.
-It is created by calling `rd_kafka_topic_new()`.
-
-Both `rd_kafka_t` and `rd_kafka_topic_t` comes with a configuration API which
-is optional.
-Not using the API will cause librdkafka to use its default values which are
-documented in [`CONFIGURATION.md`](https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md).
-
-**Note**: An application may create multiple `rd_kafka_t` objects and
-	they share no state.
-
-**Note**: An `rd_kafka_topic_t` object may only be used with the `rd_kafka_t`
-	object it was created from.
-
-
-
-### Configuration
-
-To ease integration with the official Apache Kafka software and lower
-the learning curve, librdkafka implements identical configuration
-properties as found in the official clients of Apache Kafka.
-
-Configuration is applied prior to object creation using the
-`rd_kafka_conf_set()` and `rd_kafka_topic_conf_set()` APIs.
-
-**Note**: The `rd_kafka.._conf_t` objects are not reusable after they have been
-	passed to `rd_kafka.._new()`.
-	The application does not need to free any config resources after a
-	`rd_kafka.._new()` call.
-
-#### Example
-
-    rd_kafka_conf_t *conf;
-    char errstr[512];
-    
-    conf = rd_kafka_conf_new();
-    rd_kafka_conf_set(conf, "compression.codec", "snappy", errstr, sizeof(errstr));
-    rd_kafka_conf_set(conf, "batch.num.messages", "100", errstr, sizeof(errstr));
-    
-    rd_kafka_new(RD_KAFKA_PRODUCER, conf);
-
-
-### Threads and callbacks
-
-librdkafka uses multiple threads internally to fully utilize modern hardware.
-The API is completely thread-safe and the calling application may call any
-of the API functions from any of its own threads at any time.
-
-A poll-based API is used to provide signaling back to the application,
-the application should call rd_kafka_poll() at regular intervals.
-The poll API will call the following configured callbacks (optional):
-
-  * message delivery report callback - signals that a message has been
-    delivered or failed delivery, allowing the application to take action
-    and to release any application resources used in the message.
-  * error callback - signals an error. These errors are usually of an
-    informational nature, i.e., failure to connect to a broker, and the
-    application usually does not need to take any action.
-    The type of error is passed as a rd_kafka_resp_err_t enum value,
-    including both remote broker errors as well as local failures.
-
-
-Optional callbacks not triggered by poll, these may be called from any thread:
-
-  * Logging callback - allows the application to output log messages
-	  generated by librdkafka.
-  * partitioner callback - application provided message partitioner.
-	  The partitioner may be called in any thread at any time, it may be
-	  called multiple times for the same key.
-	  Partitioner function contraints:
-	  * MUST NOT call any rd_kafka_*() functions
-      * MUST NOT block or execute for prolonged periods of time.
-      * MUST return a value between 0 and partition_cnt-1, or the
-          special RD_KAFKA_PARTITION_UA value if partitioning
-              could not be performed.
-
-
-
-### Brokers
-
-librdkafka only needs an initial list of brokers (at least one), called the
-bootstrap brokers.
-It will connect to all the bootstrap brokers, specified by the
-`metadata.broker.list` configuration property or by `rd_kafka_brokers_add()`,
-and query each one for Metadata information which contains the full list of
-brokers, topic, partitions and their leaders in the Kafka cluster.
-
-Broker names are specified as `host[:port]` where the port is optional 
-(default 9092) and the host is either a resolvable hostname or an IPv4 or IPv6
-address.
-If host resolves to multiple addresses librdkafka will round-robin the
-addresses for each connection attempt.
-A DNS record containing all broker address can thus be used to provide a
-reliable bootstrap broker.
-
-### Feature discovery
-
-Apache Kafka broker version 0.10.0 added support for the ApiVersionRequest API
-which allows a client to query a broker for its range of supported API versions.
-
-librdkafka supports this functionality and will query each broker on connect
-for this information (if `api.version.request=true`) and use it to enable or disable
-various protocol features, such as MessageVersion 1 (timestamps), KafkaConsumer, etc.
-
-If the broker fails to respond to the ApiVersionRequest librdkafka will
-assume the broker is too old to support the API and fall back to an older
-broker version's API. These fallback versions are hardcoded in librdkafka
-and is controlled by the `broker.version.fallback` configuration property.
-
-
-
-### Producer API
-
-After setting up the `rd_kafka_t` object with type `RD_KAFKA_PRODUCER` and one
-or more `rd_kafka_topic_t` objects librdkafka is ready for accepting messages
-to be produced and sent to brokers.
-
-The `rd_kafka_produce()` function takes the following arguments:
-
-  * `rkt` - the topic to produce to, previously created with
-	  `rd_kafka_topic_new()`
-  * `partition` - partition to produce to. If this is set to
-	  `RD_KAFKA_PARTITION_UA` (UnAssigned) then the configured partitioner
-		  function will be used to select a target partition.
-  * `msgflags` - 0, or one of:
-	  * `RD_KAFKA_MSG_F_COPY` - librdkafka will immediately make a copy of
-	    the payload. Use this when the payload is in non-persistent
-	    memory, such as the stack.
-	  * `RD_KAFKA_MSG_F_FREE` - let librdkafka free the payload using
-	    `free(3)` when it is done with it.
-	
-	These two flags are mutually exclusive and neither need to be set in
-	which case the payload is neither copied nor freed by librdkafka.
-		
-	If `RD_KAFKA_MSG_F_COPY` flag is not set no data copying will be
-	performed and librdkafka will hold on the payload pointer until
-	the message	has been delivered or fails.
-	The delivery report callback will be called when librdkafka is done
-	with the message to let the application regain ownership of the
-	payload memory.
-	The application must not free the payload in the delivery report
-	callback if `RD_KAFKA_MSG_F_FREE is set`.
-  * `payload`,`len` - the message payload
-  * `key`,`keylen` - an optional message key which can be used for partitioning.
-	  It will be passed to the topic partitioner callback, if any, and
-	  will be attached to the message when sending to the broker.
-  * `msg_opaque` - an optional application-provided per-message opaque pointer
-	  that will be provided in the message delivery callback to let
-	  the application reference a specific message.
-
-
-`rd_kafka_produce()` is a non-blocking API, it will enqueue the message
-on an internal queue and return immediately.
-If the number of queued messages would exceed the `queue.buffering.max.messages`
-configuration property then `rd_kafka_produce()` returns -1 and sets errno
-to `ENOBUFS` and last_error to `RD_KAFKA_RESP_ERR__QUEUE_FULL`, thus
-providing a backpressure mechanism.
-
-
-**Note**: See `examples/rdkafka_performance.c` for a producer implementation.
-
-
-### Simple Consumer API (legacy)
-
-NOTE: For the high-level KafkaConsumer interface see rd_kafka_subscribe (rdkafka.h) or KafkaConsumer (rdkafkacpp.h)
-
-The consumer API is a bit more stateful than the producer API.
-After creating `rd_kafka_t` with type `RD_KAFKA_CONSUMER` and
-`rd_kafka_topic_t` instances the application must also start the consumer
-for a given partition by calling `rd_kafka_consume_start()`.
-
-`rd_kafka_consume_start()` arguments:
-
-  * `rkt` - the topic to start consuming from, previously created with
-    	  `rd_kafka_topic_new()`.
-  * `partition` - partition to consume from.
-  * `offset` - message offset to start consuming from. This may either be an
-    	     absolute message offset or one of the two special offsets:
-	     `RD_KAFKA_OFFSET_BEGINNING` to start consuming from the beginning
-	     of the partition's queue (oldest message), or
-	     `RD_KAFKA_OFFSET_END` to start consuming at the next message to be
-	     produced to the partition, or
-	     `RD_KAFKA_OFFSET_STORED` to use the offset store.
-
-After a topic+partition consumer has been started librdkafka will attempt
-to keep `queued.min.messages` messages in the local queue by repeatedly
-fetching batches of messages from the broker.
-
-This local message queue is then served to the application through three
-different consume APIs:
-
-  * `rd_kafka_consume()` - consumes a single message
-  * `rd_kafka_consume_batch()` - consumes one or more messages
-  * `rd_kafka_consume_callback()` - consumes all messages in the local
-    queue and calls a callback function for each one.
-
-These three APIs are listed above the ascending order of performance,
-`rd_kafka_consume()` being the slowest and `rd_kafka_consume_callback()` being
-the fastest. The different consume variants are provided to cater for different
-application needs.
-
-A consumed message, as provided or returned by each of the consume functions,
-is represented by the `rd_kafka_message_t` type.
-
-`rd_kafka_message_t` members:
-
-  * `err` - Error signaling back to the application. If this field is non-zero
-    	  the `payload` field should be considered an error message and
-	  `err` is an error code (`rd_kafka_resp_err_t`).
-	  If `err` is zero then the message is a proper fetched message
-	  and `payload` et.al contains message payload data.
-  * `rkt`,`partition` - Topic and partition for this message or error.
-  * `payload`,`len` - Message payload data or error message (err!=0).
-  * `key`,`key_len` - Optional message key as specified by the producer
-  * `offset` - Message offset
-
-Both the `payload` and `key` memory, as well as the message as a whole, is
-owned by librdkafka and must not be used after an `rd_kafka_message_destroy()`
-call. librdkafka will share the same messageset receive buffer memory for all
-message payloads of that messageset to avoid excessive copying which means
-that if the application decides to hang on to a single `rd_kafka_message_t`
-it will hinder the backing memory to be released for all other messages
-from the same messageset.
-
-When the application is done consuming messages from a topic+partition it
-should call `rd_kafka_consume_stop()` to stop the consumer. This will also
-purge any messages currently in the local queue.
-
-
-**Note**: See `examples/rdkafka_performance.c` for a consumer implementation.
-
-
-#### Offset management
-
-Broker based offset management is available for broker version >= 0.9.0
-in conjunction with using the high-level KafkaConsumer interface (see
-rdkafka.h or rdkafkacpp.h)
-
-Offset management is also available through a local offset file store, where the
-offset is periodically written to a local file for each topic+partition
-according to the following topic configuration properties:
-
-  * `auto.commit.enable`
-  * `auto.commit.interval.ms`
-  * `offset.store.path`
-  * `offset.store.sync.interval.ms`
-
-There is currently no support for offset management with ZooKeeper.
-
-
-
-#### Consumer groups
-
-Broker based consumer groups (requires Apache Kafka broker >=0.9) are supported,
-see KafkaConsumer in rdkafka.h or rdkafkacpp.h
-
-
-### Topics
-
-#### Topic auto creation
-
-Topic auto creation is supported by librdkafka.
-The broker needs to be configured with `auto.create.topics.enable=true`.
-
-
-
-### Metadata
-
-#### < 0.9.3
-Previous to the 0.9.3 release librdkafka's metadata handling
-was chatty and excessive, which usually isn't a problem in small
-to medium-sized clusters, but in large clusters with a large amount
-of librdkafka clients the metadata requests could hog broker CPU and bandwidth.
-
-#### > 0.9.3
-
-The remaining Metadata sections describe the current behaviour.
-
-**Note:** "Known topics" in the following section means topics for
-          locally created `rd_kafka_topic_t` objects.
-
-
-#### Query reasons
-
-There are four reasons to query metadata:
-
- * brokers - update/populate cluster broker list, so the client can
-             find and connect to any new brokers added.
-
- * specific topic - find leader or partition count for specific topic
-
- * known topics - same, but for all locally known topics.
-
- * all topics - get topic names for consumer group wildcard subscription
-                matching
-
-The above list is sorted so that the sub-sequent entries contain the
-information above, e.g., 'known topics' contains enough information to
-also satisfy 'specific topic' and 'brokers'.
-
-
-#### Caching strategy
-
-The prevalent cache timeout is `metadata.max.age.ms`, any cached entry
-will remain authoritative for this long or until a relevant broker error
-is returned.
-
-
- * brokers - eternally cached, the broker list is additative.
-
- * topics - cached for `metadata.max.age.ms`
-
-
-
-
-## Appendix
-
-### Test details
-
-#### Test1: Produce to two brokers, two partitions, required.acks=2, 100 byte messages
-
-Each broker is leader for one of the two partitions.
-The random partitioner is used (default) and each broker and partition is
-assigned approximately 250000 messages each.
-
-**Command:**
-
-    # examples/rdkafka_performance -P -t test2 -s 100 -c 500000 -m "_____________Test1:TwoBrokers:500kmsgs:100bytes" -S 1 -a 2
-	....
-    % 500000 messages and 50000000 bytes sent in 587ms: 851531 msgs/s and 85.15 Mb/s, 0 messages failed, no compression
-
-**Result:**
-
-Message transfer rate is approximately **850000 messages per second**,
-**85 megabytes per second**.
-
-
-
-#### Test2: Produce to one broker, one partition, required.acks=0, 100 byte messages
-
-**Command:**
-
-    # examples/rdkafka_performance -P -t test2 -s 100 -c 500000 -m "_____________Test2:OneBrokers:500kmsgs:100bytes" -S 1 -a 0 -p 1
-	....
-	% 500000 messages and 50000000 bytes sent in 698ms: 715994 msgs/s and 71.60 Mb/s, 0 messages failed, no compression
-
-**Result:**
-
-Message transfer rate is approximately **710000 messages per second**,
-**71 megabytes per second**.
-
-
-
-#### Test3: Produce to two brokers, two partitions, required.acks=2, 100 byte messages, snappy compression
-
-**Command:**
-
-	# examples/rdkafka_performance -P -t test2 -s 100 -c 500000 -m "_____________Test3:TwoBrokers:500kmsgs:100bytes:snappy" -S 1 -a 2 -z snappy
-	....
-	% 500000 messages and 50000000 bytes sent in 1672ms: 298915 msgs/s and 29.89 Mb/s, 0 messages failed, snappy compression
-
-**Result:**
-
-Message transfer rate is approximately **300000 messages per second**,
-**30 megabytes per second**.
-
-
-#### Test4: Produce to two brokers, two partitions, required.acks=2, 100 byte messages, gzip compression
-
-**Command:**
-
-	# examples/rdkafka_performance -P -t test2 -s 100 -c 500000 -m "_____________Test3:TwoBrokers:500kmsgs:100bytes:gzip" -S 1 -a 2 -z gzip
-	....
-	% 500000 messages and 50000000 bytes sent in 2111ms: 236812 msgs/s and 23.68 Mb/s, 0 messages failed, gzip compression
-
-**Result:**
-
-Message transfer rate is approximately **230000 messages per second**,
-**23 megabytes per second**.
-
diff --git a/thirdparty/librdkafka-0.11.4/LICENSE b/thirdparty/librdkafka-0.11.4/LICENSE
deleted file mode 100644
index ba78cc2..0000000
--- a/thirdparty/librdkafka-0.11.4/LICENSE
+++ /dev/null
@@ -1,25 +0,0 @@
-librdkafka - Apache Kafka C driver library
-
-Copyright (c) 2012, Magnus Edenhill
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met: 
-
-1. Redistributions of source code must retain the above copyright notice,
-   this list of conditions and the following disclaimer. 
-2. Redistributions in binary form must reproduce the above copyright notice,
-   this list of conditions and the following disclaimer in the documentation
-   and/or other materials provided with the distribution. 
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
diff --git a/thirdparty/librdkafka-0.11.4/LICENSE.crc32c b/thirdparty/librdkafka-0.11.4/LICENSE.crc32c
deleted file mode 100644
index 482a345..0000000
--- a/thirdparty/librdkafka-0.11.4/LICENSE.crc32c
+++ /dev/null
@@ -1,28 +0,0 @@
-# For src/crc32c.c copied (with modifications) from
-# http://stackoverflow.com/a/17646775/1821055
-
-/* crc32c.c -- compute CRC-32C using the Intel crc32 instruction
- * Copyright (C) 2013 Mark Adler
- * Version 1.1  1 Aug 2013  Mark Adler
- */
-
-/*
-  This software is provided 'as-is', without any express or implied
-  warranty.  In no event will the author be held liable for any damages
-  arising from the use of this software.
-
-  Permission is granted to anyone to use this software for any purpose,
-  including commercial applications, and to alter it and redistribute it
-  freely, subject to the following restrictions:
-
-  1. The origin of this software must not be misrepresented; you must not
-     claim that you wrote the original software. If you use this software
-     in a product, an acknowledgment in the product documentation would be
-     appreciated but is not required.
-  2. Altered source versions must be plainly marked as such, and must not be
-     misrepresented as being the original software.
-  3. This notice may not be removed or altered from any source distribution.
-
-  Mark Adler
-  madler@alumni.caltech.edu
- */
diff --git a/thirdparty/librdkafka-0.11.4/LICENSE.lz4 b/thirdparty/librdkafka-0.11.4/LICENSE.lz4
deleted file mode 100644
index 353dfb4..0000000
--- a/thirdparty/librdkafka-0.11.4/LICENSE.lz4
+++ /dev/null
@@ -1,26 +0,0 @@
-src/xxhash.[ch] src/lz4*.[ch]: git@github.com:lz4/lz4.git e2827775ee80d2ef985858727575df31fc60f1f3
-
-LZ4 Library
-Copyright (c) 2011-2016, Yann Collet
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without modification,
-are permitted provided that the following conditions are met:
-
-* Redistributions of source code must retain the above copyright notice, this
-  list of conditions and the following disclaimer.
-
-* Redistributions in binary form must reproduce the above copyright notice, this
-  list of conditions and the following disclaimer in the documentation and/or
-  other materials provided with the distribution.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
-ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
-ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/thirdparty/librdkafka-0.11.4/LICENSE.murmur2 b/thirdparty/librdkafka-0.11.4/LICENSE.murmur2
deleted file mode 100644
index 296fffa..0000000
--- a/thirdparty/librdkafka-0.11.4/LICENSE.murmur2
+++ /dev/null
@@ -1,25 +0,0 @@
-parts of src/rdmurmur2.c: git@github.com:abrandoned/murmur2.git
-
-
-MurMurHash2 Library
-//-----------------------------------------------------------------------------
-// MurmurHash2 was written by Austin Appleby, and is placed in the public
-// domain. The author hereby disclaims copyright to this source code.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
diff --git a/thirdparty/librdkafka-0.11.4/LICENSE.pycrc b/thirdparty/librdkafka-0.11.4/LICENSE.pycrc
deleted file mode 100644
index 71baded..0000000
--- a/thirdparty/librdkafka-0.11.4/LICENSE.pycrc
+++ /dev/null
@@ -1,23 +0,0 @@
-The following license applies to the files rdcrc32.c and rdcrc32.h which
-have been generated by the pycrc tool.
-============================================================================
-
-Copyright (c) 2006-2012, Thomas Pircher <te...@gmx.net>
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
diff --git a/thirdparty/librdkafka-0.11.4/LICENSE.queue b/thirdparty/librdkafka-0.11.4/LICENSE.queue
deleted file mode 100644
index 14bbf93..0000000
--- a/thirdparty/librdkafka-0.11.4/LICENSE.queue
+++ /dev/null
@@ -1,31 +0,0 @@
-For sys/queue.h:
-
- * Copyright (c) 1991, 1993
- *	The Regents of the University of California.  All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 4. Neither the name of the University nor the names of its contributors
- *    may be used to endorse or promote products derived from this software
- *    without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- *	@(#)queue.h	8.5 (Berkeley) 8/20/94
- * $FreeBSD$
\ No newline at end of file
diff --git a/thirdparty/librdkafka-0.11.4/LICENSE.regexp b/thirdparty/librdkafka-0.11.4/LICENSE.regexp
deleted file mode 100644
index 5fa0b10..0000000
--- a/thirdparty/librdkafka-0.11.4/LICENSE.regexp
+++ /dev/null
@@ -1,5 +0,0 @@
-regexp.c and regexp.h from https://github.com/ccxvii/minilibs sha 875c33568b5a4aa4fb3dd0c52ea98f7f0e5ca684
-
-"
-These libraries are in the public domain (or the equivalent where that is not possible). You can do anything you want with them. You have no legal obligation to do anything else, although I appreciate attribution.
-"
diff --git a/thirdparty/librdkafka-0.11.4/LICENSE.snappy b/thirdparty/librdkafka-0.11.4/LICENSE.snappy
deleted file mode 100644
index baa6cfe..0000000
--- a/thirdparty/librdkafka-0.11.4/LICENSE.snappy
+++ /dev/null
@@ -1,36 +0,0 @@
-######################################################################
-# LICENSE.snappy covers files: snappy.c, snappy.h, snappy_compat.h   #
-# originally retrieved from http://github.com/andikleen/snappy-c     #
-# git revision 8015f2d28739b9a6076ebaa6c53fe27bc238d219              #
-######################################################################
-
-The snappy-c code is under the same license as the original snappy source
-
-Copyright 2011 Intel Corporation All Rights Reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
-    * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
-    * Neither the name of Intel Corporation nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
diff --git a/thirdparty/librdkafka-0.11.4/LICENSE.tinycthread b/thirdparty/librdkafka-0.11.4/LICENSE.tinycthread
deleted file mode 100644
index 0ceadef..0000000
--- a/thirdparty/librdkafka-0.11.4/LICENSE.tinycthread
+++ /dev/null
@@ -1,26 +0,0 @@
-From https://github.com/tinycthread/tinycthread/README.txt c57166cd510ffb5022dd5f127489b131b61441b9
-
-License
--------
-
-Copyright (c) 2012 Marcus Geelnard
-              2013-2014 Evan Nemerson
-
-This software is provided 'as-is', without any express or implied
-warranty. In no event will the authors be held liable for any damages
-arising from the use of this software.
-
-Permission is granted to anyone to use this software for any purpose,
-including commercial applications, and to alter it and redistribute it
-freely, subject to the following restrictions:
-
-    1. The origin of this software must not be misrepresented; you must not
-    claim that you wrote the original software. If you use this software
-    in a product, an acknowledgment in the product documentation would be
-    appreciated but is not required.
-
-    2. Altered source versions must be plainly marked as such, and must not be
-    misrepresented as being the original software.
-
-    3. This notice may not be removed or altered from any source
-    distribution.
diff --git a/thirdparty/librdkafka-0.11.4/LICENSE.wingetopt b/thirdparty/librdkafka-0.11.4/LICENSE.wingetopt
deleted file mode 100644
index 4c28701..0000000
--- a/thirdparty/librdkafka-0.11.4/LICENSE.wingetopt
+++ /dev/null
@@ -1,49 +0,0 @@
-For the files wingetopt.c wingetopt.h downloaded from https://github.com/alex85k/wingetopt
-
-/*
- * Copyright (c) 2002 Todd C. Miller <To...@courtesan.com>
- *
- * Permission to use, copy, modify, and distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- *
- * Sponsored in part by the Defense Advanced Research Projects
- * Agency (DARPA) and Air Force Research Laboratory, Air Force
- * Materiel Command, USAF, under agreement number F39502-99-1-0512.
- */
-/*-
- * Copyright (c) 2000 The NetBSD Foundation, Inc.
- * All rights reserved.
- *
- * This code is derived from software contributed to The NetBSD Foundation
- * by Dieter Baron and Thomas Klausner.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
- * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
diff --git a/thirdparty/librdkafka-0.11.4/LICENSES.txt b/thirdparty/librdkafka-0.11.4/LICENSES.txt
deleted file mode 100644
index ee8a6f4..0000000
--- a/thirdparty/librdkafka-0.11.4/LICENSES.txt
+++ /dev/null
@@ -1,313 +0,0 @@
-LICENSE
---------------------------------------------------------------
-librdkafka - Apache Kafka C driver library
-
-Copyright (c) 2012, Magnus Edenhill
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met: 
-
-1. Redistributions of source code must retain the above copyright notice,
-   this list of conditions and the following disclaimer. 
-2. Redistributions in binary form must reproduce the above copyright notice,
-   this list of conditions and the following disclaimer in the documentation
-   and/or other materials provided with the distribution. 
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-
-LICENSE.crc32c
---------------------------------------------------------------
-# For src/crc32c.c copied (with modifications) from
-# http://stackoverflow.com/a/17646775/1821055
-
-/* crc32c.c -- compute CRC-32C using the Intel crc32 instruction
- * Copyright (C) 2013 Mark Adler
- * Version 1.1  1 Aug 2013  Mark Adler
- */
-
-/*
-  This software is provided 'as-is', without any express or implied
-  warranty.  In no event will the author be held liable for any damages
-  arising from the use of this software.
-
-  Permission is granted to anyone to use this software for any purpose,
-  including commercial applications, and to alter it and redistribute it
-  freely, subject to the following restrictions:
-
-  1. The origin of this software must not be misrepresented; you must not
-     claim that you wrote the original software. If you use this software
-     in a product, an acknowledgment in the product documentation would be
-     appreciated but is not required.
-  2. Altered source versions must be plainly marked as such, and must not be
-     misrepresented as being the original software.
-  3. This notice may not be removed or altered from any source distribution.
-
-  Mark Adler
-  madler@alumni.caltech.edu
- */
-
-
-LICENSE.lz4
---------------------------------------------------------------
-src/xxhash.[ch] src/lz4*.[ch]: git@github.com:lz4/lz4.git e2827775ee80d2ef985858727575df31fc60f1f3
-
-LZ4 Library
-Copyright (c) 2011-2016, Yann Collet
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without modification,
-are permitted provided that the following conditions are met:
-
-* Redistributions of source code must retain the above copyright notice, this
-  list of conditions and the following disclaimer.
-
-* Redistributions in binary form must reproduce the above copyright notice, this
-  list of conditions and the following disclaimer in the documentation and/or
-  other materials provided with the distribution.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
-ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
-ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-LICENSE.murmur2
---------------------------------------------------------------
-parts of src/rdmurmur2.c: git@github.com:abrandoned/murmur2.git
-
-
-MurMurHash2 Library
-//-----------------------------------------------------------------------------
-// MurmurHash2 was written by Austin Appleby, and is placed in the public
-// domain. The author hereby disclaims copyright to this source code.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
-
-LICENSE.pycrc
---------------------------------------------------------------
-The following license applies to the files rdcrc32.c and rdcrc32.h which
-have been generated by the pycrc tool.
-============================================================================
-
-Copyright (c) 2006-2012, Thomas Pircher <te...@gmx.net>
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
-
-LICENSE.queue
---------------------------------------------------------------
-For sys/queue.h:
-
- * Copyright (c) 1991, 1993
- *	The Regents of the University of California.  All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 4. Neither the name of the University nor the names of its contributors
- *    may be used to endorse or promote products derived from this software
- *    without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- *	@(#)queue.h	8.5 (Berkeley) 8/20/94
- * $FreeBSD$
-
-LICENSE.regexp
---------------------------------------------------------------
-regexp.c and regexp.h from https://github.com/ccxvii/minilibs sha 875c33568b5a4aa4fb3dd0c52ea98f7f0e5ca684
-
-"
-These libraries are in the public domain (or the equivalent where that is not possible). You can do anything you want with them. You have no legal obligation to do anything else, although I appreciate attribution.
-"
-
-
-LICENSE.snappy
---------------------------------------------------------------
-######################################################################
-# LICENSE.snappy covers files: snappy.c, snappy.h, snappy_compat.h   #
-# originally retrieved from http://github.com/andikleen/snappy-c     #
-# git revision 8015f2d28739b9a6076ebaa6c53fe27bc238d219              #
-######################################################################
-
-The snappy-c code is under the same license as the original snappy source
-
-Copyright 2011 Intel Corporation All Rights Reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
-    * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
-    * Neither the name of Intel Corporation nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-
-LICENSE.tinycthread
---------------------------------------------------------------
-From https://github.com/tinycthread/tinycthread/README.txt c57166cd510ffb5022dd5f127489b131b61441b9
-
-License
--------
-
-Copyright (c) 2012 Marcus Geelnard
-              2013-2014 Evan Nemerson
-
-This software is provided 'as-is', without any express or implied
-warranty. In no event will the authors be held liable for any damages
-arising from the use of this software.
-
-Permission is granted to anyone to use this software for any purpose,
-including commercial applications, and to alter it and redistribute it
-freely, subject to the following restrictions:
-
-    1. The origin of this software must not be misrepresented; you must not
-    claim that you wrote the original software. If you use this software
-    in a product, an acknowledgment in the product documentation would be
-    appreciated but is not required.
-
-    2. Altered source versions must be plainly marked as such, and must not be
-    misrepresented as being the original software.
-
-    3. This notice may not be removed or altered from any source
-    distribution.
-
-
-LICENSE.wingetopt
---------------------------------------------------------------
-For the files wingetopt.c wingetopt.h downloaded from https://github.com/alex85k/wingetopt
-
-/*
- * Copyright (c) 2002 Todd C. Miller <To...@courtesan.com>
- *
- * Permission to use, copy, modify, and distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- *
- * Sponsored in part by the Defense Advanced Research Projects
- * Agency (DARPA) and Air Force Research Laboratory, Air Force
- * Materiel Command, USAF, under agreement number F39502-99-1-0512.
- */
-/*-
- * Copyright (c) 2000 The NetBSD Foundation, Inc.
- * All rights reserved.
- *
- * This code is derived from software contributed to The NetBSD Foundation
- * by Dieter Baron and Thomas Klausner.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
- * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-
diff --git a/thirdparty/librdkafka-0.11.4/Makefile b/thirdparty/librdkafka-0.11.4/Makefile
deleted file mode 100755
index e428c83..0000000
--- a/thirdparty/librdkafka-0.11.4/Makefile
+++ /dev/null
@@ -1,68 +0,0 @@
-LIBSUBDIRS=	src src-cpp
-
-CHECK_FILES+=	CONFIGURATION.md \
-		examples/rdkafka_example examples/rdkafka_performance \
-		examples/rdkafka_example_cpp
-
-PACKAGE_NAME?=	librdkafka
-VERSION?=	$(shell python packaging/get_version.py src/rdkafka.h)
-
-# Jenkins CI integration
-BUILD_NUMBER ?= 1
-
-.PHONY:
-
-all: mklove-check libs CONFIGURATION.md check
-
-include mklove/Makefile.base
-
-libs:
-	@(for d in $(LIBSUBDIRS); do $(MAKE) -C $$d || exit $?; done)
-
-CONFIGURATION.md: src/rdkafka.h examples
-	@printf "$(MKL_YELLOW)Updating$(MKL_CLR_RESET)\n"
-	@echo '//@file' > CONFIGURATION.md.tmp
-	@(examples/rdkafka_performance -X list >> CONFIGURATION.md.tmp; \
-		cmp CONFIGURATION.md CONFIGURATION.md.tmp || \
-		mv CONFIGURATION.md.tmp CONFIGURATION.md; \
-		rm -f CONFIGURATION.md.tmp)
-
-file-check: CONFIGURATION.md LICENSES.txt examples
-check: file-check
-	@(for d in $(LIBSUBDIRS); do $(MAKE) -C $$d $@ || exit $?; done)
-
-install:
-	@(for d in $(LIBSUBDIRS); do $(MAKE) -C $$d $@ || exit $?; done)
-
-examples tests: .PHONY libs
-	$(MAKE) -C $@
-
-docs:
-	doxygen Doxyfile
-	@echo "Documentation generated in staging-docs"
-
-clean-docs:
-	rm -rf staging-docs
-
-clean:
-	@$(MAKE) -C tests $@
-	@$(MAKE) -C examples $@
-	@(for d in $(LIBSUBDIRS); do $(MAKE) -C $$d $@ ; done)
-
-distclean: clean
-	./configure --clean
-	rm -f config.log config.log.old
-
-archive:
-	git archive --prefix=$(PACKAGE_NAME)-$(VERSION)/ \
-		-o $(PACKAGE_NAME)-$(VERSION).tar.gz HEAD
-	git archive --prefix=$(PACKAGE_NAME)-$(VERSION)/ \
-		-o $(PACKAGE_NAME)-$(VERSION).zip HEAD
-
-rpm: distclean
-	$(MAKE) -C packaging/rpm
-
-LICENSES.txt: .PHONY
-	@(for i in LICENSE LICENSE.*[^~] ; do (echo "$$i" ; echo "--------------------------------------------------------------" ; cat $$i ; echo "" ; echo "") ; done) > $@.tmp
-	@cmp $@ $@.tmp || mv $@.tmp $@ ; rm -f $@.tmp
-
diff --git a/thirdparty/librdkafka-0.11.4/README.md b/thirdparty/librdkafka-0.11.4/README.md
deleted file mode 100644
index 1c3a804..0000000
--- a/thirdparty/librdkafka-0.11.4/README.md
+++ /dev/null
@@ -1,168 +0,0 @@
-librdkafka - the Apache Kafka C/C++ client library
-==================================================
-
-Copyright (c) 2012-2018, [Magnus Edenhill](http://www.edenhill.se/).
-
-[https://github.com/edenhill/librdkafka](https://github.com/edenhill/librdkafka)
-
-[![Gitter chat](https://badges.gitter.im/edenhill/librdkafka.png)](https://gitter.im/edenhill/librdkafka) [![Build status](https://doozer.io/badge/edenhill/librdkafka/buildstatus/master)](https://doozer.io/user/edenhill/librdkafka)
-
-
-**librdkafka** is a C library implementation of the
-[Apache Kafka](http://kafka.apache.org/) protocol, containing both
-Producer and Consumer support. It was designed with message delivery reliability
-and high performance in mind, current figures exceed 1 million msgs/second for
-the producer and 3 million msgs/second for the consumer.
-
-**librdkafka** is licensed under the 2-clause BSD license.
-
-For an introduction to the performance and usage of librdkafka, see
-[INTRODUCTION.md](https://github.com/edenhill/librdkafka/blob/master/INTRODUCTION.md)
-
-See the [wiki](https://github.com/edenhill/librdkafka/wiki) for a FAQ.
-
-**NOTE**: The `master` branch is actively developed, use latest release for production use.
-
-
-# Overview #
-  * High-level producer
-  * High-level balanced KafkaConsumer (requires broker >= 0.9)
-  * Simple (legacy) consumer
-  * Compression: snappy, gzip, lz4
-  * [SSL](https://github.com/edenhill/librdkafka/wiki/Using-SSL-with-librdkafka) support
-  * [SASL](https://github.com/edenhill/librdkafka/wiki/Using-SASL-with-librdkafka) (GSSAPI/Kerberos/SSPI, PLAIN, SCRAM) support
-  * Broker version support: >=0.8 (see [Broker version compatibility](https://github.com/edenhill/librdkafka/wiki/Broker-version-compatibility))
-  * Stable C & C++ APIs (ABI safety guaranteed for C)
-  * [Statistics](https://github.com/edenhill/librdkafka/wiki/Statistics) metrics
-  * Debian package: librdkafka1 and librdkafka-dev in Debian and Ubuntu
-  * RPM package: librdkafka and librdkafka-devel
-  * Gentoo package: dev-libs/librdkafka
-  * Portable: runs on Linux, OSX, Win32, Solaris, FreeBSD, AIX, ...
-
-
-# Language bindings #
-
-  * C#/.NET: [confluent-kafka-dotnet](https://github.com/confluentinc/confluent-kafka-dotnet) (based on [rdkafka-dotnet](https://github.com/ah-/rdkafka-dotnet))
-  * C++: [cppkafka](https://github.com/mfontanini/cppkafka)
-  * D (C-like): [librdkafka](https://github.com/DlangApache/librdkafka/)
-  * D (C++-like): [librdkafkad](https://github.com/tamediadigital/librdkafka-d)
-  * Erlang: [erlkaf](https://github.com/silviucpp/erlkaf)
-  * Go: [confluent-kafka-go](https://github.com/confluentinc/confluent-kafka-go)
-  * Haskell (kafka, conduit, avro, schema registry): [hw-kafka](https://github.com/haskell-works/hw-kafka)
-  * Haskell: [haskakafka](https://github.com/cosbynator/haskakafka)
-  * Haskell: [haskell-kafka](https://github.com/yanatan16/haskell-kafka)
-  * Lua: [luardkafka](https://github.com/mistsv/luardkafka)
-  * Node.js: [node-rdkafka](https://github.com/Blizzard/node-rdkafka)
-  * Node.js: [node-kafka](https://github.com/sutoiku/node-kafka)
-  * Node.js: [kafka-native](https://github.com/jut-io/node-kafka-native)
-  * OCaml: [ocaml-kafka](https://github.com/didier-wenzek/ocaml-kafka)
-  * PHP: [phpkafka](https://github.com/EVODelavega/phpkafka)
-  * PHP: [php-rdkafka](https://github.com/arnaud-lb/php-rdkafka)
-  * Python: [confluent-kafka-python](https://github.com/confluentinc/confluent-kafka-python)
-  * Python: [PyKafka](https://github.com/Parsely/pykafka)
-  * Ruby: [Hermann](https://github.com/reiseburo/hermann)
-  * Ruby: [rdkafka-ruby](https://github.com/appsignal/rdkafka-ruby)
-  * Rust: [rust-rdkafka](https://github.com/fede1024/rust-rdkafka)
-  * Tcl: [KafkaTcl](https://github.com/flightaware/kafkatcl)
-  * Swift: [Perfect-Kafka](https://github.com/PerfectlySoft/Perfect-Kafka)
-
-# Users of librdkafka #
-
-  * [kafkacat](https://github.com/edenhill/kafkacat) - Apache Kafka swiss army knife
-  * [Wikimedia's varnishkafka](https://github.com/wikimedia/varnishkafka) - Varnish cache web log producer
-  * [Wikimedia's kafkatee](https://github.com/wikimedia/analytics-kafkatee) - Kafka multi consumer with filtering and fanout
-  * [rsyslog](http://www.rsyslog.com)
-  * [syslog-ng](http://syslog-ng.org)
-  * [collectd](http://collectd.org)
-  * [logkafka](https://github.com/Qihoo360/logkafka) - Collect logs and send to Kafka
-  * [redBorder](http://www.redborder.net)
-  * [Headweb](http://www.headweb.com/)
-  * [Produban's log2kafka](https://github.com/Produban/log2kafka) - Web log producer
-  * [fuse_kafka](https://github.com/yazgoo/fuse_kafka) - FUSE file system layer
-  * [node-kafkacat](https://github.com/Rafflecopter/node-kafkacat)
-  * [OVH](http://ovh.com) - [AntiDDOS](http://www.slideshare.net/hugfrance/hugfr-6-oct2014ovhantiddos)
-  * [otto.de](http://otto.de)'s [trackdrd](https://github.com/otto-de/trackrdrd) - Varnish log reader
-  * [Microwish](https://github.com/microwish) has a range of Kafka utilites for log aggregation, HDFS integration, etc.
-  * [aidp](https://github.com/weiboad/aidp) - kafka consumer embedded Lua scripting language in data process framework
-  * [Yandex ClickHouse](https://github.com/yandex/ClickHouse)
-  * [NXLog](http://nxlog.co/) - Enterprise logging system, Kafka input/output plugin.
-  * large unnamed financial institutions
-  * and many more..
-  * *Let [me](mailto:rdkafka@edenhill.se) know if you are using librdkafka*
-
-
-
-# Usage
-
-## Requirements
-	The GNU toolchain
-	GNU make
-   	pthreads
-	zlib (optional, for gzip compression support)
-	libssl-dev (optional, for SSL and SASL SCRAM support)
-	libsasl2-dev (optional, for SASL GSSAPI support)
-
-## Instructions
-
-### Building
-
-      ./configure
-      make
-      sudo make install
-
-
-**NOTE**: See [README.win32](README.win32) for instructions how to build
-          on Windows with Microsoft Visual Studio.
-
-**NOTE**: See [CMake instructions](packaging/cmake/README.md) for experimental
-          CMake build (unsupported).
-
-
-### Usage in code
-
-See [examples/rdkafka_example.c](https://github.com/edenhill/librdkafka/blob/master/examples/rdkafka_example.c) for an example producer and consumer.
-
-Link your program with `-lrdkafka -lz -lpthread -lrt`.
-
-
-## Documentation
-
-The public APIs are documented in their respective header files:
- * The **C** API is documented in [src/rdkafka.h](src/rdkafka.h)
- * The **C++** API is documented in [src-cpp/rdkafkacpp.h](src-cpp/rdkafkacpp.h)
-
-To generate Doxygen documents for the API, type:
-
-    make docs
-
-
-Configuration properties are documented in
-[CONFIGURATION.md](https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md)
-
-For a librdkafka introduction, see
-[INTRODUCTION.md](https://github.com/edenhill/librdkafka/blob/master/INTRODUCTION.md)
-
-
-## Examples
-
-See the `examples/`sub-directory.
-
-
-## Tests
-
-See the `tests/`sub-directory.
-
-
-## Support
-
-File bug reports, feature requests and questions using
-[GitHub Issues](https://github.com/edenhill/librdkafka/issues)
-
-
-Questions and discussions are also welcome on irc.freenode.org, #apache-kafka,
-nickname Snaps.
-
-
-### Commercial support
-
-Commercial support is available from [Edenhill services](http://www.edenhill.se)
diff --git a/thirdparty/librdkafka-0.11.4/README.win32 b/thirdparty/librdkafka-0.11.4/README.win32
deleted file mode 100644
index de9b5e4..0000000
--- a/thirdparty/librdkafka-0.11.4/README.win32
+++ /dev/null
@@ -1,28 +0,0 @@
-
-Native win32 build instructions using Microsoft Visual Studio 2013 (MSVC).
-
-Requirements:
- * zlib is installed automatically from NuGet,
-   but probably requires the NuGet VS extension.
- * OpenSSL-win32 must be installed in C:\OpenSSL-win32.
-   Download and install the latest v1.0.2 non-light package from:
-   https://slproweb.com/products/Win32OpenSSL.html
-   (This would be using NuGet too but the current
-    OpenSSL packages are outdated and with broken
-    dependencies, so no luck)
-
-The Visual Studio solution file for librdkafka resides in win32/librdkafka.sln
-
-Artifacts:
- - C library
- - C++ library
- - rdkafka_example
- - tests
-
- Missing:
-  - remaining tools (rdkafka_performance, etc)
-  - SASL support (no official Cyrus libsasl2 DLLs available)
-
-If you build librdkafka with an external tool (ie CMake) you can get rid of the 
-__declspec(dllexport) / __declspec(dllimport) decorations by adding a define
--DLIBRDKAFKA_STATICLIB to your CFLAGS
diff --git a/thirdparty/librdkafka-0.11.4/configure b/thirdparty/librdkafka-0.11.4/configure
deleted file mode 100755
index a76452a..0000000
--- a/thirdparty/librdkafka-0.11.4/configure
+++ /dev/null
@@ -1,214 +0,0 @@
-#!/usr/bin/env bash
-#
-
-BASHVER=$(expr ${BASH_VERSINFO[0]} \* 1000 + ${BASH_VERSINFO[1]})
-
-if [ "$BASHVER" -lt 3002 ]; then
-    echo "ERROR: mklove requires bash version 3.2 or later but you are using $BASH_VERSION ($BASHVER)"
-    echo "       See https://github.com/edenhill/mklove/issues/15"
-    exit 1
-fi
-
-MKL_CONFIGURE_ARGS="$0 $*"
-
-# Load base module
-source mklove/modules/configure.base
-
-# Read some special command line options right away that must be known prior to
-# sourcing modules.
-mkl_in_list "$*" "--no-download" && MKL_NO_DOWNLOAD=1
-# Disable downloads when --help is used to avoid blocking calls.
-mkl_in_list "$*" "--help" && MKL_NO_DOWNLOAD=1
-mkl_in_list "$*" "--debug" && MKL_DEBUG=1
-
-# This is the earliest possible time to check for color support in
-# terminal because mkl_check_terminal_color_support uses mkl_dbg which
-# needs to know if MKL_DEBUG is set
-mkl_check_terminal_color_support
-
-# Delete temporary Makefile and header files on exit.
-trap "{ rm -f $MKL_OUTMK $MKL_OUTH; }" EXIT
-
-
-
-##
-## Load builtin modules
-##
-
-# Builtin options, etc.
-mkl_require builtin
-
-# Host/target support
-mkl_require host
-
-# Compiler detection
-mkl_require cc
-
-
-# Load application provided modules (in current directory), if any.
-for fname in configure.* ; do
-    if [[ $fname = 'configure.*' ]]; then
-        continue
-    fi
-
-    # Skip temporary files
-    if [[ $fname = *~ ]]; then
-        continue
-    fi
-
-    mkl_require $fname
-done
-
-
-
-
-##
-## Argument parsing (options)
-##
-##
-
-_SAVE_ARGS="$*"
-
-# Parse arguments
-while [[ ! -z $@ ]]; do
-    if [[ $1 != --* ]]; then
-        mkl_err "Unknown non-option argument: $1"
-        mkl_usage
-        exit 1
-    fi
-
-    opt=${1#--}
-    shift
-
-    if [[ $opt = *=* ]]; then
-        name="${opt%=*}"
-        arg="${opt#*=}"
-        eqarg=1
-    else
-        name="$opt"
-        arg=""
-        eqarg=0
-    fi
-
-    safeopt="$(mkl_env_esc $name)"
-
-    if ! mkl_func_exists opt_$safeopt ; then
-        mkl_err "Unknown option $opt"
-        mkl_usage
-        exit 1
-    fi
-
-    # Check if this option needs an argument.
-    reqarg=$(mkl_meta_get "MKL_OPT_ARGS" "$(mkl_env_esc $name)")
-    if [[ ! -z $reqarg ]]; then
-        if [[ $eqarg == 0 && -z $arg ]]; then
-            arg=$1
-            shift
-
-            if [[ -z $arg ]]; then
-                mkl_err "Missing argument to option --$name $reqarg"
-                exit 1
-            fi
-        fi
-    else
-        if [[ ! -z $arg ]]; then
-            mkl_err "Option --$name expects no argument"
-            exit 1
-        fi
-        arg=y
-    fi
-
-    case $name in
-        re|reconfigure)
-            oldcmd=$(head -1 config.log | grep '^# configure exec: ' | \
-                sed -e 's/^\# configure exec: [^ ]*configure//')
-            echo "Reconfiguring: $0 $oldcmd"
-            exec $0 $oldcmd
-            ;;
-
-        list-modules)
-            echo "Modules loaded:"
-            for mod in $MKL_MODULES ; do
-                echo "  $mod"
-            done
-            exit 0
-            ;;
-
-        list-checks)
-            echo "Check functions in calling order:"
-            for mf in $MKL_CHECKS ; do
-                mod=${mf%:*}
-                func=${mf#*:}
-                echo -e "${MKL_GREEN}From module $mod:$MKL_CLR_RESET"
-                declare -f $func
-                echo ""
-            done
-            exit 0
-            ;;
-
-        update-modules)
-            fails=0
-            echo "Updating modules"
-            for mod in $MKL_MODULES ; do
-                echo -n "Updating $mod..."
-                if mkl_module_download "$mod" > /dev/null ; then
-                    echo -e "${MKL_GREEN}ok${MKL_CLR_RESET}"
-                else
-                    echo -e "${MKL_RED}failed${MKL_CLR_RESET}"
-                    fails=$(expr $fails + 1)
-                fi
-            done
-            exit $fails
-            ;;
-
-        help)
-            mkl_usage
-            exit 0
-            ;;
-
-        *)
-            opt_$safeopt $arg || exit 1
-            mkl_var_append MKL_OPTS_SET "$safeopt"
-            ;;
-    esac
-done
-
-if [[ ! -z $MKL_CLEAN ]]; then
-    mkl_clean
-    exit 0
-fi
-
-# Move away previous log file
-[[ -f $MKL_OUTDBG ]] && mv $MKL_OUTDBG ${MKL_OUTDBG}.old
-
-
-# Create output files
-echo "# configure exec: $0 $_SAVE_ARGS" >> $MKL_OUTDBG
-echo "# On $(date)" >> $MKL_OUTDBG
-
-rm -f $MKL_OUTMK $MKL_OUTH
-
-
-# Load cache file
-mkl_cache_read
-
-# Run checks
-mkl_checks_run
-
-# Check accumulated failures, will not return on failure.
-mkl_check_fails
-
-# Generate outputs
-mkl_generate
-
-# Summarize what happened
-mkl_summary
-
-# Write cache file
-mkl_cache_write
-
-
-echo ""
-echo "Now type 'make' to build"
-trap - EXIT
-exit 0
diff --git a/thirdparty/librdkafka-0.11.4/configure.librdkafka b/thirdparty/librdkafka-0.11.4/configure.librdkafka
deleted file mode 100644
index 500d1e0..0000000
--- a/thirdparty/librdkafka-0.11.4/configure.librdkafka
+++ /dev/null
@@ -1,215 +0,0 @@
-#!/bin/bash
-#
-
-mkl_meta_set "description" "name"      "librdkafka"
-mkl_meta_set "description" "oneline"   "The Apache Kafka C/C++ library"
-mkl_meta_set "description" "long"      "Full Apache Kafka protocol support, including producer and consumer"
-mkl_meta_set "description" "copyright" "Copyright (c) 2012-2015 Magnus Edenhill"
-
-# Enable generation of pkg-config .pc file
-mkl_mkvar_set "" GEN_PKG_CONFIG y
-
-
-mkl_require cxx
-mkl_require lib
-mkl_require pic
-mkl_require atomics
-mkl_require good_cflags
-mkl_require socket
-
-# Generate version variables from rdkafka.h hex version define
-# so we can use it as string version when generating a pkg-config file.
-
-verdef=$(grep '^#define  *RD_KAFKA_VERSION  *0x' src/rdkafka.h | sed 's/^#define  *RD_KAFKA_VERSION  *\(0x[a-f0-9]*\)\.*$/\1/')
-mkl_require parseversion hex2str "%d.%d.%d" "$verdef" RDKAFKA_VERSION_STR
-
-mkl_toggle_option "Development" ENABLE_DEVEL "--enable-devel" "Enable development asserts, checks, etc" "n"
-mkl_toggle_option "Development" ENABLE_VALGRIND "--enable-valgrind" "Enable in-code valgrind suppressions" "n"
-
-mkl_toggle_option "Development" ENABLE_REFCNT_DEBUG "--enable-refcnt-debug" "Enable refcnt debugging" "n"
-
-mkl_toggle_option "Development" ENABLE_SHAREDPTR_DEBUG "--enable-sharedptr-debug" "Enable sharedptr debugging" "n"
-
-mkl_toggle_option "Feature" ENABLE_LZ4_EXT "--enable-lz4" "Enable external LZ4 library support" "y"
-
-mkl_toggle_option "Feature" ENABLE_SSL "--enable-ssl" "Enable SSL support" "y"
-mkl_toggle_option "Feature" ENABLE_SASL "--enable-sasl" "Enable SASL support with Cyrus libsasl2" "y"
-
-
-function checks {
-
-    # -lrt is needed on linux for clock_gettime: link it if it exists.
-    mkl_lib_check "librt" "" cont CC "-lrt"
-
-    # required libs
-    mkl_lib_check "libpthread" "" fail CC "-lpthread" \
-                  "#include <pthread.h>"
-
-    # Check if dlopen() is available
-    mkl_lib_check "libdl" "WITH_LIBDL" disable CC "-ldl" \
-"
-#include <stdlib.h>
-#include <dlfcn.h>
-void foo (void) {
-   void *h = dlopen(\"__bad_lib\", 0);
-   void *p = dlsym(h, \"sym\");
-   if (p)
-     p = NULL;
-   dlclose(h);
-}"
-
-    if [[ $WITH_LIBDL == "y" ]]; then
-        mkl_allvar_set WITH_PLUGINS WITH_PLUGINS y
-    fi
-
-    # optional libs
-    mkl_lib_check "zlib" "WITH_ZLIB" disable CC "-lz" \
-                  "#include <zlib.h>"
-    mkl_lib_check "libcrypto" "" disable CC "-lcrypto"
-
-    if [[ "$ENABLE_LZ4_EXT" == "y" ]]; then
-        mkl_lib_check --static=-llz4 "liblz4" "WITH_LZ4_EXT" disable CC "-llz4" \
-                      "#include <lz4frame.h>"
-    fi
-
-    # Snappy support is built-in
-    mkl_allvar_set WITH_SNAPPY WITH_SNAPPY y
-
-    # Enable sockem (tests)
-    mkl_allvar_set WITH_SOCKEM WITH_SOCKEM y
-
-    if [[ "$ENABLE_SSL" == "y" ]]; then
-	mkl_meta_set "libssl" "deb" "libssl-dev"
-        if [[ $MKL_DISTRO == "osx" ]]; then
-            # Add brew's OpenSSL pkg-config path on OSX
-            export PKG_CONFIG_PATH="$PKG_CONFIG_PATH:/usr/local/opt/openssl/lib/pkgconfig"
-        fi
-	mkl_lib_check "libssl" "WITH_SSL" disable CC "-lssl" \
-                      "#include <openssl/ssl.h>"
-    fi
-
-    if [[ "$ENABLE_SASL" == "y" ]]; then
-        mkl_meta_set "libsasl2" "deb" "libsasl2-dev"
-        if ! mkl_lib_check "libsasl2" "WITH_SASL_CYRUS" disable CC "-lsasl2" "#include <sasl/sasl.h>" ; then
-	    mkl_lib_check "libsasl" "WITH_SASL_CYRUS" disable CC "-lsasl" \
-                          "#include <sasl/sasl.h>"
-        fi
-    fi
-
-    if [[ "$WITH_SSL" == "y" ]]; then
-        # SASL SCRAM requires base64 encoding from OpenSSL
-        mkl_allvar_set WITH_SASL_SCRAM WITH_SASL_SCRAM y
-    fi
-
-    # CRC32C: check for crc32 instruction support.
-    #         This is also checked during runtime using cpuid.
-    mkl_compile_check crc32chw WITH_CRC32C_HW disable CC "" \
-                      "
-#include <inttypes.h>
-#include <stdio.h>
-#define LONGx1 \"8192\"
-#define LONGx2 \"16384\"
-void foo (void) {
-   const char *n = \"abcdefghijklmnopqrstuvwxyz0123456789\";
-   uint64_t c0 = 0, c1 = 1, c2 = 2;
-   uint64_t s;
-   uint32_t eax = 1, ecx;
-   __asm__(\"cpuid\"
-           : \"=c\"(ecx)
-           : \"a\"(eax)
-           : \"%ebx\", \"%edx\");
-   __asm__(\"crc32b\t\" \"(%1), %0\"
-           : \"=r\"(c0)
-           : \"r\"(n), \"0\"(c0));
-   __asm__(\"crc32q\t\" \"(%3), %0\n\t\"
-           \"crc32q\t\" LONGx1 \"(%3), %1\n\t\"
-           \"crc32q\t\" LONGx2 \"(%3), %2\"
-           : \"=r\"(c0), \"=r\"(c1), \"=r\"(c2)
-           : \"r\"(n), \"0\"(c0), \"1\"(c1), \"2\"(c2));
-  s = c0 + c1 + c2;
-  printf(\"avoiding unused code removal by printing %d, %d, %d\n\", (int)s, (int)eax, (int)ecx);
-}
-"
-
-
-    # Check for libc regex
-    mkl_compile_check "regex" "HAVE_REGEX" disable CC "" \
-"
-#include <stddef.h>
-#include <regex.h>
-void foo (void) {
-   regcomp(NULL, NULL, 0);
-   regexec(NULL, NULL, 0, NULL, 0);
-   regerror(0, NULL, NULL, 0);
-   regfree(NULL);
-}"
-
-
-    # Older g++ (<=4.1?) gives invalid warnings for the C++ code.
-    mkl_mkvar_append CXXFLAGS CXXFLAGS "-Wno-non-virtual-dtor"
-
-    # Required on SunOS
-    if [[ $MKL_DISTRO == "SunOS" ]]; then
-	mkl_mkvar_append CPPFLAGS CPPFLAGS "-D_POSIX_PTHREAD_SEMANTICS -D_REENTRANT -D__EXTENSIONS__"
-	# Source defines _POSIX_C_SOURCE to 200809L for Solaris, and this is
-	# incompatible on that platform with compilers < c99.
-	mkl_mkvar_append CFLAGS CFLAGS "-std=c99"
-    fi
-
-    # Check if strndup() is available (isn't on Solaris 10)
-    mkl_compile_check "strndup" "HAVE_STRNDUP" disable CC "" \
-"#include <string.h>
-int foo (void) {
-   return strndup(\"hi\", 2) ? 0 : 1;
-}"
-
-    # Check if strerror_r() is available.
-    # The check for GNU vs XSI is done in rdposix.h since
-    # we can't rely on all defines to be set here (_GNU_SOURCE).
-    mkl_compile_check "strerror_r" "HAVE_STRERROR_R" disable CC "" \
-"#include <string.h>
-const char *foo (void) {
-   static char buf[64];
-   strerror_r(1, buf, sizeof(buf));
-   return buf;
-}"
-
-
-    # See if GNU's pthread_setname_np() is available, and in what form.
-    mkl_compile_check "pthread_setname_gnu" "HAVE_PTHREAD_SETNAME_GNU" disable CC "-D_GNU_SOURCE -lpthread" \
-'
-#include <pthread.h>
-
-void foo (void) {
-  pthread_setname_np(pthread_self(), "abc");
-}
-'
-
-    # Figure out what tool to use for dumping public symbols.
-    # We rely on configure.cc setting up $NM if it exists.
-    if mkl_env_check "nm" "" cont "NM" ; then
-	# nm by future mk var
-	if [[ $MKL_DISTRO == "osx" || $MKL_DISTRO == "AIX" ]]; then
-	    mkl_mkvar_set SYMDUMPER SYMDUMPER '$(NM) -g'
-	else
-	    mkl_mkvar_set SYMDUMPER SYMDUMPER '$(NM) -D'
-	fi
-    else
-	# Fake symdumper
-	mkl_mkvar_set SYMDUMPER SYMDUMPER 'echo'
-    fi
-
-    # The linker-script generator (lds-gen.py) requires python
-    if [[ $WITH_LDS == y ]]; then
-        if ! mkl_command_check python "HAVE_PYTHON" "disable" "python -V"; then
-            mkl_err "disabling linker-script since python is not available"
-            mkl_mkvar_set WITH_LDS WITH_LDS "n"
-        fi
-    fi
-
-    if [[ "$ENABLE_VALGRIND" == "y" ]]; then
-	mkl_compile_check valgrind WITH_VALGRIND disable CC "" \
-			  "#include <valgrind/memcheck.h>"
-    fi
-}
-
diff --git a/thirdparty/librdkafka-0.11.4/dev-conf.sh b/thirdparty/librdkafka-0.11.4/dev-conf.sh
deleted file mode 100755
index c334f97..0000000
--- a/thirdparty/librdkafka-0.11.4/dev-conf.sh
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/bin/bash
-#
-# Configure librdkafka for development
-
-set -e
-./configure --clean
-
-# enable pedantic
-#export CFLAGS='-std=c99 -pedantic -Wshadow'
-#export CXXFLAGS='-std=c++98 -pedantic'
-
-# enable FSAN
-#FSAN="-fsanitize=address"
-#export CPPFLAGS="$CPPFLAGS $FSAN"
-#export LDFLAGS="$LDFLAGS $FSAN"
-
-OPTS=""
-
-# enable devel asserts
-OPTS="$OPTS --enable-devel"
-
-# disable optimizations
-OPTS="$OPTS --disable-optimization"
-
-# gprof
-#OPTS="$OPTS --enable-profiling --disable-optimization"
-
-# disable lz4
-#OPTS="$OPTS --disable-lz4"
-
-# disable cyrus-sasl
-#OPTS="$OPTS --disable-sasl"
-
-# enable sharedptr debugging
-#OPTS="$OPTS --enable-sharedptr-debug"
-
-#enable refcnt debugging
-#OPTS="$OPTS --enable-refcnt-debug"
-
-echo "Devel configuration options: $OPTS"
-./configure $OPTS
-
-make clean
-make -j
-(cd tests ; make -j build)
diff --git a/thirdparty/librdkafka-0.11.4/examples/.gitignore b/thirdparty/librdkafka-0.11.4/examples/.gitignore
deleted file mode 100644
index 3dc3aab..0000000
--- a/thirdparty/librdkafka-0.11.4/examples/.gitignore
+++ /dev/null
@@ -1,8 +0,0 @@
-rdkafka_example
-rdkafka_performance
-rdkafka_example_cpp
-rdkafka_consumer_example
-rdkafka_consumer_example_cpp
-kafkatest_verifiable_client
-rdkafka_simple_producer
-rdkafka_consume_batch
diff --git a/thirdparty/librdkafka-0.11.4/examples/CMakeLists.txt b/thirdparty/librdkafka-0.11.4/examples/CMakeLists.txt
deleted file mode 100644
index dae7f9a..0000000
--- a/thirdparty/librdkafka-0.11.4/examples/CMakeLists.txt
+++ /dev/null
@@ -1,30 +0,0 @@
-if(WIN32)
-    set(win32_sources ../win32/wingetopt.c ../win32/wingetopt.h)
-    set(win32_compile_defs "LIBRDKAFKACPP_EXPORTS=0")
-endif(WIN32)
-
-add_executable(rdkafka_simple_producer rdkafka_simple_producer.c ${win32_sources})
-target_link_libraries(rdkafka_simple_producer PUBLIC rdkafka)
-
-add_executable(rdkafka_performance rdkafka_performance.c ${win32_sources})
-target_link_libraries(rdkafka_performance PUBLIC rdkafka)
-
-add_executable(rdkafka_example_cpp rdkafka_example.cpp ${win32_sources})
-target_link_libraries(rdkafka_example_cpp PUBLIC rdkafka++)
-target_compile_definitions(rdkafka_example_cpp PRIVATE ${win32_compile_defs})
-
-add_executable(rdkafka_consumer_example_cpp rdkafka_consumer_example.cpp ${win32_sources})
-target_link_libraries(rdkafka_consumer_example_cpp PUBLIC rdkafka++)
-target_compile_definitions(rdkafka_consumer_example_cpp PRIVATE ${win32_compile_defs})
-
-# The targets below has Unix include dirs and do not compile on Windows.
-if(NOT WIN32)
-    add_executable(rdkafka_example rdkafka_example.c)
-    target_link_libraries(rdkafka_example PUBLIC rdkafka)
-    
-    add_executable(rdkafka_consumer_example rdkafka_consumer_example.c)
-    target_link_libraries(rdkafka_consumer_example PUBLIC rdkafka)
-    
-    add_executable(kafkatest_verifiable_client kafkatest_verifiable_client.cpp)
-    target_link_libraries(kafkatest_verifiable_client PUBLIC rdkafka++)
-endif(NOT WIN32)
\ No newline at end of file
diff --git a/thirdparty/librdkafka-0.11.4/examples/Makefile b/thirdparty/librdkafka-0.11.4/examples/Makefile
deleted file mode 100644
index d3e0832..0000000
--- a/thirdparty/librdkafka-0.11.4/examples/Makefile
+++ /dev/null
@@ -1,96 +0,0 @@
-EXAMPLES ?= rdkafka_example rdkafka_performance rdkafka_example_cpp \
-	rdkafka_consumer_example rdkafka_consumer_example_cpp \
-	kafkatest_verifiable_client rdkafka_simple_producer
-
-all: $(EXAMPLES)
-
-include ../mklove/Makefile.base
-
-CFLAGS += -I../src
-CXXFLAGS += -I../src-cpp
-
-# librdkafka must be compiled with -gstrict-dwarf, but rdkafka_example must not,
-# due to some clang bug on OSX 10.9
-CPPFLAGS := $(subst strict-dwarf,,$(CPPFLAGS))
-
-rdkafka_example: ../src/librdkafka.a rdkafka_example.c
-	$(CC) $(CPPFLAGS) $(CFLAGS) rdkafka_example.c -o $@ $(LDFLAGS) \
-		../src/librdkafka.a $(LIBS)
-	@echo "# $@ is ready"
-	@echo "#"
-	@echo "# Run producer (write messages on stdin)"
-	@echo "./$@ -P -t <topic> -p <partition>"
-	@echo ""
-	@echo "# or consumer"
-	@echo "./$@ -C -t <topic> -p <partition>"
-	@echo ""
-	@echo "#"
-	@echo "# More usage options:"
-	@echo "./$@ -h"
-
-rdkafka_simple_producer: ../src/librdkafka.a rdkafka_simple_producer.c
-	$(CC) $(CPPFLAGS) $(CFLAGS) $@.c -o $@ $(LDFLAGS) \
-		../src/librdkafka.a $(LIBS)
-
-rdkafka_consumer_example: ../src/librdkafka.a rdkafka_consumer_example.c
-	$(CC) $(CPPFLAGS) $(CFLAGS) rdkafka_consumer_example.c -o $@ $(LDFLAGS) \
-		../src/librdkafka.a $(LIBS)
-	@echo "# $@ is ready"
-	@echo "#"
-	@echo "./$@ <topic[:part]> <topic2[:part]> .."
-	@echo ""
-	@echo "#"
-	@echo "# More usage options:"
-	@echo "./$@ -h"
-
-rdkafka_performance: ../src/librdkafka.a rdkafka_performance.c
-	$(CC) $(CPPFLAGS) $(CFLAGS) rdkafka_performance.c -o $@ $(LDFLAGS) \
-		../src/librdkafka.a $(LIBS)
-	@echo "# $@ is ready"
-	@echo "#"
-	@echo "# Run producer"
-	@echo "./$@ -P -t <topic> -p <partition> -s <msgsize>"
-	@echo ""
-	@echo "# or consumer"
-	@echo "./$@ -C -t <topic> -p <partition>"
-	@echo ""
-	@echo "#"
-	@echo "# More usage options:"
-	@echo "./$@ -h"
-
-
-rdkafka_example_cpp: ../src-cpp/librdkafka++.a ../src/librdkafka.a rdkafka_example.cpp
-	$(CXX) $(CPPFLAGS) $(CXXFLAGS) rdkafka_example.cpp -o $@ $(LDFLAGS) \
-		../src-cpp/librdkafka++.a ../src/librdkafka.a $(LIBS) -lstdc++
-
-kafkatest_verifiable_client: ../src-cpp/librdkafka++.a ../src/librdkafka.a kafkatest_verifiable_client.cpp
-	$(CXX) $(CPPFLAGS) $(CXXFLAGS) kafkatest_verifiable_client.cpp -o $@ $(LDFLAGS) \
-		../src-cpp/librdkafka++.a ../src/librdkafka.a $(LIBS) -lstdc++
-
-
-rdkafka_consumer_example_cpp: ../src-cpp/librdkafka++.a ../src/librdkafka.a rdkafka_consumer_example.cpp
-	$(CXX) $(CPPFLAGS) $(CXXFLAGS) rdkafka_consumer_example.cpp -o $@ $(LDFLAGS) \
-		../src-cpp/librdkafka++.a ../src/librdkafka.a $(LIBS) -lstdc++
-
-rdkafka_consume_batch: ../src-cpp/librdkafka++.a ../src/librdkafka.a rdkafka_consume_batch.cpp
-	$(CXX) $(CPPFLAGS) $(CXXFLAGS) rdkafka_consume_batch.cpp -o $@ $(LDFLAGS) \
-		../src-cpp/librdkafka++.a ../src/librdkafka.a $(LIBS) -lstdc++
-
-rdkafka_zookeeper_example: ../src/librdkafka.a rdkafka_zookeeper_example.c
-	$(CC) $(CPPFLAGS) $(CFLAGS) -I/usr/include/zookeeper rdkafka_zookeeper_example.c -o $@ $(LDFLAGS) \
-		../src/librdkafka.a $(LIBS) -lzookeeper_mt -ljansson
-	@echo "# $@ is ready"
-	@echo "#"
-	@echo "# Run producer (write messages on stdin)"
-	@echo "./$@ -P -t <topic> -p <partition>"
-	@echo ""
-	@echo "# or consumer"
-	@echo "./$@ -C -t <topic> -p <partition>"
-	@echo ""
-	@echo "#"
-	@echo "# More usage options:"
-	@echo "./$@ -h"
-
-clean:
-	rm -f $(EXAMPLES)
-
diff --git a/thirdparty/librdkafka-0.11.4/examples/globals.json b/thirdparty/librdkafka-0.11.4/examples/globals.json
deleted file mode 100644
index 527e126..0000000
--- a/thirdparty/librdkafka-0.11.4/examples/globals.json
+++ /dev/null
@@ -1,11 +0,0 @@
-{"VerifiableConsumer":
- {
-     "class": "kafkatest.services.verifiable_client.VerifiableClientApp",
-     "exec_cmd": "/vagrant/tests/c/kafkatest_verifiable_client --consumer --debug cgrp,topic,protocol,broker"
- },
- "VerifiableProducer":
- {
-     "class": "kafkatest.services.verifiable_client.VerifiableClientApp",
-     "exec_cmd": "/vagrant/tests/c/kafkatest_verifiable_client --producer --debug topic,broker"
- }
-}
diff --git a/thirdparty/librdkafka-0.11.4/examples/kafkatest_verifiable_client.cpp b/thirdparty/librdkafka-0.11.4/examples/kafkatest_verifiable_client.cpp
deleted file mode 100644
index 26e1ae0..0000000
--- a/thirdparty/librdkafka-0.11.4/examples/kafkatest_verifiable_client.cpp
+++ /dev/null
@@ -1,960 +0,0 @@
-/*
- * Copyright (c) 2015, Confluent Inc
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/**
- * librdkafka version of the Java VerifiableProducer and VerifiableConsumer
- * for use with the official Kafka client tests.
- */
-
-
-#include <iostream>
-#include <fstream>
-#include <sstream>
-#include <map>
-#include <string>
-#include <algorithm>
-#include <cstdlib>
-#include <cstdio>
-#include <csignal>
-#include <cstring>
-#include <unistd.h>
-#include <sys/time.h>
-#include <assert.h>
-#include <ctype.h>
-#include <strings.h>
-
-#ifdef _MSC_VER
-#include "../win32/wingetopt.h"
-#elif _AIX
-#include <unistd.h>
-#else
-#include <getopt.h>
-#endif
-
-/*
- * Typically include path in a real application would be
- * #include <librdkafka/rdkafkacpp.h>
- */
-#include "rdkafkacpp.h"
-
-static bool run = true;
-static bool exit_eof = false;
-static int verbosity = 1;
-static std::string value_prefix;
-
-class Assignment {
-
- public:
-  static std::string name (const std::string &t, int partition) {
-    std::stringstream stm;
-    stm << t << "." << partition;
-    return stm.str();
-  }
-
-  Assignment(): topic(""), partition(-1), consumedMessages(0),
-                minOffset(-1), maxOffset(0) {
-    printf("Created assignment\n");
-  }
-  Assignment(const Assignment &a) {
-    topic = a.topic;
-    partition = a.partition;
-    consumedMessages = a.consumedMessages;
-    minOffset = a.minOffset;
-    maxOffset = a.maxOffset;
-  }
-
-  Assignment &operator=(const Assignment &a) {
-    this->topic = a.topic;
-    this->partition = a.partition;
-    this->consumedMessages = a.consumedMessages;
-    this->minOffset = a.minOffset;
-    this->maxOffset = a.maxOffset;
-    return *this;
-  }
-
-  int operator==(const Assignment &a) const {
-    return !(this->topic == a.topic &&
-             this->partition == a.partition);
-  }
-
-  int operator<(const Assignment &a) const {
-    if (this->topic < a.topic) return 1;
-    if (this->topic >= a.topic) return 0;
-    return (this->partition < a.partition);
-  }
-
-  void setup (std::string t, int32_t p) {
-    assert(!t.empty());
-    assert(topic.empty() || topic == t);
-    assert(partition == -1 || partition == p);
-    topic = t;
-    partition = p;
-  }
-
-  std::string topic;
-  int partition;
-  int consumedMessages;
-  int64_t minOffset;
-  int64_t maxOffset;
-};
-
-
-
-
-static struct {
-  int maxMessages;
-
-  struct {
-    int numAcked;
-    int numSent;
-    int numErr;
-  } producer;
-
-  struct {
-    int consumedMessages;
-    int consumedMessagesLastReported;
-    int consumedMessagesAtLastCommit;
-    bool useAutoCommit;
-    std::map<std::string, Assignment> assignments;
-  } consumer;
-} state = {
-  /* .maxMessages = */ -1
-};
-
-
-static RdKafka::KafkaConsumer *consumer;
-
-
-static std::string now () {
-  struct timeval tv;
-  gettimeofday(&tv, NULL);
-  time_t t = tv.tv_sec;
-  struct tm tm;
-  char buf[64];
-
-  localtime_r(&t, &tm);
-  strftime(buf, sizeof(buf), "%H:%M:%S", &tm);
-  snprintf(buf+strlen(buf), sizeof(buf)-strlen(buf), ".%03d",
-           (int)(tv.tv_usec / 1000));
-
-  return buf;
-}
-
-
-static time_t watchdog_last_kick;
-static const int watchdog_timeout = 20; /* Must be > socket.timeout.ms */
-static void sigwatchdog (int sig) {
-  time_t t = time(NULL);
-  if (watchdog_last_kick + watchdog_timeout <= t) {
-    std::cerr << now() << ": WATCHDOG TIMEOUT (" <<
-        (int)(t - watchdog_last_kick) << "s): TERMINATING" << std::endl;
-    int *i = NULL;
-    *i = 100;
-    abort();
-  }
-}
-
-static void watchdog_kick () {
-  watchdog_last_kick = time(NULL);
-
-  /* Safe guard against hangs-on-exit */
-  alarm(watchdog_timeout);
-}
-
-
-
-
-
-static void errorString (const std::string &name,
-                         const std::string &errmsg,
-                         const std::string &topic,
-                         const std::string *key,
-                         const std::string &value) {
-  std::cout << "{ "
-            << "\"name\": \"" << name << "\", "
-            << "\"_time\": \"" << now() << "\", "
-            << "\"message\": \"" << errmsg << "\", "
-            << "\"topic\": \"" << topic << "\", "
-            << "\"key\": \"" << (key ? *key : "NULL") << "\", "
-            << "\"value\": \"" << value << "\" "
-            << "}" << std::endl;
-}
-
-
-static void successString (const std::string &name,
-                           const std::string &topic,
-                           int partition,
-                           int64_t offset,
-                           const std::string *key,
-                           const std::string &value) {
-  std::cout << "{ "
-            << "\"name\": \"" << name << "\", "
-            << "\"_time\": \"" << now() << "\", "
-            << "\"topic\": \"" << topic << "\", "
-            << "\"partition\": " << partition << ", "
-            << "\"offset\": " << offset << ", "
-            << "\"key\": \"" << (key ? *key : "NULL") << "\", "
-            << "\"value\": \"" << value << "\" "
-            << "}" << std::endl;
-}
-
-
-#if FIXME
-static void offsetStatus (bool success,
-                          const std::string &topic,
-                          int partition,
-                          int64_t offset,
-                          const std::string &errstr) {
-  std::cout << "{ "
-      "\"name\": \"offsets_committed\", " <<
-      "\"success\": " << success << ", " <<
-      "\"offsets\": [ " <<
-      " { " <<
-      " \"topic\": \"" << topic << "\", " <<
-      " \"partition\": " << partition << ", " <<
-      " \"offset\": " << (int)offset << ", " <<
-      " \"error\": \"" << errstr << "\" " <<
-      " } " <<
-      "] }" << std::endl;
-
-}
-#endif
-
-
-static void sigterm (int sig) {
-
-  std::cerr << now() << ": Terminating because of signal " << sig << std::endl;
-
-  if (!run) {
-    std::cerr << now() << ": Forced termination" << std::endl;
-    exit(1);
-  }
-  run = false;
-}
-
-
-class ExampleDeliveryReportCb : public RdKafka::DeliveryReportCb {
- public:
-  void dr_cb (RdKafka::Message &message) {
-    if (message.err()) {
-      state.producer.numErr++;
-      errorString("producer_send_error", message.errstr(),
-                  message.topic_name(),
-                  message.key(),
-                  std::string(static_cast<const char*>(message.payload()),
-                              message.len()));
-    } else {
-      successString("producer_send_success",
-                    message.topic_name(),
-                    (int)message.partition(),
-                    message.offset(),
-                    message.key(),
-                    std::string(static_cast<const char*>(message.payload()),
-                                message.len()));
-      state.producer.numAcked++;
-    }
-  }
-};
-
-
-class ExampleEventCb : public RdKafka::EventCb {
- public:
-  void event_cb (RdKafka::Event &event) {
-    switch (event.type())
-    {
-      case RdKafka::Event::EVENT_ERROR:
-        std::cerr << now() << ": ERROR (" << RdKafka::err2str(event.err()) << "): " <<
-            event.str() << std::endl;
-        break;
-
-      case RdKafka::Event::EVENT_STATS:
-        std::cerr << now() << ": \"STATS\": " << event.str() << std::endl;
-        break;
-
-      case RdKafka::Event::EVENT_LOG:
-        std::cerr << now() << ": LOG-" << event.severity() << "-"
-                  << event.fac() << ": " << event.str() << std::endl;
-        break;
-
-      default:
-        std::cerr << now() << ": EVENT " << event.type() <<
-            " (" << RdKafka::err2str(event.err()) << "): " <<
-            event.str() << std::endl;
-        break;
-    }
-  }
-};
-
-
-/* Use of this partitioner is pretty pointless since no key is provided
- * in the produce() call. */
-class MyHashPartitionerCb : public RdKafka::PartitionerCb {
- public:
-  int32_t partitioner_cb (const RdKafka::Topic *topic, const std::string *key,
-                          int32_t partition_cnt, void *msg_opaque) {
-    return djb_hash(key->c_str(), key->size()) % partition_cnt;
-  }
- private:
-
-  static inline unsigned int djb_hash (const char *str, size_t len) {
-    unsigned int hash = 5381;
-    for (size_t i = 0 ; i < len ; i++)
-      hash = ((hash << 5) + hash) + str[i];
-    return hash;
-  }
-};
-
-
-
-
-
-/**
- * Print number of records consumed, every 100 messages or on timeout.
- */
-static void report_records_consumed (int immediate) {
-  std::map<std::string,Assignment> *assignments = &state.consumer.assignments;
-
-  if (state.consumer.consumedMessages <=
-      state.consumer.consumedMessagesLastReported + (immediate ? 0 : 999))
-    return;
-
-  std::cout << "{ "
-      "\"name\": \"records_consumed\", " <<
-      "\"_totcount\": " << state.consumer.consumedMessages << ", " <<
-      "\"count\": " << (state.consumer.consumedMessages -
-                        state.consumer.consumedMessagesLastReported) << ", " <<
-      "\"partitions\": [ ";
-
-  for (std::map<std::string,Assignment>::iterator ii = assignments->begin() ;
-       ii != assignments->end() ; ii++) {
-    Assignment *a = &(*ii).second;
-    assert(!a->topic.empty());
-    std::cout << (ii == assignments->begin() ? "": ", ") << " { " <<
-        " \"topic\": \"" << a->topic << "\", " <<
-        " \"partition\": " << a->partition << ", " <<
-        " \"minOffset\": " << a->minOffset << ", " <<
-        " \"maxOffset\": " << a->maxOffset << " " <<
-        " } ";
-    a->minOffset = -1;
-  }
-
-  std::cout << "] }" << std::endl;
-
-  state.consumer.consumedMessagesLastReported = state.consumer.consumedMessages;
-}
-
-
-class ExampleOffsetCommitCb : public RdKafka::OffsetCommitCb {
- public:
-  void offset_commit_cb (RdKafka::ErrorCode err,
-                         std::vector<RdKafka::TopicPartition*> &offsets) {
-    std::cerr << now() << ": Propagate offset for " << offsets.size() << " partitions, error: " << RdKafka::err2str(err) << std::endl;
-
-    /* No offsets to commit, dont report anything. */
-    if (err == RdKafka::ERR__NO_OFFSET)
-      return;
-
-    /* Send up-to-date records_consumed report to make sure consumed > committed */
-    report_records_consumed(1);
-
-    std::cout << "{ " <<
-        "\"name\": \"offsets_committed\", " <<
-        "\"success\": " << (err ? "false" : "true") << ", " <<
-        "\"error\": \"" << (err ? RdKafka::err2str(err) : "") << "\", " <<
-        "\"_autocommit\": " << (state.consumer.useAutoCommit ? "true":"false") << ", " <<
-        "\"offsets\": [ ";
-    assert(offsets.size() > 0);
-    for (unsigned int i = 0 ; i < offsets.size() ; i++) {
-      std::cout << (i == 0 ? "" : ", ") << "{ " <<
-          " \"topic\": \"" << offsets[i]->topic() << "\", " <<
-          " \"partition\": " << offsets[i]->partition() << ", " <<
-          " \"offset\": " << (int)offsets[i]->offset() << ", " <<
-          " \"error\": \"" <<
-          (offsets[i]->err() ? RdKafka::err2str(offsets[i]->err()) : "") <<
-          "\" " <<
-          " }";
-    }
-    std::cout << " ] }" << std::endl;
-
-  }
-};
-
-static ExampleOffsetCommitCb ex_offset_commit_cb;
-
-
-/**
- * Commit every 1000 messages or whenever there is a consume timeout.
- */
-static void do_commit (RdKafka::KafkaConsumer *consumer,
-                      int immediate) {
-  if (!immediate &&
-      (state.consumer.useAutoCommit ||
-       state.consumer.consumedMessagesAtLastCommit + 1000 >
-       state.consumer.consumedMessages))
-    return;
-
-  /* Make sure we report consumption before commit,
-   * otherwise tests may fail because of commit > consumed. */
-  if (state.consumer.consumedMessagesLastReported <
-      state.consumer.consumedMessages)
-    report_records_consumed(1);
-
-  std::cerr << now() << ": committing " <<
-    (state.consumer.consumedMessages -
-     state.consumer.consumedMessagesAtLastCommit) << " messages" << std::endl;
-
-  RdKafka::ErrorCode err;
-  err = consumer->commitSync(&ex_offset_commit_cb);
-
-  std::cerr << now() << ": " <<
-    "sync commit returned " << RdKafka::err2str(err) << std::endl;
-
-  state.consumer.consumedMessagesAtLastCommit =
-    state.consumer.consumedMessages;
-}
-
-
-void msg_consume(RdKafka::KafkaConsumer *consumer,
-                 RdKafka::Message* msg, void* opaque) {
-  switch (msg->err()) {
-    case RdKafka::ERR__TIMED_OUT:
-      /* Try reporting consumed messages */
-      report_records_consumed(1);
-      /* Commit one every consume() timeout instead of on every message.
-       * Also commit on every 1000 messages, whichever comes first. */
-      do_commit(consumer, 1);
-      break;
-
-
-    case RdKafka::ERR_NO_ERROR:
-      {
-        /* Real message */
-        if (verbosity > 2)
-          std::cerr << now() << ": Read msg from " << msg->topic_name() <<
-              " [" << (int)msg->partition() << "]  at offset " <<
-              msg->offset() << std::endl;
-
-        if (state.maxMessages >= 0 &&
-            state.consumer.consumedMessages >= state.maxMessages)
-          return;
-
-
-        Assignment *a =
-            &state.consumer.assignments[Assignment::name(msg->topic_name(),
-                                                         msg->partition())];
-        a->setup(msg->topic_name(), msg->partition());
-
-        a->consumedMessages++;
-        if (a->minOffset == -1)
-          a->minOffset = msg->offset();
-        if (a->maxOffset < msg->offset())
-          a->maxOffset = msg->offset();
-
-        if (msg->key()) {
-          if (verbosity >= 3)
-            std::cerr << now() << ": Key: " << *msg->key() << std::endl;
-        }
-
-        if (verbosity >= 3)
-          fprintf(stderr, "%.*s\n",
-                  static_cast<int>(msg->len()),
-                  static_cast<const char *>(msg->payload()));
-
-        state.consumer.consumedMessages++;
-
-        report_records_consumed(0);
-
-        do_commit(consumer, 0);
-      }
-      break;
-
-    case RdKafka::ERR__PARTITION_EOF:
-      /* Last message */
-      if (exit_eof) {
-        std::cerr << now() << ": Terminate: exit on EOF" << std::endl;
-        run = false;
-      }
-      break;
-
-    case RdKafka::ERR__UNKNOWN_TOPIC:
-    case RdKafka::ERR__UNKNOWN_PARTITION:
-      std::cerr << now() << ": Consume failed: " << msg->errstr() << std::endl;
-      run = false;
-      break;
-
-    case RdKafka::ERR_GROUP_COORDINATOR_NOT_AVAILABLE:
-      std::cerr << now() << ": Warning: " << msg->errstr() << std::endl;
-      break;
-
-    default:
-      /* Errors */
-      std::cerr << now() << ": Consume failed: " << msg->errstr() << std::endl;
-      run = false;
-  }
-}
-
-
-
-
-class ExampleConsumeCb : public RdKafka::ConsumeCb {
- public:
-  void consume_cb (RdKafka::Message &msg, void *opaque) {
-    msg_consume(consumer_, &msg, opaque);
-  }
-  RdKafka::KafkaConsumer *consumer_;
-};
-
-class ExampleRebalanceCb : public RdKafka::RebalanceCb {
- private:
-  static std::string part_list_json (const std::vector<RdKafka::TopicPartition*> &partitions) {
-    std::ostringstream out;
-    for (unsigned int i = 0 ; i < partitions.size() ; i++)
-      out << (i==0?"":", ") << "{ " <<
-          " \"topic\": \"" << partitions[i]->topic() << "\", " <<
-          " \"partition\": " << partitions[i]->partition() <<
-          " }";
-    return out.str();
-  }
- public:
-  void rebalance_cb (RdKafka::KafkaConsumer *consumer,
-                     RdKafka::ErrorCode err,
-                     std::vector<RdKafka::TopicPartition*> &partitions) {
-
-    std::cerr << now() << ": rebalance_cb " << RdKafka::err2str(err) <<
-        " for " << partitions.size() << " partitions" << std::endl;
-    /* Send message report prior to rebalancing event to make sure they
-     * are accounted for on the "right side" of the rebalance. */
-    report_records_consumed(1);
-
-    if (err == RdKafka::ERR__ASSIGN_PARTITIONS)
-      consumer->assign(partitions);
-    else {
-      do_commit(consumer, 1);
-      consumer->unassign();
-    }
-
-    std::cout <<
-      "{ " <<
-      "\"name\": \"partitions_" << (err == RdKafka::ERR__ASSIGN_PARTITIONS ?
-                                    "assigned" : "revoked") << "\", " <<
-      "\"partitions\": [ " << part_list_json(partitions) << "] }" << std::endl;
-
-  }
-};
-
-
-
-/**
- * @brief Read (Java client) configuration file
- */
-static void read_conf_file (RdKafka::Conf *conf, const std::string &conf_file) {
-  std::ifstream inf(conf_file.c_str());
-
-  if (!inf) {
-    std::cerr << now() << ": " << conf_file << ": could not open file" << std::endl;
-    exit(1);
-  }
-
-  std::cerr << now() << ": " << conf_file << ": read config file" << std::endl;
-
-  std::string line;
-  int linenr = 0;
-
-  while (std::getline(inf, line)) {
-    linenr++;
-
-    // Ignore comments and empty lines
-    if (line[0] == '#' || line.length() == 0)
-      continue;
-
-    // Match on key=value..
-    size_t d = line.find("=");
-    if (d == 0 || d == std::string::npos) {
-      std::cerr << now() << ": " << conf_file << ":" << linenr << ": " << line << ": ignoring invalid line (expect key=value): " << ::std::endl;
-      continue;
-    }
-
-    std::string key = line.substr(0, d);
-    std::string val = line.substr(d+1);
-
-    std::string errstr;
-    if (conf->set(key, val, errstr)) {
-      std::cerr << now() << ": " << conf_file << ":" << linenr << ": " << key << "=" << val << ": " << errstr << ": ignoring error" << std::endl;
-    } else {
-      std::cerr << now() << ": " << conf_file << ":" << linenr << ": " << key << "=" << val << ": applied to configuration" << std::endl;
-    }
-  }
-
-  inf.close();
-}
-
-
-
-
-int main (int argc, char **argv) {
-  std::string brokers = "localhost";
-  std::string errstr;
-  std::vector<std::string> topics;
-  std::string mode = "P";
-  int throughput = 0;
-  int32_t partition = RdKafka::Topic::PARTITION_UA;
-  MyHashPartitionerCb hash_partitioner;
-  int64_t create_time = -1;
-
-  std::cerr << now() << ": librdkafka version " << RdKafka::version_str() <<
-    " (" << RdKafka::version() << ")" << std::endl;
-
-  /*
-   * Create configuration objects
-   */
-  RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
-
-  /* Java VerifiableProducer defaults to acks=all */
-  if (conf->set("acks", "all", errstr)) {
-    std::cerr << now() << ": " << errstr << std::endl;
-    exit(1);
-  }
-
-  /* Avoid slow shutdown on error */
-  if (conf->set("message.timeout.ms", "60000", errstr)) {
-    std::cerr << now() << ": " << errstr << std::endl;
-    exit(1);
-  }
-
-  {
-    char hostname[128];
-    gethostname(hostname, sizeof(hostname)-1);
-    conf->set("client.id", std::string("rdkafka@") + hostname, errstr);
-  }
-
-  conf->set("log.thread.name", "true", errstr);
-
-  /* correct producer offsets */
-  conf->set("produce.offset.report", "true", errstr);
-
-  /* auto commit is explicitly enabled with --enable-autocommit */
-  conf->set("enable.auto.commit", "false", errstr);
-
-  /* keep protocol request timeouts under the watchdog timeout
-   * to make sure things like commitSync() dont fall victim to the watchdog. */
-  conf->set("socket.timeout.ms", "10000", errstr);
-
-  conf->set("fetch.wait.max.ms", "500", errstr);
-  conf->set("fetch.min.bytes", "4096", errstr);
-
-  for (int i = 1 ; i < argc ; i++) {
-    const char *name = argv[i];
-    const char *val = i+1 < argc ? argv[i+1] : NULL;
-
-    if (val && !strncmp(val, "-", 1))
-      val = NULL;
-
-    std::cout << now() << ": argument: " << name << " " <<
-        (val?val:"") << std::endl;
-
-    if (val) {
-      if (!strcmp(name, "--topic"))
-        topics.push_back(val);
-      else if (!strcmp(name, "--broker-list"))
-        brokers = val;
-      else if (!strcmp(name, "--max-messages"))
-        state.maxMessages = atoi(val);
-      else if (!strcmp(name, "--throughput"))
-        throughput = atoi(val);
-      else if (!strcmp(name, "--producer.config") ||
-               !strcmp(name, "--consumer.config"))
-        read_conf_file(conf, val);
-      else if (!strcmp(name, "--group-id"))
-        conf->set("group.id", val, errstr);
-      else if (!strcmp(name, "--session-timeout"))
-        conf->set("session.timeout.ms", val, errstr);
-      else if (!strcmp(name, "--reset-policy")) {
-        if (conf->set("auto.offset.reset", val, errstr)) {
-          std::cerr << now() << ": " << errstr << std::endl;
-          exit(1);
-        }
-      } else if (!strcmp(name, "--assignment-strategy")) {
-        /* The system tests pass the Java class name(s) rather than
-         * the configuration value. Fix it.
-         * "org.apache.kafka.clients.consumer.RangeAssignor,.." -> "range,.."
-         */
-        std::string s = val;
-        size_t pos;
-
-        while ((pos = s.find("org.apache.kafka.clients.consumer.")) !=
-               std::string::npos)
-          s.erase(pos, strlen("org.apache.kafka.clients.consumer."));
-
-        while ((pos = s.find("Assignor")) != std::string::npos)
-          s.erase(pos, strlen("Assignor"));
-
-        std::transform(s.begin(), s.end(), s.begin(), tolower);
-
-        std::cerr << now() << ": converted " << name << " "
-                  << val << " to " << s << std::endl;
-
-        if  (conf->set("partition.assignment.strategy", s.c_str(), errstr)) {
-          std::cerr << now() << ": " << errstr << std::endl;
-          exit(1);
-        }
-      } else if (!strcmp(name, "--value-prefix")) {
-        value_prefix = std::string(val) + ".";
-      } else if (!strcmp(name, "--acks")) {
-       if (conf->set("acks", val, errstr)) {
-         std::cerr << now() << ": " << errstr << std::endl;
-         exit(1);
-       }
-      } else if (!strcmp(name, "--message-create-time")) {
-       create_time = (int64_t)atoi(val);
-      } else if (!strcmp(name, "--debug")) {
-        conf->set("debug", val, errstr);
-      } else if (!strcmp(name, "-X")) {
-        char *s = strdup(val);
-        char *t = strchr(s, '=');
-        if (!t)
-          t = (char *)"";
-        else {
-          *t = '\0';
-          t++;
-        }
-        if (conf->set(s, t, errstr)) {
-          std::cerr << now() << ": " << errstr << std::endl;
-          exit(1);
-        }
-        free(s);
-      } else {
-        std::cerr << now() << ": Unknown option " << name << std::endl;
-        exit(1);
-      }
-
-      i++;
-
-    } else {
-      if (!strcmp(name, "--consumer"))
-        mode = "C";
-      else if (!strcmp(name, "--producer"))
-        mode = "P";
-      else if (!strcmp(name, "--enable-autocommit")) {
-        state.consumer.useAutoCommit = true;
-        conf->set("enable.auto.commit", "true", errstr);
-      } else if (!strcmp(name, "-v"))
-        verbosity++;
-      else if (!strcmp(name, "-q"))
-        verbosity--;
-      else {
-        std::cerr << now() << ": Unknown option or missing argument to " << name << std::endl;
-        exit(1);
-      }
-    }
-  }
-
-  if (topics.empty() || brokers.empty()) {
-    std::cerr << now() << ": Missing --topic and --broker-list" << std::endl;
-    exit(1);
-  }
-
-
-  /*
-   * Set configuration properties
-   */
-  conf->set("metadata.broker.list", brokers, errstr);
-
-  ExampleEventCb ex_event_cb;
-  conf->set("event_cb", &ex_event_cb, errstr);
-
-  signal(SIGINT, sigterm);
-  signal(SIGTERM, sigterm);
-  signal(SIGALRM,  sigwatchdog);
-
-
-  if (mode == "P") {
-    /*
-     * Producer mode
-     */
-
-    ExampleDeliveryReportCb ex_dr_cb;
-
-    /* Set delivery report callback */
-    conf->set("dr_cb", &ex_dr_cb, errstr);
-
-    /*
-     * Create producer using accumulated global configuration.
-     */
-    RdKafka::Producer *producer = RdKafka::Producer::create(conf, errstr);
-    if (!producer) {
-      std::cerr << now() << ": Failed to create producer: " << errstr << std::endl;
-      exit(1);
-    }
-
-    std::cerr << now() << ": % Created producer " << producer->name() << std::endl;
-
-    /*
-     * Create topic handle.
-     */
-    RdKafka::Topic *topic = RdKafka::Topic::create(producer, topics[0],
-                                                   NULL, errstr);
-    if (!topic) {
-      std::cerr << now() << ": Failed to create topic: " << errstr << std::endl;
-      exit(1);
-    }
-
-    static const int delay_us = throughput ? 1000000/throughput : 10;
-
-    if (state.maxMessages == -1)
-      state.maxMessages = 1000000; /* Avoid infinite produce */
-
-    for (int i = 0 ; run && i < state.maxMessages ; i++) {
-      /*
-       * Produce message
-       */
-      std::ostringstream msg;
-      msg << value_prefix << i;
-      while (true) {
-        RdKafka::ErrorCode resp;
-       if (create_time == -1) {
-         resp = producer->produce(topic, partition,
-                                  RdKafka::Producer::RK_MSG_COPY /* Copy payload */,
-                                  const_cast<char *>(msg.str().c_str()),
-                                  msg.str().size(), NULL, NULL);
-       } else {
-         resp = producer->produce(topics[0], partition,
-                                  RdKafka::Producer::RK_MSG_COPY /* Copy payload */,
-                                  const_cast<char *>(msg.str().c_str()),
-                                  msg.str().size(),
-                                  NULL, 0,
-                                  create_time,
-                                  NULL);
-       }
-
-        if (resp == RdKafka::ERR__QUEUE_FULL) {
-          producer->poll(100);
-          continue;
-        } else if (resp != RdKafka::ERR_NO_ERROR) {
-          errorString("producer_send_error",
-                      RdKafka::err2str(resp), topic->name(), NULL, msg.str());
-          state.producer.numErr++;
-        } else {
-          state.producer.numSent++;
-        }
-        break;
-      }
-
-      producer->poll(delay_us / 1000);
-      usleep(1000);
-      watchdog_kick();
-    }
-    run = true;
-
-    while (run && producer->outq_len() > 0) {
-      std::cerr << now() << ": Waiting for " << producer->outq_len() << std::endl;
-      producer->poll(1000);
-      watchdog_kick();
-    }
-
-    std::cerr << now() << ": " << state.producer.numAcked << "/" <<
-        state.producer.numSent << "/" << state.maxMessages <<
-        " msgs acked/sent/max, " << state.producer.numErr <<
-        " errored" << std::endl;
-
-    delete topic;
-    delete producer;
-
-
-  } else if (mode == "C") {
-    /*
-     * Consumer mode
-     */
-
-    conf->set("auto.offset.reset", "smallest", errstr);
-
-    ExampleRebalanceCb ex_rebalance_cb;
-    conf->set("rebalance_cb", &ex_rebalance_cb, errstr);
-
-    conf->set("offset_commit_cb", &ex_offset_commit_cb, errstr);
-
-
-    /*
-     * Create consumer using accumulated global configuration.
-     */
-    consumer = RdKafka::KafkaConsumer::create(conf, errstr);
-    if (!consumer) {
-      std::cerr << now() << ": Failed to create consumer: " <<
-          errstr << std::endl;
-      exit(1);
-    }
-
-    std::cerr << now() << ": % Created consumer " << consumer->name() <<
-        std::endl;
-
-    /*
-     * Subscribe to topic(s)
-     */
-    RdKafka::ErrorCode resp = consumer->subscribe(topics);
-    if (resp != RdKafka::ERR_NO_ERROR) {
-      std::cerr << now() << ": Failed to subscribe to " << topics.size() << " topics: "
-                << RdKafka::err2str(resp) << std::endl;
-      exit(1);
-    }
-
-    watchdog_kick();
-
-    /*
-     * Consume messages
-     */
-    while (run) {
-      RdKafka::Message *msg = consumer->consume(500);
-      msg_consume(consumer, msg, NULL);
-      delete msg;
-      watchdog_kick();
-    }
-
-    std::cerr << now() << ": Final commit on termination" << std::endl;
-
-    /* Final commit */
-    do_commit(consumer, 1);
-
-    /*
-     * Stop consumer
-     */
-    consumer->close();
-
-    delete consumer;
-  }
-
-  std::cout << "{ \"name\": \"shutdown_complete\" }" << std::endl;
-
-  /*
-   * Wait for RdKafka to decommission.
-   * This is not strictly needed (when check outq_len() above), but
-   * allows RdKafka to clean up all its resources before the application
-   * exits so that memory profilers such as valgrind wont complain about
-   * memory leaks.
-   */
-  RdKafka::wait_destroyed(5000);
-
-  std::cerr << now() << ": EXITING WITH RETURN VALUE 0" << std::endl;
-  return 0;
-}
diff --git a/thirdparty/librdkafka-0.11.4/examples/rdkafka_consume_batch.cpp b/thirdparty/librdkafka-0.11.4/examples/rdkafka_consume_batch.cpp
deleted file mode 100644
index ea4a169..0000000
--- a/thirdparty/librdkafka-0.11.4/examples/rdkafka_consume_batch.cpp
+++ /dev/null
@@ -1,260 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2018, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/**
- * Apache Kafka consumer & producer example programs
- * using the Kafka driver from librdkafka
- * (https://github.com/edenhill/librdkafka)
- *
- * This example shows how to read batches of messages.
- * Note that messages are fetched from the broker in batches regardless
- * of how the application polls messages from librdkafka, this example
- * merely shows how to accumulate a set of messages in the application.
- */
-
-#include <iostream>
-#include <string>
-#include <cstdlib>
-#include <cstdio>
-#include <csignal>
-#include <cstring>
-
-#ifndef _MSC_VER
-#include <sys/time.h>
-#endif
-
-#ifdef _MSC_VER
-#include "../win32/wingetopt.h"
-#include <atltime.h>
-#elif _AIX
-#include <unistd.h>
-#else
-#include <getopt.h>
-#include <unistd.h>
-#endif
-
-/*
- * Typically include path in a real application would be
- * #include <librdkafka/rdkafkacpp.h>
- */
-#include "rdkafkacpp.h"
-
-
-
-static bool run = true;
-
-static void sigterm (int sig) {
-  run = false;
-}
-
-
-
-/**
- * @returns the current wall-clock time in milliseconds
- */
-static int64_t now () {
-#ifndef _MSC_VER
-        struct timeval tv;
-        gettimeofday(&tv, NULL);
-        return ((int64_t)tv.tv_sec * 1000) + (tv.tv_usec / 1000);
-#else
-#error "now() not implemented for Windows, please submit a PR"
-#endif
-}
-
-
-
-/**
- * @brief Accumulate a batch of \p batch_size messages, but wait
- *        no longer than \p batch_tmout milliseconds.
- */
-static std::vector<RdKafka::Message *>
-consume_batch (RdKafka::KafkaConsumer *consumer, size_t batch_size, int batch_tmout) {
-
-  std::vector<RdKafka::Message *> msgs;
-  msgs.reserve(batch_size);
-
-  int64_t end = now() + batch_tmout;
-  int remaining_timeout = batch_tmout;
-
-  while (msgs.size() < batch_size) {
-    RdKafka::Message *msg = consumer->consume(remaining_timeout);
-
-    switch (msg->err()) {
-    case RdKafka::ERR__TIMED_OUT:
-      delete msg;
-      return msgs;
-
-    case RdKafka::ERR_NO_ERROR:
-      msgs.push_back(msg);
-      break;
-
-    default:
-      std::cerr << "%% Consumer error: " << msg->errstr() << std::endl;
-      run = false;
-      delete msg;
-      return msgs;
-    }
-
-    remaining_timeout = end - now();
-    if (remaining_timeout < 0)
-      break;
-  }
-
-  return msgs;
-}
-
-
-int main (int argc, char **argv) {
-  std::string errstr;
-  std::string topic_str;
-  std::vector<std::string> topics;
-  int batch_size = 100;
-  int batch_tmout = 1000;
-
-  /* Create configuration objects */
-  RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
-
-  if (conf->set("enable.partition.eof", "false", errstr) != RdKafka::Conf::CONF_OK) {
-    std::cerr << errstr << std::endl;
-    exit(1);
-  }
-
-  /* Read command line arguments */
-  int opt;
-  while ((opt = getopt(argc, argv, "g:B:T::b:X:")) != -1) {
-    switch (opt) {
-    case 'g':
-      if (conf->set("group.id",  optarg, errstr) != RdKafka::Conf::CONF_OK) {
-        std::cerr << errstr << std::endl;
-        exit(1);
-      }
-      break;
-
-    case 'B':
-      batch_size = atoi(optarg);
-      break;
-
-    case 'T':
-      batch_tmout = atoi(optarg);
-      break;
-
-    case 'b':
-      if (conf->set("bootstrap.servers", optarg, errstr) != RdKafka::Conf::CONF_OK) {
-        std::cerr << errstr << std::endl;
-        exit(1);
-      }
-      break;
-
-    case 'X':
-      {
-        char *name, *val;
-
-        name = optarg;
-        if (!(val = strchr(name, '='))) {
-          std::cerr << "%% Expected -X property=value, not " <<
-              name << std::endl;
-          exit(1);
-        }
-
-        *val = '\0';
-        val++;
-
-        if (conf->set(name, val, errstr) != RdKafka::Conf::CONF_OK) {
-          std::cerr << errstr << std::endl;
-          exit(1);
-        }
-      }
-      break;
-
-    default:
-      goto usage;
-    }
-  }
-
-  /* Topics to consume */
-  for (; optind < argc ; optind++)
-    topics.push_back(std::string(argv[optind]));
-
-  if (topics.empty() || optind != argc) {
-  usage:
-    fprintf(stderr,
-            "Usage: %s -g <group-id> -B <batch-size> [options] topic1 topic2..\n"
-            "\n"
-            "librdkafka version %s (0x%08x)\n"
-            "\n"
-            " Options:\n"
-            "  -g <group-id>    Consumer group id\n"
-            "  -B <batch-size>  How many messages to batch (default: 100).\n"
-            "  -T <batch-tmout> How long to wait for batch-size to accumulate in milliseconds. (default 1000 ms)\n"
-            "  -b <brokers>    Broker address (localhost:9092)\n"
-            "  -X <prop=name>  Set arbitrary librdkafka configuration property\n"
-            "\n",
-            argv[0],
-            RdKafka::version_str().c_str(), RdKafka::version());
-        exit(1);
-  }
-
-
-  signal(SIGINT, sigterm);
-  signal(SIGTERM, sigterm);
-
-  /* Create consumer */
-  RdKafka::KafkaConsumer *consumer = RdKafka::KafkaConsumer::create(conf, errstr);
-  if (!consumer) {
-    std::cerr << "Failed to create consumer: " << errstr << std::endl;
-    exit(1);
-  }
-
-  delete conf;
-
-  /* Subscribe to topics */
-  RdKafka::ErrorCode err = consumer->subscribe(topics);
-  if (err) {
-    std::cerr << "Failed to subscribe to " << topics.size() << " topics: "
-              << RdKafka::err2str(err) << std::endl;
-    exit(1);
-  }
-
-  /* Consume messages in batches of \p batch_size */
-  while (run) {
-    auto msgs = consume_batch(consumer, batch_size, batch_tmout);
-    std::cout << "Accumulated " << msgs.size() << " messages:" << std::endl;
-
-    for (auto &msg : msgs) {
-      std::cout << " Message in " << msg->topic_name() << " [" << msg->partition() << "] at offset " << msg->offset() << std::endl;
-      delete msg;
-    }
-  }
-
-  /* Close and destroy consumer */
-  consumer->close();
-  delete consumer;
-
-  return 0;
-}
diff --git a/thirdparty/librdkafka-0.11.4/examples/rdkafka_consumer_example.c b/thirdparty/librdkafka-0.11.4/examples/rdkafka_consumer_example.c
deleted file mode 100644
index 3896df8..0000000
--- a/thirdparty/librdkafka-0.11.4/examples/rdkafka_consumer_example.c
+++ /dev/null
@@ -1,624 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2015, Magnus Edenhill
- * All rights reserved.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met: 
- * 
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer. 
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution. 
- * 
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/**
- * Apache Kafka high level consumer example program
- * using the Kafka driver from librdkafka
- * (https://github.com/edenhill/librdkafka)
- */
-
-#include <ctype.h>
-#include <signal.h>
-#include <string.h>
-#include <unistd.h>
-#include <stdlib.h>
-#include <syslog.h>
-#include <sys/time.h>
-#include <errno.h>
-#include <getopt.h>
-
-/* Typical include path would be <librdkafka/rdkafka.h>, but this program
- * is builtin from within the librdkafka source tree and thus differs. */
-#include "rdkafka.h"  /* for Kafka driver */
-
-
-static int run = 1;
-static rd_kafka_t *rk;
-static int exit_eof = 0;
-static int wait_eof = 0;  /* number of partitions awaiting EOF */
-static int quiet = 0;
-static 	enum {
-	OUTPUT_HEXDUMP,
-	OUTPUT_RAW,
-} output = OUTPUT_HEXDUMP;
-
-static void stop (int sig) {
-        if (!run)
-                exit(1);
-	run = 0;
-	fclose(stdin); /* abort fgets() */
-}
-
-
-static void hexdump (FILE *fp, const char *name, const void *ptr, size_t len) {
-	const char *p = (const char *)ptr;
-	unsigned int of = 0;
-
-
-	if (name)
-		fprintf(fp, "%s hexdump (%zd bytes):\n", name, len);
-
-	for (of = 0 ; of < len ; of += 16) {
-		char hexen[16*3+1];
-		char charen[16+1];
-		int hof = 0;
-
-		int cof = 0;
-		int i;
-
-		for (i = of ; i < (int)of + 16 && i < (int)len ; i++) {
-			hof += sprintf(hexen+hof, "%02x ", p[i] & 0xff);
-			cof += sprintf(charen+cof, "%c",
-				       isprint((int)p[i]) ? p[i] : '.');
-		}
-		fprintf(fp, "%08x: %-48s %-16s\n",
-			of, hexen, charen);
-	}
-}
-
-/**
- * Kafka logger callback (optional)
- */
-static void logger (const rd_kafka_t *rk, int level,
-		    const char *fac, const char *buf) {
-	struct timeval tv;
-	gettimeofday(&tv, NULL);
-	fprintf(stdout, "%u.%03u RDKAFKA-%i-%s: %s: %s\n",
-		(int)tv.tv_sec, (int)(tv.tv_usec / 1000),
-		level, fac, rd_kafka_name(rk), buf);
-}
-
-
-
-/**
- * Handle and print a consumed message.
- * Internally crafted messages are also used to propagate state from
- * librdkafka to the application. The application needs to check
- * the `rkmessage->err` field for this purpose.
- */
-static void msg_consume (rd_kafka_message_t *rkmessage) {
-	if (rkmessage->err) {
-		if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) {
-			fprintf(stderr,
-				"%% Consumer reached end of %s [%"PRId32"] "
-			       "message queue at offset %"PRId64"\n",
-			       rd_kafka_topic_name(rkmessage->rkt),
-			       rkmessage->partition, rkmessage->offset);
-
-			if (exit_eof && --wait_eof == 0) {
-                                fprintf(stderr,
-                                        "%% All partition(s) reached EOF: "
-                                        "exiting\n");
-				run = 0;
-                        }
-
-			return;
-		}
-
-                if (rkmessage->rkt)
-                        fprintf(stderr, "%% Consume error for "
-                                "topic \"%s\" [%"PRId32"] "
-                                "offset %"PRId64": %s\n",
-                                rd_kafka_topic_name(rkmessage->rkt),
-                                rkmessage->partition,
-                                rkmessage->offset,
-                                rd_kafka_message_errstr(rkmessage));
-                else
-                        fprintf(stderr, "%% Consumer error: %s: %s\n",
-                                rd_kafka_err2str(rkmessage->err),
-                                rd_kafka_message_errstr(rkmessage));
-
-                if (rkmessage->err == RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION ||
-                    rkmessage->err == RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC)
-                        run = 0;
-		return;
-	}
-
-	if (!quiet)
-		fprintf(stdout, "%% Message (topic %s [%"PRId32"], "
-                        "offset %"PRId64", %zd bytes):\n",
-                        rd_kafka_topic_name(rkmessage->rkt),
-                        rkmessage->partition,
-			rkmessage->offset, rkmessage->len);
-
-	if (rkmessage->key_len) {
-		if (output == OUTPUT_HEXDUMP)
-			hexdump(stdout, "Message Key",
-				rkmessage->key, rkmessage->key_len);
-		else
-			printf("Key: %.*s\n",
-			       (int)rkmessage->key_len, (char *)rkmessage->key);
-	}
-
-	if (output == OUTPUT_HEXDUMP)
-		hexdump(stdout, "Message Payload",
-			rkmessage->payload, rkmessage->len);
-	else
-		printf("%.*s\n",
-		       (int)rkmessage->len, (char *)rkmessage->payload);
-}
-
-
-static void print_partition_list (FILE *fp,
-                                  const rd_kafka_topic_partition_list_t
-                                  *partitions) {
-        int i;
-        for (i = 0 ; i < partitions->cnt ; i++) {
-                fprintf(stderr, "%s %s [%"PRId32"] offset %"PRId64,
-                        i > 0 ? ",":"",
-                        partitions->elems[i].topic,
-                        partitions->elems[i].partition,
-			partitions->elems[i].offset);
-        }
-        fprintf(stderr, "\n");
-
-}
-static void rebalance_cb (rd_kafka_t *rk,
-                          rd_kafka_resp_err_t err,
-			  rd_kafka_topic_partition_list_t *partitions,
-                          void *opaque) {
-
-	fprintf(stderr, "%% Consumer group rebalanced: ");
-
-	switch (err)
-	{
-	case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS:
-		fprintf(stderr, "assigned:\n");
-		print_partition_list(stderr, partitions);
-		rd_kafka_assign(rk, partitions);
-		wait_eof += partitions->cnt;
-		break;
-
-	case RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS:
-		fprintf(stderr, "revoked:\n");
-		print_partition_list(stderr, partitions);
-		rd_kafka_assign(rk, NULL);
-		wait_eof = 0;
-		break;
-
-	default:
-		fprintf(stderr, "failed: %s\n",
-                        rd_kafka_err2str(err));
-                rd_kafka_assign(rk, NULL);
-		break;
-	}
-}
-
-
-static int describe_groups (rd_kafka_t *rk, const char *group) {
-        rd_kafka_resp_err_t err;
-        const struct rd_kafka_group_list *grplist;
-        int i;
-
-        err = rd_kafka_list_groups(rk, group, &grplist, 10000);
-
-        if (err) {
-                fprintf(stderr, "%% Failed to acquire group list: %s\n",
-                        rd_kafka_err2str(err));
-                return -1;
-        }
-
-        for (i = 0 ; i < grplist->group_cnt ; i++) {
-                const struct rd_kafka_group_info *gi = &grplist->groups[i];
-                int j;
-
-                printf("Group \"%s\" in state %s on broker %d (%s:%d)\n",
-                       gi->group, gi->state,
-                       gi->broker.id, gi->broker.host, gi->broker.port);
-                if (gi->err)
-                        printf(" Error: %s\n", rd_kafka_err2str(gi->err));
-                printf(" Protocol type \"%s\", protocol \"%s\", "
-                       "with %d member(s):\n",
-                       gi->protocol_type, gi->protocol, gi->member_cnt);
-
-                for (j = 0 ; j < gi->member_cnt ; j++) {
-                        const struct rd_kafka_group_member_info *mi;
-                        mi = &gi->members[j];
-
-                        printf("  \"%s\", client id \"%s\" on host %s\n",
-                               mi->member_id, mi->client_id, mi->client_host);
-                        printf("    metadata: %d bytes\n",
-                               mi->member_metadata_size);
-                        printf("    assignment: %d bytes\n",
-                               mi->member_assignment_size);
-                }
-                printf("\n");
-        }
-
-        if (group && !grplist->group_cnt)
-                fprintf(stderr, "%% No matching group (%s)\n", group);
-
-        rd_kafka_group_list_destroy(grplist);
-
-        return 0;
-}
-
-
-
-static void sig_usr1 (int sig) {
-	rd_kafka_dump(stdout, rk);
-}
-
-int main (int argc, char **argv) {
-        char mode = 'C';
-	char *brokers = "localhost:9092";
-	int opt;
-	rd_kafka_conf_t *conf;
-	rd_kafka_topic_conf_t *topic_conf;
-	char errstr[512];
-	const char *debug = NULL;
-	int do_conf_dump = 0;
-	char tmp[16];
-        rd_kafka_resp_err_t err;
-        char *group = NULL;
-        rd_kafka_topic_partition_list_t *topics;
-        int is_subscription;
-        int i;
-
-	quiet = !isatty(STDIN_FILENO);
-
-	/* Kafka configuration */
-	conf = rd_kafka_conf_new();
-
-        /* Set logger */
-        rd_kafka_conf_set_log_cb(conf, logger);
-
-	/* Quick termination */
-	snprintf(tmp, sizeof(tmp), "%i", SIGIO);
-	rd_kafka_conf_set(conf, "internal.termination.signal", tmp, NULL, 0);
-
-	/* Topic configuration */
-	topic_conf = rd_kafka_topic_conf_new();
-
-	while ((opt = getopt(argc, argv, "g:b:qd:eX:ADO")) != -1) {
-		switch (opt) {
-		case 'b':
-			brokers = optarg;
-			break;
-                case 'g':
-                        group = optarg;
-                        break;
-		case 'e':
-			exit_eof = 1;
-			break;
-		case 'd':
-			debug = optarg;
-			break;
-		case 'q':
-			quiet = 1;
-			break;
-		case 'A':
-			output = OUTPUT_RAW;
-			break;
-		case 'X':
-		{
-			char *name, *val;
-			rd_kafka_conf_res_t res;
-
-			if (!strcmp(optarg, "list") ||
-			    !strcmp(optarg, "help")) {
-				rd_kafka_conf_properties_show(stdout);
-				exit(0);
-			}
-
-			if (!strcmp(optarg, "dump")) {
-				do_conf_dump = 1;
-				continue;
-			}
-
-			name = optarg;
-			if (!(val = strchr(name, '='))) {
-				fprintf(stderr, "%% Expected "
-					"-X property=value, not %s\n", name);
-				exit(1);
-			}
-
-			*val = '\0';
-			val++;
-
-			res = RD_KAFKA_CONF_UNKNOWN;
-			/* Try "topic." prefixed properties on topic
-			 * conf first, and then fall through to global if
-			 * it didnt match a topic configuration property. */
-			if (!strncmp(name, "topic.", strlen("topic.")))
-				res = rd_kafka_topic_conf_set(topic_conf,
-							      name+
-							      strlen("topic."),
-							      val,
-							      errstr,
-							      sizeof(errstr));
-
-			if (res == RD_KAFKA_CONF_UNKNOWN)
-				res = rd_kafka_conf_set(conf, name, val,
-							errstr, sizeof(errstr));
-
-			if (res != RD_KAFKA_CONF_OK) {
-				fprintf(stderr, "%% %s\n", errstr);
-				exit(1);
-			}
-		}
-		break;
-
-                case 'D':
-                case 'O':
-                        mode = opt;
-                        break;
-
-		default:
-			goto usage;
-		}
-	}
-
-
-	if (do_conf_dump) {
-		const char **arr;
-		size_t cnt;
-		int pass;
-
-		for (pass = 0 ; pass < 2 ; pass++) {
-			if (pass == 0) {
-				arr = rd_kafka_conf_dump(conf, &cnt);
-				printf("# Global config\n");
-			} else {
-				printf("# Topic config\n");
-				arr = rd_kafka_topic_conf_dump(topic_conf,
-							       &cnt);
-			}
-
-			for (i = 0 ; i < (int)cnt ; i += 2)
-				printf("%s = %s\n",
-				       arr[i], arr[i+1]);
-
-			printf("\n");
-
-			rd_kafka_conf_dump_free(arr, cnt);
-		}
-
-		exit(0);
-	}
-
-
-	if (strchr("OC", mode) && optind == argc) {
-	usage:
-		fprintf(stderr,
-			"Usage: %s [options] <topic[:part]> <topic[:part]>..\n"
-			"\n"
-			"librdkafka version %s (0x%08x)\n"
-			"\n"
-			" Options:\n"
-                        "  -g <group>      Consumer group (%s)\n"
-			"  -b <brokers>    Broker address (%s)\n"
-			"  -e              Exit consumer when last message\n"
-			"                  in partition has been received.\n"
-                        "  -D              Describe group.\n"
-                        "  -O              Get commmitted offset(s)\n"
-			"  -d [facs..]     Enable debugging contexts:\n"
-			"                  %s\n"
-			"  -q              Be quiet\n"
-			"  -A              Raw payload output (consumer)\n"
-			"  -X <prop=name> Set arbitrary librdkafka "
-			"configuration property\n"
-			"               Properties prefixed with \"topic.\" "
-			"will be set on topic object.\n"
-			"               Use '-X list' to see the full list\n"
-			"               of supported properties.\n"
-			"\n"
-                        "For balanced consumer groups use the 'topic1 topic2..'"
-                        " format\n"
-                        "and for static assignment use "
-                        "'topic1:part1 topic1:part2 topic2:part1..'\n"
-			"\n",
-			argv[0],
-			rd_kafka_version_str(), rd_kafka_version(),
-                        group, brokers,
-			RD_KAFKA_DEBUG_CONTEXTS);
-		exit(1);
-	}
-
-
-	signal(SIGINT, stop);
-	signal(SIGUSR1, sig_usr1);
-
-	if (debug &&
-	    rd_kafka_conf_set(conf, "debug", debug, errstr, sizeof(errstr)) !=
-	    RD_KAFKA_CONF_OK) {
-		fprintf(stderr, "%% Debug configuration failed: %s: %s\n",
-			errstr, debug);
-		exit(1);
-	}
-
-        /*
-         * Client/Consumer group
-         */
-
-        if (strchr("CO", mode)) {
-                /* Consumer groups require a group id */
-                if (!group)
-                        group = "rdkafka_consumer_example";
-                if (rd_kafka_conf_set(conf, "group.id", group,
-                                      errstr, sizeof(errstr)) !=
-                    RD_KAFKA_CONF_OK) {
-                        fprintf(stderr, "%% %s\n", errstr);
-                        exit(1);
-                }
-
-                /* Consumer groups always use broker based offset storage */
-                if (rd_kafka_topic_conf_set(topic_conf, "offset.store.method",
-                                            "broker",
-                                            errstr, sizeof(errstr)) !=
-                    RD_KAFKA_CONF_OK) {
-                        fprintf(stderr, "%% %s\n", errstr);
-                        exit(1);
-                }
-
-                /* Set default topic config for pattern-matched topics. */
-                rd_kafka_conf_set_default_topic_conf(conf, topic_conf);
-
-                /* Callback called on partition assignment changes */
-                rd_kafka_conf_set_rebalance_cb(conf, rebalance_cb);
-        }
-
-        /* Create Kafka handle */
-        if (!(rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf,
-                                errstr, sizeof(errstr)))) {
-                fprintf(stderr,
-                        "%% Failed to create new consumer: %s\n",
-                        errstr);
-                exit(1);
-        }
-
-        /* Add brokers */
-        if (rd_kafka_brokers_add(rk, brokers) == 0) {
-                fprintf(stderr, "%% No valid brokers specified\n");
-                exit(1);
-        }
-
-
-        if (mode == 'D') {
-                int r;
-                /* Describe groups */
-                r = describe_groups(rk, group);
-
-                rd_kafka_destroy(rk);
-                exit(r == -1 ? 1 : 0);
-        }
-
-        /* Redirect rd_kafka_poll() to consumer_poll() */
-        rd_kafka_poll_set_consumer(rk);
-
-        topics = rd_kafka_topic_partition_list_new(argc - optind);
-        is_subscription = 1;
-        for (i = optind ; i < argc ; i++) {
-                /* Parse "topic[:part] */
-                char *topic = argv[i];
-                char *t;
-                int32_t partition = -1;
-
-                if ((t = strstr(topic, ":"))) {
-                        *t = '\0';
-                        partition = atoi(t+1);
-                        is_subscription = 0; /* is assignment */
-                        wait_eof++;
-                }
-
-                rd_kafka_topic_partition_list_add(topics, topic, partition);
-        }
-
-        if (mode == 'O') {
-                /* Offset query */
-
-                err = rd_kafka_committed(rk, topics, 5000);
-                if (err) {
-                        fprintf(stderr, "%% Failed to fetch offsets: %s\n",
-                                rd_kafka_err2str(err));
-                        exit(1);
-                }
-
-                for (i = 0 ; i < topics->cnt ; i++) {
-                        rd_kafka_topic_partition_t *p = &topics->elems[i];
-                        printf("Topic \"%s\" partition %"PRId32,
-                               p->topic, p->partition);
-                        if (p->err)
-                                printf(" error %s",
-                                       rd_kafka_err2str(p->err));
-                        else {
-                                printf(" offset %"PRId64"",
-                                       p->offset);
-
-                                if (p->metadata_size)
-                                        printf(" (%d bytes of metadata)",
-                                               (int)p->metadata_size);
-                        }
-                        printf("\n");
-                }
-
-                goto done;
-        }
-
-
-        if (is_subscription) {
-                fprintf(stderr, "%% Subscribing to %d topics\n", topics->cnt);
-
-                if ((err = rd_kafka_subscribe(rk, topics))) {
-                        fprintf(stderr,
-                                "%% Failed to start consuming topics: %s\n",
-                                rd_kafka_err2str(err));
-                        exit(1);
-                }
-        } else {
-                fprintf(stderr, "%% Assigning %d partitions\n", topics->cnt);
-
-                if ((err = rd_kafka_assign(rk, topics))) {
-                        fprintf(stderr,
-                                "%% Failed to assign partitions: %s\n",
-                                rd_kafka_err2str(err));
-                }
-        }
-
-        while (run) {
-                rd_kafka_message_t *rkmessage;
-
-                rkmessage = rd_kafka_consumer_poll(rk, 1000);
-                if (rkmessage) {
-                        msg_consume(rkmessage);
-                        rd_kafka_message_destroy(rkmessage);
-                }
-        }
-
-done:
-        err = rd_kafka_consumer_close(rk);
-        if (err)
-                fprintf(stderr, "%% Failed to close consumer: %s\n",
-                        rd_kafka_err2str(err));
-        else
-                fprintf(stderr, "%% Consumer closed\n");
-
-        rd_kafka_topic_partition_list_destroy(topics);
-
-        /* Destroy handle */
-        rd_kafka_destroy(rk);
-
-	/* Let background threads clean up and terminate cleanly. */
-	run = 5;
-	while (run-- > 0 && rd_kafka_wait_destroyed(1000) == -1)
-		printf("Waiting for librdkafka to decommission\n");
-	if (run <= 0)
-		rd_kafka_dump(stdout, rk);
-
-	return 0;
-}
diff --git a/thirdparty/librdkafka-0.11.4/examples/rdkafka_consumer_example.cpp b/thirdparty/librdkafka-0.11.4/examples/rdkafka_consumer_example.cpp
deleted file mode 100644
index 83da691..0000000
--- a/thirdparty/librdkafka-0.11.4/examples/rdkafka_consumer_example.cpp
+++ /dev/null
@@ -1,485 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2014, Magnus Edenhill
- * All rights reserved.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met: 
- * 
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer. 
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution. 
- * 
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/**
- * Apache Kafka consumer & producer example programs
- * using the Kafka driver from librdkafka
- * (https://github.com/edenhill/librdkafka)
- */
-
-#include <iostream>
-#include <string>
-#include <cstdlib>
-#include <cstdio>
-#include <csignal>
-#include <cstring>
-
-#ifndef _MSC_VER
-#include <sys/time.h>
-#endif
-
-#ifdef _MSC_VER
-#include "../win32/wingetopt.h"
-#include <atltime.h>
-#elif _AIX
-#include <unistd.h>
-#else
-#include <getopt.h>
-#include <unistd.h>
-#endif
-
-/*
- * Typically include path in a real application would be
- * #include <librdkafka/rdkafkacpp.h>
- */
-#include "rdkafkacpp.h"
-
-
-
-static bool run = true;
-static bool exit_eof = false;
-static int eof_cnt = 0;
-static int partition_cnt = 0;
-static int verbosity = 1;
-static long msg_cnt = 0;
-static int64_t msg_bytes = 0;
-static void sigterm (int sig) {
-  run = false;
-}
-
-
-/**
- * @brief format a string timestamp from the current time
- */
-static void print_time () {
-#ifndef _MSC_VER
-        struct timeval tv;
-        char buf[64];
-        gettimeofday(&tv, NULL);
-        strftime(buf, sizeof(buf) - 1, "%Y-%m-%d %H:%M:%S", localtime(&tv.tv_sec));
-        fprintf(stderr, "%s.%03d: ", buf, (int)(tv.tv_usec / 1000));
-#else
-        std::wcerr << CTime::GetCurrentTime().Format(_T("%Y-%m-%d %H:%M:%S")).GetString()
-                << ": ";
-#endif
-}
-class ExampleEventCb : public RdKafka::EventCb {
- public:
-  void event_cb (RdKafka::Event &event) {
-
-    print_time();
-
-    switch (event.type())
-    {
-      case RdKafka::Event::EVENT_ERROR:
-        std::cerr << "ERROR (" << RdKafka::err2str(event.err()) << "): " <<
-            event.str() << std::endl;
-        if (event.err() == RdKafka::ERR__ALL_BROKERS_DOWN)
-          run = false;
-        break;
-
-      case RdKafka::Event::EVENT_STATS:
-        std::cerr << "\"STATS\": " << event.str() << std::endl;
-        break;
-
-      case RdKafka::Event::EVENT_LOG:
-        fprintf(stderr, "LOG-%i-%s: %s\n",
-                event.severity(), event.fac().c_str(), event.str().c_str());
-        break;
-
-      case RdKafka::Event::EVENT_THROTTLE:
-	std::cerr << "THROTTLED: " << event.throttle_time() << "ms by " <<
-	  event.broker_name() << " id " << (int)event.broker_id() << std::endl;
-	break;
-
-      default:
-        std::cerr << "EVENT " << event.type() <<
-            " (" << RdKafka::err2str(event.err()) << "): " <<
-            event.str() << std::endl;
-        break;
-    }
-  }
-};
-
-
-class ExampleRebalanceCb : public RdKafka::RebalanceCb {
-private:
-  static void part_list_print (const std::vector<RdKafka::TopicPartition*>&partitions){
-    for (unsigned int i = 0 ; i < partitions.size() ; i++)
-      std::cerr << partitions[i]->topic() <<
-	"[" << partitions[i]->partition() << "], ";
-    std::cerr << "\n";
-  }
-
-public:
-  void rebalance_cb (RdKafka::KafkaConsumer *consumer,
-		     RdKafka::ErrorCode err,
-                     std::vector<RdKafka::TopicPartition*> &partitions) {
-    std::cerr << "RebalanceCb: " << RdKafka::err2str(err) << ": ";
-
-    part_list_print(partitions);
-
-    if (err == RdKafka::ERR__ASSIGN_PARTITIONS) {
-      consumer->assign(partitions);
-      partition_cnt = (int)partitions.size();
-    } else {
-      consumer->unassign();
-      partition_cnt = 0;
-    }
-    eof_cnt = 0;
-  }
-};
-
-
-void msg_consume(RdKafka::Message* message, void* opaque) {
-  switch (message->err()) {
-    case RdKafka::ERR__TIMED_OUT:
-      break;
-
-    case RdKafka::ERR_NO_ERROR:
-      /* Real message */
-      msg_cnt++;
-      msg_bytes += message->len();
-      if (verbosity >= 3)
-        std::cerr << "Read msg at offset " << message->offset() << std::endl;
-      RdKafka::MessageTimestamp ts;
-      ts = message->timestamp();
-      if (verbosity >= 2 &&
-	  ts.type != RdKafka::MessageTimestamp::MSG_TIMESTAMP_NOT_AVAILABLE) {
-	std::string tsname = "?";
-	if (ts.type == RdKafka::MessageTimestamp::MSG_TIMESTAMP_CREATE_TIME)
-	  tsname = "create time";
-        else if (ts.type == RdKafka::MessageTimestamp::MSG_TIMESTAMP_LOG_APPEND_TIME)
-          tsname = "log append time";
-        std::cout << "Timestamp: " << tsname << " " << ts.timestamp << std::endl;
-      }
-      if (verbosity >= 2 && message->key()) {
-        std::cout << "Key: " << *message->key() << std::endl;
-      }
-      if (verbosity >= 1) {
-        printf("%.*s\n",
-               static_cast<int>(message->len()),
-               static_cast<const char *>(message->payload()));
-      }
-      break;
-
-    case RdKafka::ERR__PARTITION_EOF:
-      /* Last message */
-      if (exit_eof && ++eof_cnt == partition_cnt) {
-        std::cerr << "%% EOF reached for all " << partition_cnt <<
-            " partition(s)" << std::endl;
-        run = false;
-      }
-      break;
-
-    case RdKafka::ERR__UNKNOWN_TOPIC:
-    case RdKafka::ERR__UNKNOWN_PARTITION:
-      std::cerr << "Consume failed: " << message->errstr() << std::endl;
-      run = false;
-      break;
-
-    default:
-      /* Errors */
-      std::cerr << "Consume failed: " << message->errstr() << std::endl;
-      run = false;
-  }
-}
-
-
-class ExampleConsumeCb : public RdKafka::ConsumeCb {
- public:
-  void consume_cb (RdKafka::Message &msg, void *opaque) {
-    msg_consume(&msg, opaque);
-  }
-};
-
-
-
-int main (int argc, char **argv) {
-  std::string brokers = "localhost";
-  std::string errstr;
-  std::string topic_str;
-  std::string mode;
-  std::string debug;
-  std::vector<std::string> topics;
-  bool do_conf_dump = false;
-  int opt;
-  int use_ccb = 0;
-
-  /*
-   * Create configuration objects
-   */
-  RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
-  RdKafka::Conf *tconf = RdKafka::Conf::create(RdKafka::Conf::CONF_TOPIC);
-
-  ExampleRebalanceCb ex_rebalance_cb;
-  conf->set("rebalance_cb", &ex_rebalance_cb, errstr);
-
-  while ((opt = getopt(argc, argv, "g:b:z:qd:eX:AM:f:qv")) != -1) {
-    switch (opt) {
-    case 'g':
-      if (conf->set("group.id",  optarg, errstr) != RdKafka::Conf::CONF_OK) {
-        std::cerr << errstr << std::endl;
-        exit(1);
-      }
-      break;
-    case 'b':
-      brokers = optarg;
-      break;
-    case 'z':
-      if (conf->set("compression.codec", optarg, errstr) !=
-	  RdKafka::Conf::CONF_OK) {
-	std::cerr << errstr << std::endl;
-	exit(1);
-      }
-      break;
-    case 'e':
-      exit_eof = true;
-      break;
-    case 'd':
-      debug = optarg;
-      break;
-    case 'M':
-      if (conf->set("statistics.interval.ms", optarg, errstr) !=
-          RdKafka::Conf::CONF_OK) {
-        std::cerr << errstr << std::endl;
-        exit(1);
-      }
-      break;
-    case 'X':
-      {
-	char *name, *val;
-
-	if (!strcmp(optarg, "dump")) {
-	  do_conf_dump = true;
-	  continue;
-	}
-
-	name = optarg;
-	if (!(val = strchr(name, '='))) {
-          std::cerr << "%% Expected -X property=value, not " <<
-              name << std::endl;
-	  exit(1);
-	}
-
-	*val = '\0';
-	val++;
-
-	/* Try "topic." prefixed properties on topic
-	 * conf first, and then fall through to global if
-	 * it didnt match a topic configuration property. */
-        RdKafka::Conf::ConfResult res = RdKafka::Conf::CONF_UNKNOWN;
-	if (!strncmp(name, "topic.", strlen("topic.")))
-          res = tconf->set(name+strlen("topic."), val, errstr);
-        if (res == RdKafka::Conf::CONF_UNKNOWN)
-	  res = conf->set(name, val, errstr);
-
-	if (res != RdKafka::Conf::CONF_OK) {
-          std::cerr << errstr << std::endl;
-	  exit(1);
-	}
-      }
-      break;
-
-      case 'f':
-        if (!strcmp(optarg, "ccb"))
-          use_ccb = 1;
-        else {
-          std::cerr << "Unknown option: " << optarg << std::endl;
-          exit(1);
-        }
-        break;
-
-      case 'q':
-        verbosity--;
-        break;
-
-      case 'v':
-        verbosity++;
-        break;
-
-    default:
-      goto usage;
-    }
-  }
-
-  for (; optind < argc ; optind++)
-    topics.push_back(std::string(argv[optind]));
-
-  if (topics.empty() || optind != argc) {
-  usage:
-    fprintf(stderr,
-            "Usage: %s -g <group-id> [options] topic1 topic2..\n"
-            "\n"
-            "librdkafka version %s (0x%08x)\n"
-            "\n"
-            " Options:\n"
-            "  -g <group-id>   Consumer group id\n"
-            "  -b <brokers>    Broker address (localhost:9092)\n"
-            "  -z <codec>      Enable compression:\n"
-            "                  none|gzip|snappy\n"
-            "  -e              Exit consumer when last message\n"
-            "                  in partition has been received.\n"
-            "  -d [facs..]     Enable debugging contexts:\n"
-            "                  %s\n"
-            "  -M <intervalms> Enable statistics\n"
-            "  -X <prop=name>  Set arbitrary librdkafka "
-            "configuration property\n"
-            "                  Properties prefixed with \"topic.\" "
-            "will be set on topic object.\n"
-            "                  Use '-X list' to see the full list\n"
-            "                  of supported properties.\n"
-            "  -f <flag>       Set option:\n"
-            "                     ccb - use consume_callback\n"
-            "  -q              Quiet / Decrease verbosity\n"
-            "  -v              Increase verbosity\n"
-            "\n"
-            "\n",
-	    argv[0],
-	    RdKafka::version_str().c_str(), RdKafka::version(),
-	    RdKafka::get_debug_contexts().c_str());
-	exit(1);
-  }
-
-
-  /*
-   * Set configuration properties
-   */
-  conf->set("metadata.broker.list", brokers, errstr);
-
-  if (!debug.empty()) {
-    if (conf->set("debug", debug, errstr) != RdKafka::Conf::CONF_OK) {
-      std::cerr << errstr << std::endl;
-      exit(1);
-    }
-  }
-
-  ExampleConsumeCb ex_consume_cb;
-
-  if(use_ccb) {
-    conf->set("consume_cb", &ex_consume_cb, errstr);
-  }
-
-  ExampleEventCb ex_event_cb;
-  conf->set("event_cb", &ex_event_cb, errstr);
-
-  if (do_conf_dump) {
-    int pass;
-
-    for (pass = 0 ; pass < 2 ; pass++) {
-      std::list<std::string> *dump;
-      if (pass == 0) {
-        dump = conf->dump();
-        std::cout << "# Global config" << std::endl;
-      } else {
-        dump = tconf->dump();
-        std::cout << "# Topic config" << std::endl;
-      }
-
-      for (std::list<std::string>::iterator it = dump->begin();
-           it != dump->end(); ) {
-        std::cout << *it << " = ";
-        it++;
-        std::cout << *it << std::endl;
-        it++;
-      }
-      std::cout << std::endl;
-    }
-    exit(0);
-  }
-
-  conf->set("default_topic_conf", tconf, errstr);
-  delete tconf;
-
-  signal(SIGINT, sigterm);
-  signal(SIGTERM, sigterm);
-
-
-  /*
-   * Consumer mode
-   */
-
-  /*
-   * Create consumer using accumulated global configuration.
-   */
-  RdKafka::KafkaConsumer *consumer = RdKafka::KafkaConsumer::create(conf, errstr);
-  if (!consumer) {
-    std::cerr << "Failed to create consumer: " << errstr << std::endl;
-    exit(1);
-  }
-
-  delete conf;
-
-  std::cout << "% Created consumer " << consumer->name() << std::endl;
-
-
-  /*
-   * Subscribe to topics
-   */
-  RdKafka::ErrorCode err = consumer->subscribe(topics);
-  if (err) {
-    std::cerr << "Failed to subscribe to " << topics.size() << " topics: "
-              << RdKafka::err2str(err) << std::endl;
-    exit(1);
-  }
-
-  /*
-   * Consume messages
-   */
-  while (run) {
-    RdKafka::Message *msg = consumer->consume(1000);
-    if (!use_ccb) {
-      msg_consume(msg, NULL);
-    }
-    delete msg;
-  }
-
-#ifndef _MSC_VER
-  alarm(10);
-#endif
-
-  /*
-   * Stop consumer
-   */
-  consumer->close();
-  delete consumer;
-
-  std::cerr << "% Consumed " << msg_cnt << " messages ("
-            << msg_bytes << " bytes)" << std::endl;
-
-  /*
-   * Wait for RdKafka to decommission.
-   * This is not strictly needed (with check outq_len() above), but
-   * allows RdKafka to clean up all its resources before the application
-   * exits so that memory profilers such as valgrind wont complain about
-   * memory leaks.
-   */
-  RdKafka::wait_destroyed(5000);
-
-  return 0;
-}
diff --git a/thirdparty/librdkafka-0.11.4/examples/rdkafka_example.c b/thirdparty/librdkafka-0.11.4/examples/rdkafka_example.c
deleted file mode 100644
index 77c345e..0000000
--- a/thirdparty/librdkafka-0.11.4/examples/rdkafka_example.c
+++ /dev/null
@@ -1,885 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012, Magnus Edenhill
- * All rights reserved.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met: 
- * 
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer. 
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution. 
- * 
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/**
- * Apache Kafka consumer & producer example programs
- * using the Kafka driver from librdkafka
- * (https://github.com/edenhill/librdkafka)
- */
-
-#include <ctype.h>
-#include <signal.h>
-#include <string.h>
-#include <unistd.h>
-#include <stdlib.h>
-#include <syslog.h>
-#include <time.h>
-#include <sys/time.h>
-#include <getopt.h>
-
-/* Typical include path would be <librdkafka/rdkafka.h>, but this program
- * is builtin from within the librdkafka source tree and thus differs. */
-#include "rdkafka.h"  /* for Kafka driver */
-
-
-static int run = 1;
-static rd_kafka_t *rk;
-static int exit_eof = 0;
-static int quiet = 0;
-static 	enum {
-	OUTPUT_HEXDUMP,
-	OUTPUT_RAW,
-} output = OUTPUT_HEXDUMP;
-
-static void stop (int sig) {
-	run = 0;
-	fclose(stdin); /* abort fgets() */
-}
-
-
-static void hexdump (FILE *fp, const char *name, const void *ptr, size_t len) {
-	const char *p = (const char *)ptr;
-	size_t of = 0;
-
-
-	if (name)
-		fprintf(fp, "%s hexdump (%zd bytes):\n", name, len);
-
-	for (of = 0 ; of < len ; of += 16) {
-		char hexen[16*3+1];
-		char charen[16+1];
-		int hof = 0;
-
-		int cof = 0;
-		int i;
-
-		for (i = of ; i < (int)of + 16 && i < (int)len ; i++) {
-			hof += sprintf(hexen+hof, "%02x ", p[i] & 0xff);
-			cof += sprintf(charen+cof, "%c",
-				       isprint((int)p[i]) ? p[i] : '.');
-		}
-		fprintf(fp, "%08zx: %-48s %-16s\n",
-			of, hexen, charen);
-	}
-}
-
-/**
- * Kafka logger callback (optional)
- */
-static void logger (const rd_kafka_t *rk, int level,
-		    const char *fac, const char *buf) {
-	struct timeval tv;
-	gettimeofday(&tv, NULL);
-	fprintf(stderr, "%u.%03u RDKAFKA-%i-%s: %s: %s\n",
-		(int)tv.tv_sec, (int)(tv.tv_usec / 1000),
-		level, fac, rk ? rd_kafka_name(rk) : NULL, buf);
-}
-
-/**
- * Message delivery report callback.
- * Called once for each message.
- * See rdkafka.h for more information.
- */
-static void msg_delivered (rd_kafka_t *rk,
-			   void *payload, size_t len,
-			   int error_code,
-			   void *opaque, void *msg_opaque) {
-
-	if (error_code)
-		fprintf(stderr, "%% Message delivery failed: %s\n",
-			rd_kafka_err2str(error_code));
-	else if (!quiet)
-		fprintf(stderr, "%% Message delivered (%zd bytes): %.*s\n", len,
-			(int)len, (const char *)payload);
-}
-
-/**
- * Message delivery report callback using the richer rd_kafka_message_t object.
- */
-static void msg_delivered2 (rd_kafka_t *rk,
-                            const rd_kafka_message_t *rkmessage, void *opaque) {
-	printf("del: %s: offset %"PRId64"\n",
-	       rd_kafka_err2str(rkmessage->err), rkmessage->offset);
-        if (rkmessage->err)
-		fprintf(stderr, "%% Message delivery failed: %s\n",
-                        rd_kafka_err2str(rkmessage->err));
-	else if (!quiet)
-		fprintf(stderr,
-                        "%% Message delivered (%zd bytes, offset %"PRId64", "
-                        "partition %"PRId32"): %.*s\n",
-                        rkmessage->len, rkmessage->offset,
-			rkmessage->partition,
-			(int)rkmessage->len, (const char *)rkmessage->payload);
-}
-
-
-static void msg_consume (rd_kafka_message_t *rkmessage,
-			 void *opaque) {
-	if (rkmessage->err) {
-		if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) {
-			fprintf(stderr,
-				"%% Consumer reached end of %s [%"PRId32"] "
-			       "message queue at offset %"PRId64"\n",
-			       rd_kafka_topic_name(rkmessage->rkt),
-			       rkmessage->partition, rkmessage->offset);
-
-			if (exit_eof)
-				run = 0;
-
-			return;
-		}
-
-		fprintf(stderr, "%% Consume error for topic \"%s\" [%"PRId32"] "
-		       "offset %"PRId64": %s\n",
-		       rd_kafka_topic_name(rkmessage->rkt),
-		       rkmessage->partition,
-		       rkmessage->offset,
-		       rd_kafka_message_errstr(rkmessage));
-
-                if (rkmessage->err == RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION ||
-                    rkmessage->err == RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC)
-                        run = 0;
-		return;
-	}
-
-	if (!quiet) {
-		rd_kafka_timestamp_type_t tstype;
-		int64_t timestamp;
-                rd_kafka_headers_t *hdrs;
-
-		fprintf(stdout, "%% Message (offset %"PRId64", %zd bytes):\n",
-			rkmessage->offset, rkmessage->len);
-
-		timestamp = rd_kafka_message_timestamp(rkmessage, &tstype);
-		if (tstype != RD_KAFKA_TIMESTAMP_NOT_AVAILABLE) {
-			const char *tsname = "?";
-			if (tstype == RD_KAFKA_TIMESTAMP_CREATE_TIME)
-				tsname = "create time";
-			else if (tstype == RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME)
-				tsname = "log append time";
-
-			fprintf(stdout, "%% Message timestamp: %s %"PRId64
-				" (%ds ago)\n",
-				tsname, timestamp,
-				!timestamp ? 0 :
-				(int)time(NULL) - (int)(timestamp/1000));
-		}
-
-                if (!rd_kafka_message_headers(rkmessage, &hdrs)) {
-                        size_t idx = 0;
-                        const char *name;
-                        const void *val;
-                        size_t size;
-
-                        fprintf(stdout, "%% Headers:");
-
-                        while (!rd_kafka_header_get_all(hdrs, idx++,
-                                                        &name, &val, &size)) {
-                                fprintf(stdout, "%s%s=",
-                                        idx == 1 ? " " : ", ", name);
-                                if (val)
-                                        fprintf(stdout, "\"%.*s\"",
-                                                (int)size, (const char *)val);
-                                else
-                                        fprintf(stdout, "NULL");
-                        }
-                        fprintf(stdout, "\n");
-                }
-	}
-
-	if (rkmessage->key_len) {
-		if (output == OUTPUT_HEXDUMP)
-			hexdump(stdout, "Message Key",
-				rkmessage->key, rkmessage->key_len);
-		else
-			printf("Key: %.*s\n",
-			       (int)rkmessage->key_len, (char *)rkmessage->key);
-	}
-
-	if (output == OUTPUT_HEXDUMP)
-		hexdump(stdout, "Message Payload",
-			rkmessage->payload, rkmessage->len);
-	else
-		printf("%.*s\n",
-		       (int)rkmessage->len, (char *)rkmessage->payload);
-}
-
-
-static void metadata_print (const char *topic,
-                            const struct rd_kafka_metadata *metadata) {
-        int i, j, k;
-
-        printf("Metadata for %s (from broker %"PRId32": %s):\n",
-               topic ? : "all topics",
-               metadata->orig_broker_id,
-               metadata->orig_broker_name);
-
-
-        /* Iterate brokers */
-        printf(" %i brokers:\n", metadata->broker_cnt);
-        for (i = 0 ; i < metadata->broker_cnt ; i++)
-                printf("  broker %"PRId32" at %s:%i\n",
-                       metadata->brokers[i].id,
-                       metadata->brokers[i].host,
-                       metadata->brokers[i].port);
-
-        /* Iterate topics */
-        printf(" %i topics:\n", metadata->topic_cnt);
-        for (i = 0 ; i < metadata->topic_cnt ; i++) {
-                const struct rd_kafka_metadata_topic *t = &metadata->topics[i];
-                printf("  topic \"%s\" with %i partitions:",
-                       t->topic,
-                       t->partition_cnt);
-                if (t->err) {
-                        printf(" %s", rd_kafka_err2str(t->err));
-                        if (t->err == RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE)
-                                printf(" (try again)");
-                }
-                printf("\n");
-
-                /* Iterate topic's partitions */
-                for (j = 0 ; j < t->partition_cnt ; j++) {
-                        const struct rd_kafka_metadata_partition *p;
-                        p = &t->partitions[j];
-                        printf("    partition %"PRId32", "
-                               "leader %"PRId32", replicas: ",
-                               p->id, p->leader);
-
-                        /* Iterate partition's replicas */
-                        for (k = 0 ; k < p->replica_cnt ; k++)
-                                printf("%s%"PRId32,
-                                       k > 0 ? ",":"", p->replicas[k]);
-
-                        /* Iterate partition's ISRs */
-                        printf(", isrs: ");
-                        for (k = 0 ; k < p->isr_cnt ; k++)
-                                printf("%s%"PRId32,
-                                       k > 0 ? ",":"", p->isrs[k]);
-                        if (p->err)
-                                printf(", %s\n", rd_kafka_err2str(p->err));
-                        else
-                                printf("\n");
-                }
-        }
-}
-
-
-static void sig_usr1 (int sig) {
-	rd_kafka_dump(stdout, rk);
-}
-
-int main (int argc, char **argv) {
-	rd_kafka_topic_t *rkt;
-	char *brokers = "localhost:9092";
-	char mode = 'C';
-	char *topic = NULL;
-	int partition = RD_KAFKA_PARTITION_UA;
-	int opt;
-	rd_kafka_conf_t *conf;
-	rd_kafka_topic_conf_t *topic_conf;
-	char errstr[512];
-	int64_t start_offset = 0;
-        int report_offsets = 0;
-	int do_conf_dump = 0;
-	char tmp[16];
-        int64_t seek_offset = 0;
-        int64_t tmp_offset = 0;
-	int get_wmarks = 0;
-        rd_kafka_headers_t *hdrs = NULL;
-        rd_kafka_resp_err_t err;
-
-	/* Kafka configuration */
-	conf = rd_kafka_conf_new();
-
-        /* Set logger */
-        rd_kafka_conf_set_log_cb(conf, logger);
-
-	/* Quick termination */
-	snprintf(tmp, sizeof(tmp), "%i", SIGIO);
-	rd_kafka_conf_set(conf, "internal.termination.signal", tmp, NULL, 0);
-
-	/* Topic configuration */
-	topic_conf = rd_kafka_topic_conf_new();
-
-	while ((opt = getopt(argc, argv, "PCLt:p:b:z:qd:o:eX:As:H:")) != -1) {
-		switch (opt) {
-		case 'P':
-		case 'C':
-                case 'L':
-			mode = opt;
-			break;
-		case 't':
-			topic = optarg;
-			break;
-		case 'p':
-			partition = atoi(optarg);
-			break;
-		case 'b':
-			brokers = optarg;
-			break;
-		case 'z':
-			if (rd_kafka_conf_set(conf, "compression.codec",
-					      optarg,
-					      errstr, sizeof(errstr)) !=
-			    RD_KAFKA_CONF_OK) {
-				fprintf(stderr, "%% %s\n", errstr);
-				exit(1);
-			}
-			break;
-		case 'o':
-                case 's':
-			if (!strcmp(optarg, "end"))
-				tmp_offset = RD_KAFKA_OFFSET_END;
-			else if (!strcmp(optarg, "beginning"))
-				tmp_offset = RD_KAFKA_OFFSET_BEGINNING;
-			else if (!strcmp(optarg, "stored"))
-				tmp_offset = RD_KAFKA_OFFSET_STORED;
-                        else if (!strcmp(optarg, "report"))
-                                report_offsets = 1;
-			else if (!strcmp(optarg, "wmark"))
-				get_wmarks = 1;
-			else {
-				tmp_offset = strtoll(optarg, NULL, 10);
-
-				if (tmp_offset < 0)
-					tmp_offset = RD_KAFKA_OFFSET_TAIL(-tmp_offset);
-			}
-
-                        if (opt == 'o')
-                                start_offset = tmp_offset;
-                        else if (opt == 's')
-                                seek_offset = tmp_offset;
-			break;
-		case 'e':
-			exit_eof = 1;
-			break;
-		case 'd':
-			if (rd_kafka_conf_set(conf, "debug", optarg,
-					      errstr, sizeof(errstr)) !=
-			    RD_KAFKA_CONF_OK) {
-				fprintf(stderr,
-					"%% Debug configuration failed: "
-					"%s: %s\n",
-					errstr, optarg);
-				exit(1);
-			}
-			break;
-		case 'q':
-			quiet = 1;
-			break;
-		case 'A':
-			output = OUTPUT_RAW;
-			break;
-                case 'H':
-                {
-                        char *name, *val;
-                        size_t name_sz = -1;
-
-                        name = optarg;
-                        val = strchr(name, '=');
-                        if (val) {
-                                name_sz = (size_t)(val-name);
-                                val++; /* past the '=' */
-                        }
-
-                        if (!hdrs)
-                                hdrs = rd_kafka_headers_new(8);
-
-                        err = rd_kafka_header_add(hdrs, name, name_sz, val, -1);
-                        if (err) {
-                                fprintf(stderr,
-                                        "%% Failed to add header %s: %s\n",
-                                        name, rd_kafka_err2str(err));
-                                exit(1);
-                        }
-                }
-                break;
-
-		case 'X':
-		{
-			char *name, *val;
-			rd_kafka_conf_res_t res;
-
-			if (!strcmp(optarg, "list") ||
-			    !strcmp(optarg, "help")) {
-				rd_kafka_conf_properties_show(stdout);
-				exit(0);
-			}
-
-			if (!strcmp(optarg, "dump")) {
-				do_conf_dump = 1;
-				continue;
-			}
-
-			name = optarg;
-			if (!(val = strchr(name, '='))) {
-				char dest[512];
-				size_t dest_size = sizeof(dest);
-				/* Return current value for property. */
-
-				res = RD_KAFKA_CONF_UNKNOWN;
-				if (!strncmp(name, "topic.", strlen("topic.")))
-					res = rd_kafka_topic_conf_get(
-						topic_conf,
-						name+strlen("topic."),
-						dest, &dest_size);
-				if (res == RD_KAFKA_CONF_UNKNOWN)
-					res = rd_kafka_conf_get(
-						conf, name, dest, &dest_size);
-
-				if (res == RD_KAFKA_CONF_OK) {
-					printf("%s = %s\n", name, dest);
-					exit(0);
-				} else {
-					fprintf(stderr,
-						"%% %s property\n",
-						res == RD_KAFKA_CONF_UNKNOWN ?
-						"Unknown" : "Invalid");
-					exit(1);
-				}
-			}
-
-			*val = '\0';
-			val++;
-
-			res = RD_KAFKA_CONF_UNKNOWN;
-			/* Try "topic." prefixed properties on topic
-			 * conf first, and then fall through to global if
-			 * it didnt match a topic configuration property. */
-			if (!strncmp(name, "topic.", strlen("topic.")))
-				res = rd_kafka_topic_conf_set(topic_conf,
-							      name+
-							      strlen("topic."),
-							      val,
-							      errstr,
-							      sizeof(errstr));
-
-			if (res == RD_KAFKA_CONF_UNKNOWN)
-				res = rd_kafka_conf_set(conf, name, val,
-							errstr, sizeof(errstr));
-
-			if (res != RD_KAFKA_CONF_OK) {
-				fprintf(stderr, "%% %s\n", errstr);
-				exit(1);
-			}
-		}
-		break;
-
-		default:
-			goto usage;
-		}
-	}
-
-
-	if (do_conf_dump) {
-		const char **arr;
-		size_t cnt;
-		int pass;
-
-		for (pass = 0 ; pass < 2 ; pass++) {
-			int i;
-
-			if (pass == 0) {
-				arr = rd_kafka_conf_dump(conf, &cnt);
-				printf("# Global config\n");
-			} else {
-				printf("# Topic config\n");
-				arr = rd_kafka_topic_conf_dump(topic_conf,
-							       &cnt);
-			}
-
-			for (i = 0 ; i < (int)cnt ; i += 2)
-				printf("%s = %s\n",
-				       arr[i], arr[i+1]);
-
-			printf("\n");
-
-			rd_kafka_conf_dump_free(arr, cnt);
-		}
-
-		exit(0);
-	}
-
-
-	if (optind != argc || (mode != 'L' && !topic)) {
-	usage:
-		fprintf(stderr,
-			"Usage: %s -C|-P|-L -t <topic> "
-			"[-p <partition>] [-b <host1:port1,host2:port2,..>]\n"
-			"\n"
-			"librdkafka version %s (0x%08x)\n"
-			"\n"
-			" Options:\n"
-			"  -C | -P         Consumer or Producer mode\n"
-                        "  -L              Metadata list mode\n"
-			"  -t <topic>      Topic to fetch / produce\n"
-			"  -p <num>        Partition (random partitioner)\n"
-			"  -b <brokers>    Broker address (localhost:9092)\n"
-			"  -z <codec>      Enable compression:\n"
-			"                  none|gzip|snappy\n"
-			"  -o <offset>     Start offset (consumer):\n"
-			"                  beginning, end, NNNNN or -NNNNN\n"
-			"                  wmark returns the current hi&lo "
-			"watermarks.\n"
-                        "  -o report       Report message offsets (producer)\n"
-			"  -e              Exit consumer when last message\n"
-			"                  in partition has been received.\n"
-			"  -d [facs..]     Enable debugging contexts:\n"
-			"                  %s\n"
-			"  -q              Be quiet\n"
-			"  -A              Raw payload output (consumer)\n"
-                        "  -H <name[=value]> Add header to message (producer)\n"
-			"  -X <prop=name>  Set arbitrary librdkafka "
-			"configuration property\n"
-			"                  Properties prefixed with \"topic.\" "
-			"will be set on topic object.\n"
-			"  -X list         Show full list of supported "
-			"properties.\n"
-			"  -X <prop>       Get single property value\n"
-			"\n"
-			" In Consumer mode:\n"
-			"  writes fetched messages to stdout\n"
-			" In Producer mode:\n"
-			"  reads messages from stdin and sends to broker\n"
-                        " In List mode:\n"
-                        "  queries broker for metadata information, "
-                        "topic is optional.\n"
-			"\n"
-			"\n"
-			"\n",
-			argv[0],
-			rd_kafka_version_str(), rd_kafka_version(),
-			RD_KAFKA_DEBUG_CONTEXTS);
-		exit(1);
-	}
-
-	if ((mode == 'C' && !isatty(STDIN_FILENO)) ||
-	    (mode != 'C' && !isatty(STDOUT_FILENO)))
-		quiet = 1;
-
-
-	signal(SIGINT, stop);
-	signal(SIGUSR1, sig_usr1);
-
-	if (mode == 'P') {
-		/*
-		 * Producer
-		 */
-		char buf[2048];
-		int sendcnt = 0;
-
-		/* Set up a message delivery report callback.
-		 * It will be called once for each message, either on successful
-		 * delivery to broker, or upon failure to deliver to broker. */
-
-                /* If offset reporting (-o report) is enabled, use the
-                 * richer dr_msg_cb instead. */
-                if (report_offsets) {
-                        rd_kafka_topic_conf_set(topic_conf,
-                                                "produce.offset.report",
-                                                "true", errstr, sizeof(errstr));
-                        rd_kafka_conf_set_dr_msg_cb(conf, msg_delivered2);
-                } else
-                        rd_kafka_conf_set_dr_cb(conf, msg_delivered);
-
-		/* Create Kafka handle */
-		if (!(rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf,
-					errstr, sizeof(errstr)))) {
-			fprintf(stderr,
-				"%% Failed to create new producer: %s\n",
-				errstr);
-			exit(1);
-		}
-
-		/* Add brokers */
-		if (rd_kafka_brokers_add(rk, brokers) == 0) {
-			fprintf(stderr, "%% No valid brokers specified\n");
-			exit(1);
-		}
-
-		/* Create topic */
-		rkt = rd_kafka_topic_new(rk, topic, topic_conf);
-                topic_conf = NULL; /* Now owned by topic */
-
-		if (!quiet)
-			fprintf(stderr,
-				"%% Type stuff and hit enter to send\n");
-
-		while (run && fgets(buf, sizeof(buf), stdin)) {
-			size_t len = strlen(buf);
-			if (buf[len-1] == '\n')
-				buf[--len] = '\0';
-
-			/* Send/Produce message. */
-                        if (hdrs) {
-                                rd_kafka_headers_t *hdrs_copy;
-
-                                hdrs_copy = rd_kafka_headers_copy(hdrs);
-
-                                err = rd_kafka_producev(
-                                        rk,
-                                        RD_KAFKA_V_RKT(rkt),
-                                        RD_KAFKA_V_PARTITION(partition),
-                                        RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY),
-                                        RD_KAFKA_V_VALUE(buf, len),
-                                        RD_KAFKA_V_HEADERS(hdrs_copy),
-                                        RD_KAFKA_V_END);
-
-                                if (err)
-                                        rd_kafka_headers_destroy(hdrs_copy);
-
-                        } else {
-                                if (rd_kafka_produce(
-                                            rkt, partition,
-                                            RD_KAFKA_MSG_F_COPY,
-                                            /* Payload and length */
-                                            buf, len,
-                                            /* Optional key and its length */
-                                            NULL, 0,
-                                            /* Message opaque, provided in
-                                             * delivery report callback as
-                                             * msg_opaque. */
-                                            NULL) == -1) {
-                                        err = rd_kafka_last_error();
-                                }
-                        }
-
-                        if (err) {
-                                fprintf(stderr,
-                                        "%% Failed to produce to topic %s "
-					"partition %i: %s\n",
-					rd_kafka_topic_name(rkt), partition,
-					rd_kafka_err2str(err));
-
-				/* Poll to handle delivery reports */
-				rd_kafka_poll(rk, 0);
-				continue;
-			}
-
-			if (!quiet)
-				fprintf(stderr, "%% Sent %zd bytes to topic "
-					"%s partition %i\n",
-				len, rd_kafka_topic_name(rkt), partition);
-			sendcnt++;
-			/* Poll to handle delivery reports */
-			rd_kafka_poll(rk, 0);
-		}
-
-		/* Poll to handle delivery reports */
-		rd_kafka_poll(rk, 0);
-
-		/* Wait for messages to be delivered */
-		while (run && rd_kafka_outq_len(rk) > 0)
-			rd_kafka_poll(rk, 100);
-
-		/* Destroy topic */
-		rd_kafka_topic_destroy(rkt);
-
-		/* Destroy the handle */
-		rd_kafka_destroy(rk);
-
-	} else if (mode == 'C') {
-		/*
-		 * Consumer
-		 */
-
-		/* Create Kafka handle */
-		if (!(rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf,
-					errstr, sizeof(errstr)))) {
-			fprintf(stderr,
-				"%% Failed to create new consumer: %s\n",
-				errstr);
-			exit(1);
-		}
-
-		/* Add brokers */
-		if (rd_kafka_brokers_add(rk, brokers) == 0) {
-			fprintf(stderr, "%% No valid brokers specified\n");
-			exit(1);
-		}
-
-		if (get_wmarks) {
-			int64_t lo, hi;
-                        rd_kafka_resp_err_t err;
-
-			/* Only query for hi&lo partition watermarks */
-
-			if ((err = rd_kafka_query_watermark_offsets(
-				     rk, topic, partition, &lo, &hi, 5000))) {
-				fprintf(stderr, "%% query_watermark_offsets() "
-					"failed: %s\n",
-					rd_kafka_err2str(err));
-				exit(1);
-			}
-
-			printf("%s [%d]: low - high offsets: "
-			       "%"PRId64" - %"PRId64"\n",
-			       topic, partition, lo, hi);
-
-			rd_kafka_destroy(rk);
-			exit(0);
-		}
-
-
-		/* Create topic */
-		rkt = rd_kafka_topic_new(rk, topic, topic_conf);
-                topic_conf = NULL; /* Now owned by topic */
-
-		/* Start consuming */
-		if (rd_kafka_consume_start(rkt, partition, start_offset) == -1){
-			rd_kafka_resp_err_t err = rd_kafka_last_error();
-			fprintf(stderr, "%% Failed to start consuming: %s\n",
-				rd_kafka_err2str(err));
-                        if (err == RD_KAFKA_RESP_ERR__INVALID_ARG)
-                                fprintf(stderr,
-                                        "%% Broker based offset storage "
-                                        "requires a group.id, "
-                                        "add: -X group.id=yourGroup\n");
-			exit(1);
-		}
-
-		while (run) {
-			rd_kafka_message_t *rkmessage;
-                        rd_kafka_resp_err_t err;
-
-                        /* Poll for errors, etc. */
-                        rd_kafka_poll(rk, 0);
-
-			/* Consume single message.
-			 * See rdkafka_performance.c for high speed
-			 * consuming of messages. */
-			rkmessage = rd_kafka_consume(rkt, partition, 1000);
-			if (!rkmessage) /* timeout */
-				continue;
-
-			msg_consume(rkmessage, NULL);
-
-			/* Return message to rdkafka */
-			rd_kafka_message_destroy(rkmessage);
-
-                        if (seek_offset) {
-                                err = rd_kafka_seek(rkt, partition, seek_offset,
-                                                    2000);
-                                if (err)
-                                        printf("Seek failed: %s\n",
-                                               rd_kafka_err2str(err));
-                                else
-                                        printf("Seeked to %"PRId64"\n",
-                                               seek_offset);
-                                seek_offset = 0;
-                        }
-		}
-
-		/* Stop consuming */
-		rd_kafka_consume_stop(rkt, partition);
-
-                while (rd_kafka_outq_len(rk) > 0)
-                        rd_kafka_poll(rk, 10);
-
-		/* Destroy topic */
-		rd_kafka_topic_destroy(rkt);
-
-		/* Destroy handle */
-		rd_kafka_destroy(rk);
-
-        } else if (mode == 'L') {
-                rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;
-
-		/* Create Kafka handle */
-		if (!(rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf,
-					errstr, sizeof(errstr)))) {
-			fprintf(stderr,
-				"%% Failed to create new producer: %s\n",
-				errstr);
-			exit(1);
-		}
-
-		/* Add brokers */
-		if (rd_kafka_brokers_add(rk, brokers) == 0) {
-			fprintf(stderr, "%% No valid brokers specified\n");
-			exit(1);
-		}
-
-                /* Create topic */
-                if (topic) {
-                        rkt = rd_kafka_topic_new(rk, topic, topic_conf);
-                        topic_conf = NULL; /* Now owned by topic */
-                } else
-                        rkt = NULL;
-
-                while (run) {
-                        const struct rd_kafka_metadata *metadata;
-
-                        /* Fetch metadata */
-                        err = rd_kafka_metadata(rk, rkt ? 0 : 1, rkt,
-                                                &metadata, 5000);
-                        if (err != RD_KAFKA_RESP_ERR_NO_ERROR) {
-                                fprintf(stderr,
-                                        "%% Failed to acquire metadata: %s\n",
-                                        rd_kafka_err2str(err));
-                                run = 0;
-                                break;
-                        }
-
-                        metadata_print(topic, metadata);
-
-                        rd_kafka_metadata_destroy(metadata);
-                        run = 0;
-                }
-
-		/* Destroy topic */
-		if (rkt)
-			rd_kafka_topic_destroy(rkt);
-
-		/* Destroy the handle */
-		rd_kafka_destroy(rk);
-
-                if (topic_conf)
-                        rd_kafka_topic_conf_destroy(topic_conf);
-
-
-                /* Exit right away, dont wait for background cleanup, we haven't
-                 * done anything important anyway. */
-                exit(err ? 2 : 0);
-        }
-
-        if (hdrs)
-                rd_kafka_headers_destroy(hdrs);
-
-        if (topic_conf)
-                rd_kafka_topic_conf_destroy(topic_conf);
-
-	/* Let background threads clean up and terminate cleanly. */
-	run = 5;
-	while (run-- > 0 && rd_kafka_wait_destroyed(1000) == -1)
-		printf("Waiting for librdkafka to decommission\n");
-	if (run <= 0)
-		rd_kafka_dump(stdout, rk);
-
-	return 0;
-}
diff --git a/thirdparty/librdkafka-0.11.4/examples/rdkafka_example.cpp b/thirdparty/librdkafka-0.11.4/examples/rdkafka_example.cpp
deleted file mode 100644
index 30d0d0e..0000000
--- a/thirdparty/librdkafka-0.11.4/examples/rdkafka_example.cpp
+++ /dev/null
@@ -1,645 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2014, Magnus Edenhill
- * All rights reserved.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met: 
- * 
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer. 
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution. 
- * 
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/**
- * Apache Kafka consumer & producer example programs
- * using the Kafka driver from librdkafka
- * (https://github.com/edenhill/librdkafka)
- */
-
-#include <iostream>
-#include <string>
-#include <cstdlib>
-#include <cstdio>
-#include <csignal>
-#include <cstring>
-
-#ifdef _MSC_VER
-#include "../win32/wingetopt.h"
-#elif _AIX
-#include <unistd.h>
-#else
-#include <getopt.h>
-#endif
-
-/*
- * Typically include path in a real application would be
- * #include <librdkafka/rdkafkacpp.h>
- */
-#include "rdkafkacpp.h"
-
-
-static void metadata_print (const std::string &topic,
-                            const RdKafka::Metadata *metadata) {
-  std::cout << "Metadata for " << (topic.empty() ? "" : "all topics")
-           << "(from broker "  << metadata->orig_broker_id()
-           << ":" << metadata->orig_broker_name() << std::endl;
-
-  /* Iterate brokers */
-  std::cout << " " << metadata->brokers()->size() << " brokers:" << std::endl;
-  RdKafka::Metadata::BrokerMetadataIterator ib;
-  for (ib = metadata->brokers()->begin();
-       ib != metadata->brokers()->end();
-       ++ib) {
-    std::cout << "  broker " << (*ib)->id() << " at "
-              << (*ib)->host() << ":" << (*ib)->port() << std::endl;
-  }
-  /* Iterate topics */
-  std::cout << metadata->topics()->size() << " topics:" << std::endl;
-  RdKafka::Metadata::TopicMetadataIterator it;
-  for (it = metadata->topics()->begin();
-       it != metadata->topics()->end();
-       ++it) {
-    std::cout << "  topic \""<< (*it)->topic() << "\" with "
-              << (*it)->partitions()->size() << " partitions:";
-
-    if ((*it)->err() != RdKafka::ERR_NO_ERROR) {
-      std::cout << " " << err2str((*it)->err());
-      if ((*it)->err() == RdKafka::ERR_LEADER_NOT_AVAILABLE)
-        std::cout << " (try again)";
-    }
-    std::cout << std::endl;
-
-    /* Iterate topic's partitions */
-    RdKafka::TopicMetadata::PartitionMetadataIterator ip;
-    for (ip = (*it)->partitions()->begin();
-         ip != (*it)->partitions()->end();
-         ++ip) {
-      std::cout << "    partition " << (*ip)->id()
-                << ", leader " << (*ip)->leader()
-                << ", replicas: ";
-
-      /* Iterate partition's replicas */
-      RdKafka::PartitionMetadata::ReplicasIterator ir;
-      for (ir = (*ip)->replicas()->begin();
-           ir != (*ip)->replicas()->end();
-           ++ir) {
-        std::cout << (ir == (*ip)->replicas()->begin() ? "":",") << *ir;
-      }
-
-      /* Iterate partition's ISRs */
-      std::cout << ", isrs: ";
-      RdKafka::PartitionMetadata::ISRSIterator iis;
-      for (iis = (*ip)->isrs()->begin(); iis != (*ip)->isrs()->end() ; ++iis)
-        std::cout << (iis == (*ip)->isrs()->begin() ? "":",") << *iis;
-
-      if ((*ip)->err() != RdKafka::ERR_NO_ERROR)
-        std::cout << ", " << RdKafka::err2str((*ip)->err()) << std::endl;
-      else
-        std::cout << std::endl;
-    }
-  }
-}
-
-static bool run = true;
-static bool exit_eof = false;
-
-static void sigterm (int sig) {
-  run = false;
-}
-
-
-class ExampleDeliveryReportCb : public RdKafka::DeliveryReportCb {
- public:
-  void dr_cb (RdKafka::Message &message) {
-    std::cout << "Message delivery for (" << message.len() << " bytes): " <<
-        message.errstr() << std::endl;
-    if (message.key())
-      std::cout << "Key: " << *(message.key()) << ";" << std::endl;
-  }
-};
-
-
-class ExampleEventCb : public RdKafka::EventCb {
- public:
-  void event_cb (RdKafka::Event &event) {
-    switch (event.type())
-    {
-      case RdKafka::Event::EVENT_ERROR:
-        std::cerr << "ERROR (" << RdKafka::err2str(event.err()) << "): " <<
-            event.str() << std::endl;
-        if (event.err() == RdKafka::ERR__ALL_BROKERS_DOWN)
-          run = false;
-        break;
-
-      case RdKafka::Event::EVENT_STATS:
-        std::cerr << "\"STATS\": " << event.str() << std::endl;
-        break;
-
-      case RdKafka::Event::EVENT_LOG:
-        fprintf(stderr, "LOG-%i-%s: %s\n",
-                event.severity(), event.fac().c_str(), event.str().c_str());
-        break;
-
-      default:
-        std::cerr << "EVENT " << event.type() <<
-            " (" << RdKafka::err2str(event.err()) << "): " <<
-            event.str() << std::endl;
-        break;
-    }
-  }
-};
-
-
-/* Use of this partitioner is pretty pointless since no key is provided
- * in the produce() call. */
-class MyHashPartitionerCb : public RdKafka::PartitionerCb {
- public:
-  int32_t partitioner_cb (const RdKafka::Topic *topic, const std::string *key,
-                          int32_t partition_cnt, void *msg_opaque) {
-    return djb_hash(key->c_str(), key->size()) % partition_cnt;
-  }
- private:
-
-  static inline unsigned int djb_hash (const char *str, size_t len) {
-    unsigned int hash = 5381;
-    for (size_t i = 0 ; i < len ; i++)
-      hash = ((hash << 5) + hash) + str[i];
-    return hash;
-  }
-};
-
-void msg_consume(RdKafka::Message* message, void* opaque) {
-  switch (message->err()) {
-    case RdKafka::ERR__TIMED_OUT:
-      break;
-
-    case RdKafka::ERR_NO_ERROR:
-      /* Real message */
-      std::cout << "Read msg at offset " << message->offset() << std::endl;
-      if (message->key()) {
-        std::cout << "Key: " << *message->key() << std::endl;
-      }
-      printf("%.*s\n",
-        static_cast<int>(message->len()),
-        static_cast<const char *>(message->payload()));
-      break;
-
-    case RdKafka::ERR__PARTITION_EOF:
-      /* Last message */
-      if (exit_eof) {
-        run = false;
-      }
-      break;
-
-    case RdKafka::ERR__UNKNOWN_TOPIC:
-    case RdKafka::ERR__UNKNOWN_PARTITION:
-      std::cerr << "Consume failed: " << message->errstr() << std::endl;
-      run = false;
-      break;
-
-    default:
-      /* Errors */
-      std::cerr << "Consume failed: " << message->errstr() << std::endl;
-      run = false;
-  }
-}
-
-
-class ExampleConsumeCb : public RdKafka::ConsumeCb {
- public:
-  void consume_cb (RdKafka::Message &msg, void *opaque) {
-    msg_consume(&msg, opaque);
-  }
-};
-
-
-
-int main (int argc, char **argv) {
-  std::string brokers = "localhost";
-  std::string errstr;
-  std::string topic_str;
-  std::string mode;
-  std::string debug;
-  int32_t partition = RdKafka::Topic::PARTITION_UA;
-  int64_t start_offset = RdKafka::Topic::OFFSET_BEGINNING;
-  bool do_conf_dump = false;
-  int opt;
-  MyHashPartitionerCb hash_partitioner;
-  int use_ccb = 0;
-
-  /*
-   * Create configuration objects
-   */
-  RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
-  RdKafka::Conf *tconf = RdKafka::Conf::create(RdKafka::Conf::CONF_TOPIC);
-
-
-  while ((opt = getopt(argc, argv, "PCLt:p:b:z:qd:o:eX:AM:f:")) != -1) {
-    switch (opt) {
-    case 'P':
-    case 'C':
-    case 'L':
-      mode = opt;
-      break;
-    case 't':
-      topic_str = optarg;
-      break;
-    case 'p':
-      if (!strcmp(optarg, "random"))
-        /* default */;
-      else if (!strcmp(optarg, "hash")) {
-        if (tconf->set("partitioner_cb", &hash_partitioner, errstr) !=
-            RdKafka::Conf::CONF_OK) {
-          std::cerr << errstr << std::endl;
-          exit(1);
-        }
-      } else
-        partition = std::atoi(optarg);
-      break;
-    case 'b':
-      brokers = optarg;
-      break;
-    case 'z':
-      if (conf->set("compression.codec", optarg, errstr) !=
-	  RdKafka::Conf::CONF_OK) {
-	std::cerr << errstr << std::endl;
-	exit(1);
-      }
-      break;
-    case 'o':
-      if (!strcmp(optarg, "end"))
-	start_offset = RdKafka::Topic::OFFSET_END;
-      else if (!strcmp(optarg, "beginning"))
-	start_offset = RdKafka::Topic::OFFSET_BEGINNING;
-      else if (!strcmp(optarg, "stored"))
-	start_offset = RdKafka::Topic::OFFSET_STORED;
-      else
-	start_offset = strtoll(optarg, NULL, 10);
-      break;
-    case 'e':
-      exit_eof = true;
-      break;
-    case 'd':
-      debug = optarg;
-      break;
-    case 'M':
-      if (conf->set("statistics.interval.ms", optarg, errstr) !=
-          RdKafka::Conf::CONF_OK) {
-        std::cerr << errstr << std::endl;
-        exit(1);
-      }
-      break;
-    case 'X':
-      {
-	char *name, *val;
-
-	if (!strcmp(optarg, "dump")) {
-	  do_conf_dump = true;
-	  continue;
-	}
-
-	name = optarg;
-	if (!(val = strchr(name, '='))) {
-          std::cerr << "%% Expected -X property=value, not " <<
-              name << std::endl;
-	  exit(1);
-	}
-
-	*val = '\0';
-	val++;
-
-	/* Try "topic." prefixed properties on topic
-	 * conf first, and then fall through to global if
-	 * it didnt match a topic configuration property. */
-        RdKafka::Conf::ConfResult res;
-	if (!strncmp(name, "topic.", strlen("topic.")))
-          res = tconf->set(name+strlen("topic."), val, errstr);
-        else
-	  res = conf->set(name, val, errstr);
-
-	if (res != RdKafka::Conf::CONF_OK) {
-          std::cerr << errstr << std::endl;
-	  exit(1);
-	}
-      }
-      break;
-
-      case 'f':
-        if (!strcmp(optarg, "ccb"))
-          use_ccb = 1;
-        else {
-          std::cerr << "Unknown option: " << optarg << std::endl;
-          exit(1);
-        }
-        break;
-
-    default:
-      goto usage;
-    }
-  }
-
-  if (mode.empty() || (topic_str.empty() && mode != "L") || optind != argc) {
-  usage:
-	  std::string features;
-	  conf->get("builtin.features", features);
-    fprintf(stderr,
-            "Usage: %s [-C|-P] -t <topic> "
-            "[-p <partition>] [-b <host1:port1,host2:port2,..>]\n"
-            "\n"
-            "librdkafka version %s (0x%08x, builtin.features \"%s\")\n"
-            "\n"
-            " Options:\n"
-            "  -C | -P         Consumer or Producer mode\n"
-            "  -L              Metadata list mode\n"
-            "  -t <topic>      Topic to fetch / produce\n"
-            "  -p <num>        Partition (random partitioner)\n"
-            "  -p <func>       Use partitioner:\n"
-            "                  random (default), hash\n"
-            "  -b <brokers>    Broker address (localhost:9092)\n"
-            "  -z <codec>      Enable compression:\n"
-            "                  none|gzip|snappy\n"
-            "  -o <offset>     Start offset (consumer)\n"
-            "  -e              Exit consumer when last message\n"
-            "                  in partition has been received.\n"
-            "  -d [facs..]     Enable debugging contexts:\n"
-            "                  %s\n"
-            "  -M <intervalms> Enable statistics\n"
-            "  -X <prop=name>  Set arbitrary librdkafka "
-            "configuration property\n"
-            "                  Properties prefixed with \"topic.\" "
-            "will be set on topic object.\n"
-            "                  Use '-X list' to see the full list\n"
-            "                  of supported properties.\n"
-            "  -f <flag>       Set option:\n"
-            "                     ccb - use consume_callback\n"
-            "\n"
-            " In Consumer mode:\n"
-            "  writes fetched messages to stdout\n"
-            " In Producer mode:\n"
-            "  reads messages from stdin and sends to broker\n"
-            "\n"
-            "\n"
-            "\n",
-	    argv[0],
-	    RdKafka::version_str().c_str(), RdKafka::version(),
-		features.c_str(),
-	    RdKafka::get_debug_contexts().c_str());
-	exit(1);
-  }
-
-
-  /*
-   * Set configuration properties
-   */
-  conf->set("metadata.broker.list", brokers, errstr);
-
-  if (!debug.empty()) {
-    if (conf->set("debug", debug, errstr) != RdKafka::Conf::CONF_OK) {
-      std::cerr << errstr << std::endl;
-      exit(1);
-    }
-  }
-
-  ExampleEventCb ex_event_cb;
-  conf->set("event_cb", &ex_event_cb, errstr);
-
-  if (do_conf_dump) {
-    int pass;
-
-    for (pass = 0 ; pass < 2 ; pass++) {
-      std::list<std::string> *dump;
-      if (pass == 0) {
-        dump = conf->dump();
-        std::cout << "# Global config" << std::endl;
-      } else {
-        dump = tconf->dump();
-        std::cout << "# Topic config" << std::endl;
-      }
-
-      for (std::list<std::string>::iterator it = dump->begin();
-           it != dump->end(); ) {
-        std::cout << *it << " = ";
-        it++;
-        std::cout << *it << std::endl;
-        it++;
-      }
-      std::cout << std::endl;
-    }
-    exit(0);
-  }
-
-  signal(SIGINT, sigterm);
-  signal(SIGTERM, sigterm);
-
-
-  if (mode == "P") {
-    /*
-     * Producer mode
-     */
-
-    if(topic_str.empty())
-      goto usage;
-
-    ExampleDeliveryReportCb ex_dr_cb;
-
-    /* Set delivery report callback */
-    conf->set("dr_cb", &ex_dr_cb, errstr);
-
-    /*
-     * Create producer using accumulated global configuration.
-     */
-    RdKafka::Producer *producer = RdKafka::Producer::create(conf, errstr);
-    if (!producer) {
-      std::cerr << "Failed to create producer: " << errstr << std::endl;
-      exit(1);
-    }
-
-    std::cout << "% Created producer " << producer->name() << std::endl;
-
-    /*
-     * Create topic handle.
-     */
-    RdKafka::Topic *topic = RdKafka::Topic::create(producer, topic_str,
-						   tconf, errstr);
-    if (!topic) {
-      std::cerr << "Failed to create topic: " << errstr << std::endl;
-      exit(1);
-    }
-
-    /*
-     * Read messages from stdin and produce to broker.
-     */
-    for (std::string line; run && std::getline(std::cin, line);) {
-      if (line.empty()) {
-        producer->poll(0);
-	continue;
-      }
-
-      /*
-       * Produce message
-       */
-      RdKafka::ErrorCode resp =
-	producer->produce(topic, partition,
-			  RdKafka::Producer::RK_MSG_COPY /* Copy payload */,
-			  const_cast<char *>(line.c_str()), line.size(),
-			  NULL, NULL);
-      if (resp != RdKafka::ERR_NO_ERROR)
-	std::cerr << "% Produce failed: " <<
-	  RdKafka::err2str(resp) << std::endl;
-      else
-	std::cerr << "% Produced message (" << line.size() << " bytes)" <<
-	  std::endl;
-
-      producer->poll(0);
-    }
-    run = true;
-
-    while (run && producer->outq_len() > 0) {
-      std::cerr << "Waiting for " << producer->outq_len() << std::endl;
-      producer->poll(1000);
-    }
-
-    delete topic;
-    delete producer;
-
-
-  } else if (mode == "C") {
-    /*
-     * Consumer mode
-     */
-
-    if(topic_str.empty())
-      goto usage;
-
-    /*
-     * Create consumer using accumulated global configuration.
-     */
-    RdKafka::Consumer *consumer = RdKafka::Consumer::create(conf, errstr);
-    if (!consumer) {
-      std::cerr << "Failed to create consumer: " << errstr << std::endl;
-      exit(1);
-    }
-
-    std::cout << "% Created consumer " << consumer->name() << std::endl;
-
-    /*
-     * Create topic handle.
-     */
-    RdKafka::Topic *topic = RdKafka::Topic::create(consumer, topic_str,
-						   tconf, errstr);
... 81198 lines suppressed ...