Posted to commits@nifi.apache.org by bq...@apache.org on 2017/11/16 21:15:58 UTC

[28/42] nifi-minifi-cpp git commit: MINIFICPP-274: PutKafka Processor

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/9f66960e/thirdparty/librdkafka-0.11.1/src/queue.h
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/queue.h b/thirdparty/librdkafka-0.11.1/src/queue.h
new file mode 100644
index 0000000..d1ba148
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.1/src/queue.h
@@ -0,0 +1,850 @@
+/*	$NetBSD: queue.h,v 1.68 2014/11/19 08:10:01 uebayasi Exp $	*/
+
+/*
+ * Copyright (c) 1991, 1993
+ *	The Regents of the University of California.  All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *	@(#)queue.h	8.5 (Berkeley) 8/20/94
+ */
+
+#ifndef	_SYS_QUEUE_H_
+#define	_SYS_QUEUE_H_
+
+/*
+ * This file defines five types of data structures: singly-linked lists,
+ * lists, simple queues, tail queues, and circular queues.
+ *
+ * A singly-linked list is headed by a single forward pointer. The
+ * elements are singly linked for minimum space and pointer manipulation
+ * overhead at the expense of O(n) removal for arbitrary elements. New
+ * elements can be added to the list after an existing element or at the
+ * head of the list.  Elements being removed from the head of the list
+ * should use the explicit macro for this purpose for optimum
+ * efficiency. A singly-linked list may only be traversed in the forward
+ * direction.  Singly-linked lists are ideal for applications with large
+ * datasets and few or no removals or for implementing a LIFO queue.
+ *
+ * A list is headed by a single forward pointer (or an array of forward
+ * pointers for a hash table header). The elements are doubly linked
+ * so that an arbitrary element can be removed without a need to
+ * traverse the list. New elements can be added to the list before
+ * or after an existing element or at the head of the list. A list
+ * may only be traversed in the forward direction.
+ *
+ * A simple queue is headed by a pair of pointers, one the head of the
+ * list and the other to the tail of the list. The elements are singly
+ * linked to save space, so elements can only be removed from the
+ * head of the list. New elements can be added to the list after
+ * an existing element, at the head of the list, or at the end of the
+ * list. A simple queue may only be traversed in the forward direction.
+ *
+ * A tail queue is headed by a pair of pointers, one to the head of the
+ * list and the other to the tail of the list. The elements are doubly
+ * linked so that an arbitrary element can be removed without a need to
+ * traverse the list. New elements can be added to the list before or
+ * after an existing element, at the head of the list, or at the end of
+ * the list. A tail queue may be traversed in either direction.
+ *
+ * A circle queue is headed by a pair of pointers, one to the head of the
+ * list and the other to the tail of the list. The elements are doubly
+ * linked so that an arbitrary element can be removed without a need to
+ * traverse the list. New elements can be added to the list before or after
+ * an existing element, at the head of the list, or at the end of the list.
+ * A circle queue may be traversed in either direction, but has a more
+ * complex end of list detection.
+ *
+ * For details on the use of these macros, see the queue(3) manual page.
+ */
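As a quick orientation, here is a minimal, illustrative sketch (names such as 'struct item' are placeholders) of the singly-linked list macros defined below; the other families (LIST, SIMPLEQ, TAILQ, CIRCLEQ) follow the same head/entry/insert/foreach pattern:

    #include <stdio.h>
    #include <stdlib.h>
    #include "queue.h"

    struct item {
            int value;
            SLIST_ENTRY(item) link;          /* embedded forward pointer */
    };

    SLIST_HEAD(item_list, item);             /* declares struct item_list */

    int main (void) {
            struct item_list head = SLIST_HEAD_INITIALIZER(head);
            struct item *it;
            int i;

            for (i = 0; i < 3; i++) {
                    it = malloc(sizeof(*it));
                    it->value = i;
                    SLIST_INSERT_HEAD(&head, it, link);
            }

            SLIST_FOREACH(it, &head, link)   /* prints 2, 1, 0 */
                    printf("%d\n", it->value);

            while (!SLIST_EMPTY(&head)) {
                    it = SLIST_FIRST(&head);
                    SLIST_REMOVE_HEAD(&head, link);
                    free(it);
            }
            return 0;
    }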
+
+/*
+ * Include the definition of NULL only on NetBSD because sys/null.h
+ * is not available elsewhere.  This conditional makes the header
+ * portable and it can simply be dropped verbatim into any system.
+ * The caveat is that on other systems some other header
+ * must provide NULL before the macros can be used.
+ */
+#ifdef __NetBSD__
+#include <sys/null.h>
+#endif
+
+#if defined(QUEUEDEBUG)
+# if defined(_KERNEL)
+#  define QUEUEDEBUG_ABORT(...) panic(__VA_ARGS__)
+# else
+#  include <err.h>
+#  define QUEUEDEBUG_ABORT(...) err(1, __VA_ARGS__)
+# endif
+#endif
+
+/*
+ * Singly-linked List definitions.
+ */
+#define	SLIST_HEAD(name, type)						\
+struct name {								\
+	struct type *slh_first;	/* first element */			\
+}
+
+#define	SLIST_HEAD_INITIALIZER(head)					\
+	{ NULL }
+
+#define	SLIST_ENTRY(type)						\
+struct {								\
+	struct type *sle_next;	/* next element */			\
+}
+
+/*
+ * Singly-linked List access methods.
+ */
+#define	SLIST_FIRST(head)	((head)->slh_first)
+#define	SLIST_END(head)		NULL
+#define	SLIST_EMPTY(head)	((head)->slh_first == NULL)
+#define	SLIST_NEXT(elm, field)	((elm)->field.sle_next)
+
+#define	SLIST_FOREACH(var, head, field)					\
+	for((var) = (head)->slh_first;					\
+	    (var) != SLIST_END(head);					\
+	    (var) = (var)->field.sle_next)
+
+#define	SLIST_FOREACH_SAFE(var, head, field, tvar)			\
+	for ((var) = SLIST_FIRST((head));				\
+	    (var) != SLIST_END(head) &&					\
+	    ((tvar) = SLIST_NEXT((var), field), 1);			\
+	    (var) = (tvar))
+
+/*
+ * Singly-linked List functions.
+ */
+#define	SLIST_INIT(head) do {						\
+	(head)->slh_first = SLIST_END(head);				\
+} while (/*CONSTCOND*/0)
+
+#define	SLIST_INSERT_AFTER(slistelm, elm, field) do {			\
+	(elm)->field.sle_next = (slistelm)->field.sle_next;		\
+	(slistelm)->field.sle_next = (elm);				\
+} while (/*CONSTCOND*/0)
+
+#define	SLIST_INSERT_HEAD(head, elm, field) do {			\
+	(elm)->field.sle_next = (head)->slh_first;			\
+	(head)->slh_first = (elm);					\
+} while (/*CONSTCOND*/0)
+
+#define	SLIST_REMOVE_AFTER(slistelm, field) do {			\
+	(slistelm)->field.sle_next =					\
+	    SLIST_NEXT(SLIST_NEXT((slistelm), field), field);		\
+} while (/*CONSTCOND*/0)
+
+#define	SLIST_REMOVE_HEAD(head, field) do {				\
+	(head)->slh_first = (head)->slh_first->field.sle_next;		\
+} while (/*CONSTCOND*/0)
+
+#define	SLIST_REMOVE(head, elm, type, field) do {			\
+	if ((head)->slh_first == (elm)) {				\
+		SLIST_REMOVE_HEAD((head), field);			\
+	}								\
+	else {								\
+		struct type *curelm = (head)->slh_first;		\
+		while(curelm->field.sle_next != (elm))			\
+			curelm = curelm->field.sle_next;		\
+		curelm->field.sle_next =				\
+		    curelm->field.sle_next->field.sle_next;		\
+	}								\
+} while (/*CONSTCOND*/0)
+
+
+/*
+ * List definitions.
+ */
+#define	LIST_HEAD(name, type)						\
+struct name {								\
+	struct type *lh_first;	/* first element */			\
+}
+
+#define	LIST_HEAD_INITIALIZER(head)					\
+	{ NULL }
+
+#define	LIST_ENTRY(type)						\
+struct {								\
+	struct type *le_next;	/* next element */			\
+	struct type **le_prev;	/* address of previous next element */	\
+}
+
+/*
+ * List access methods.
+ */
+#define	LIST_FIRST(head)		((head)->lh_first)
+#define	LIST_END(head)			NULL
+#define	LIST_EMPTY(head)		((head)->lh_first == LIST_END(head))
+#define	LIST_NEXT(elm, field)		((elm)->field.le_next)
+
+#define	LIST_FOREACH(var, head, field)					\
+	for ((var) = ((head)->lh_first);				\
+	    (var) != LIST_END(head);					\
+	    (var) = ((var)->field.le_next))
+
+#define	LIST_FOREACH_SAFE(var, head, field, tvar)			\
+	for ((var) = LIST_FIRST((head));				\
+	    (var) != LIST_END(head) &&					\
+	    ((tvar) = LIST_NEXT((var), field), 1);			\
+	    (var) = (tvar))
+
+#define	LIST_MOVE(head1, head2) do {					\
+	LIST_INIT((head2));						\
+	if (!LIST_EMPTY((head1))) {					\
+		(head2)->lh_first = (head1)->lh_first;			\
+		LIST_INIT((head1));					\
+	}								\
+} while (/*CONSTCOND*/0)
+
+/*
+ * List functions.
+ */
+#if defined(QUEUEDEBUG)
+#define	QUEUEDEBUG_LIST_INSERT_HEAD(head, elm, field)			\
+	if ((head)->lh_first &&						\
+	    (head)->lh_first->field.le_prev != &(head)->lh_first)	\
+		QUEUEDEBUG_ABORT("LIST_INSERT_HEAD %p %s:%d", (head),	\
+		    __FILE__, __LINE__);
+#define	QUEUEDEBUG_LIST_OP(elm, field)					\
+	if ((elm)->field.le_next &&					\
+	    (elm)->field.le_next->field.le_prev !=			\
+	    &(elm)->field.le_next)					\
+		QUEUEDEBUG_ABORT("LIST_* forw %p %s:%d", (elm),		\
+		    __FILE__, __LINE__);				\
+	if (*(elm)->field.le_prev != (elm))				\
+		QUEUEDEBUG_ABORT("LIST_* back %p %s:%d", (elm),		\
+		    __FILE__, __LINE__);
+#define	QUEUEDEBUG_LIST_POSTREMOVE(elm, field)				\
+	(elm)->field.le_next = (void *)1L;				\
+	(elm)->field.le_prev = (void *)1L;
+#else
+#define	QUEUEDEBUG_LIST_INSERT_HEAD(head, elm, field)
+#define	QUEUEDEBUG_LIST_OP(elm, field)
+#define	QUEUEDEBUG_LIST_POSTREMOVE(elm, field)
+#endif
+
+#define	LIST_INIT(head) do {						\
+	(head)->lh_first = LIST_END(head);				\
+} while (/*CONSTCOND*/0)
+
+#define	LIST_INSERT_AFTER(listelm, elm, field) do {			\
+	QUEUEDEBUG_LIST_OP((listelm), field)				\
+	if (((elm)->field.le_next = (listelm)->field.le_next) != 	\
+	    LIST_END(head))						\
+		(listelm)->field.le_next->field.le_prev =		\
+		    &(elm)->field.le_next;				\
+	(listelm)->field.le_next = (elm);				\
+	(elm)->field.le_prev = &(listelm)->field.le_next;		\
+} while (/*CONSTCOND*/0)
+
+#define	LIST_INSERT_BEFORE(listelm, elm, field) do {			\
+	QUEUEDEBUG_LIST_OP((listelm), field)				\
+	(elm)->field.le_prev = (listelm)->field.le_prev;		\
+	(elm)->field.le_next = (listelm);				\
+	*(listelm)->field.le_prev = (elm);				\
+	(listelm)->field.le_prev = &(elm)->field.le_next;		\
+} while (/*CONSTCOND*/0)
+
+#define	LIST_INSERT_HEAD(head, elm, field) do {				\
+	QUEUEDEBUG_LIST_INSERT_HEAD((head), (elm), field)		\
+	if (((elm)->field.le_next = (head)->lh_first) != LIST_END(head))\
+		(head)->lh_first->field.le_prev = &(elm)->field.le_next;\
+	(head)->lh_first = (elm);					\
+	(elm)->field.le_prev = &(head)->lh_first;			\
+} while (/*CONSTCOND*/0)
+
+#define	LIST_REMOVE(elm, field) do {					\
+	QUEUEDEBUG_LIST_OP((elm), field)				\
+	if ((elm)->field.le_next != NULL)				\
+		(elm)->field.le_next->field.le_prev = 			\
+		    (elm)->field.le_prev;				\
+	*(elm)->field.le_prev = (elm)->field.le_next;			\
+	QUEUEDEBUG_LIST_POSTREMOVE((elm), field)			\
+} while (/*CONSTCOND*/0)
+
+#define LIST_REPLACE(elm, elm2, field) do {				\
+	if (((elm2)->field.le_next = (elm)->field.le_next) != NULL)	\
+		(elm2)->field.le_next->field.le_prev =			\
+		    &(elm2)->field.le_next;				\
+	(elm2)->field.le_prev = (elm)->field.le_prev;			\
+	*(elm2)->field.le_prev = (elm2);				\
+	QUEUEDEBUG_LIST_POSTREMOVE((elm), field)			\
+} while (/*CONSTCOND*/0)
+
+/*
+ * Simple queue definitions.
+ */
+#define	SIMPLEQ_HEAD(name, type)					\
+struct name {								\
+	struct type *sqh_first;	/* first element */			\
+	struct type **sqh_last;	/* addr of last next element */		\
+}
+
+#define	SIMPLEQ_HEAD_INITIALIZER(head)					\
+	{ NULL, &(head).sqh_first }
+
+#define	SIMPLEQ_ENTRY(type)						\
+struct {								\
+	struct type *sqe_next;	/* next element */			\
+}
+
+/*
+ * Simple queue access methods.
+ */
+#define	SIMPLEQ_FIRST(head)		((head)->sqh_first)
+#define	SIMPLEQ_END(head)		NULL
+#define	SIMPLEQ_EMPTY(head)		((head)->sqh_first == SIMPLEQ_END(head))
+#define	SIMPLEQ_NEXT(elm, field)	((elm)->field.sqe_next)
+
+#define	SIMPLEQ_FOREACH(var, head, field)				\
+	for ((var) = ((head)->sqh_first);				\
+	    (var) != SIMPLEQ_END(head);					\
+	    (var) = ((var)->field.sqe_next))
+
+#define	SIMPLEQ_FOREACH_SAFE(var, head, field, next)			\
+	for ((var) = ((head)->sqh_first);				\
+	    (var) != SIMPLEQ_END(head) &&				\
+	    ((next = ((var)->field.sqe_next)), 1);			\
+	    (var) = (next))
+
+/*
+ * Simple queue functions.
+ */
+#define	SIMPLEQ_INIT(head) do {						\
+	(head)->sqh_first = NULL;					\
+	(head)->sqh_last = &(head)->sqh_first;				\
+} while (/*CONSTCOND*/0)
+
+#define	SIMPLEQ_INSERT_HEAD(head, elm, field) do {			\
+	if (((elm)->field.sqe_next = (head)->sqh_first) == NULL)	\
+		(head)->sqh_last = &(elm)->field.sqe_next;		\
+	(head)->sqh_first = (elm);					\
+} while (/*CONSTCOND*/0)
+
+#define	SIMPLEQ_INSERT_TAIL(head, elm, field) do {			\
+	(elm)->field.sqe_next = NULL;					\
+	*(head)->sqh_last = (elm);					\
+	(head)->sqh_last = &(elm)->field.sqe_next;			\
+} while (/*CONSTCOND*/0)
+
+#define	SIMPLEQ_INSERT_AFTER(head, listelm, elm, field) do {		\
+	if (((elm)->field.sqe_next = (listelm)->field.sqe_next) == NULL)\
+		(head)->sqh_last = &(elm)->field.sqe_next;		\
+	(listelm)->field.sqe_next = (elm);				\
+} while (/*CONSTCOND*/0)
+
+#define	SIMPLEQ_REMOVE_HEAD(head, field) do {				\
+	if (((head)->sqh_first = (head)->sqh_first->field.sqe_next) == NULL) \
+		(head)->sqh_last = &(head)->sqh_first;			\
+} while (/*CONSTCOND*/0)
+
+#define SIMPLEQ_REMOVE_AFTER(head, elm, field) do {			\
+	if (((elm)->field.sqe_next = (elm)->field.sqe_next->field.sqe_next) \
+	    == NULL)							\
+		(head)->sqh_last = &(elm)->field.sqe_next;		\
+} while (/*CONSTCOND*/0)
+
+#define	SIMPLEQ_REMOVE(head, elm, type, field) do {			\
+	if ((head)->sqh_first == (elm)) {				\
+		SIMPLEQ_REMOVE_HEAD((head), field);			\
+	} else {							\
+		struct type *curelm = (head)->sqh_first;		\
+		while (curelm->field.sqe_next != (elm))			\
+			curelm = curelm->field.sqe_next;		\
+		if ((curelm->field.sqe_next =				\
+			curelm->field.sqe_next->field.sqe_next) == NULL) \
+			    (head)->sqh_last = &(curelm)->field.sqe_next; \
+	}								\
+} while (/*CONSTCOND*/0)
+
+#define	SIMPLEQ_CONCAT(head1, head2) do {				\
+	if (!SIMPLEQ_EMPTY((head2))) {					\
+		*(head1)->sqh_last = (head2)->sqh_first;		\
+		(head1)->sqh_last = (head2)->sqh_last;		\
+		SIMPLEQ_INIT((head2));					\
+	}								\
+} while (/*CONSTCOND*/0)
+
+#define	SIMPLEQ_LAST(head, type, field)					\
+	(SIMPLEQ_EMPTY((head)) ?						\
+		NULL :							\
+	        ((struct type *)(void *)				\
+		((char *)((head)->sqh_last) - offsetof(struct type, field))))
+
+/*
+ * Tail queue definitions.
+ */
+#define	_TAILQ_HEAD(name, type, qual)					\
+struct name {								\
+	qual type *tqh_first;		/* first element */		\
+	qual type *qual *tqh_last;	/* addr of last next element */	\
+}
+#define TAILQ_HEAD(name, type)	_TAILQ_HEAD(name, struct type,)
+
+#define	TAILQ_HEAD_INITIALIZER(head)					\
+	{ TAILQ_END(head), &(head).tqh_first }
+
+#define	_TAILQ_ENTRY(type, qual)					\
+struct {								\
+	qual type *tqe_next;		/* next element */		\
+	qual type *qual *tqe_prev;	/* address of previous next element */\
+}
+#define TAILQ_ENTRY(type)	_TAILQ_ENTRY(struct type,)
+
+/*
+ * Tail queue access methods.
+ */
+#define	TAILQ_FIRST(head)		((head)->tqh_first)
+#define	TAILQ_END(head)			(NULL)
+#define	TAILQ_NEXT(elm, field)		((elm)->field.tqe_next)
+#define	TAILQ_LAST(head, headname) \
+	(*(((struct headname *)((head)->tqh_last))->tqh_last))
+#define	TAILQ_PREV(elm, headname, field) \
+	(*(((struct headname *)((elm)->field.tqe_prev))->tqh_last))
+#define	TAILQ_EMPTY(head)		(TAILQ_FIRST(head) == TAILQ_END(head))
+
+
+#define	TAILQ_FOREACH(var, head, field)					\
+	for ((var) = ((head)->tqh_first);				\
+	    (var) != TAILQ_END(head);					\
+	    (var) = ((var)->field.tqe_next))
+
+#define	TAILQ_FOREACH_SAFE(var, head, field, next)			\
+	for ((var) = ((head)->tqh_first);				\
+	    (var) != TAILQ_END(head) &&					\
+	    ((next) = TAILQ_NEXT(var, field), 1); (var) = (next))
+
+#define	TAILQ_FOREACH_REVERSE(var, head, headname, field)		\
+	for ((var) = (*(((struct headname *)((head)->tqh_last))->tqh_last));\
+	    (var) != TAILQ_END(head);					\
+	    (var) = (*(((struct headname *)((var)->field.tqe_prev))->tqh_last)))
+
+#define	TAILQ_FOREACH_REVERSE_SAFE(var, head, headname, field, prev)	\
+	for ((var) = TAILQ_LAST((head), headname);			\
+	    (var) != TAILQ_END(head) && 				\
+	    ((prev) = TAILQ_PREV((var), headname, field), 1); (var) = (prev))
+
+/*
+ * Tail queue functions.
+ */
+#if defined(QUEUEDEBUG)
+#define	QUEUEDEBUG_TAILQ_INSERT_HEAD(head, elm, field)			\
+	if ((head)->tqh_first &&					\
+	    (head)->tqh_first->field.tqe_prev != &(head)->tqh_first)	\
+		QUEUEDEBUG_ABORT("TAILQ_INSERT_HEAD %p %s:%d", (head),	\
+		    __FILE__, __LINE__);
+#define	QUEUEDEBUG_TAILQ_INSERT_TAIL(head, elm, field)			\
+	if (*(head)->tqh_last != NULL)					\
+		QUEUEDEBUG_ABORT("TAILQ_INSERT_TAIL %p %s:%d", (head),	\
+		    __FILE__, __LINE__);
+#define	QUEUEDEBUG_TAILQ_OP(elm, field)					\
+	if ((elm)->field.tqe_next &&					\
+	    (elm)->field.tqe_next->field.tqe_prev !=			\
+	    &(elm)->field.tqe_next)					\
+		QUEUEDEBUG_ABORT("TAILQ_* forw %p %s:%d", (elm),	\
+		    __FILE__, __LINE__);				\
+	if (*(elm)->field.tqe_prev != (elm))				\
+		QUEUEDEBUG_ABORT("TAILQ_* back %p %s:%d", (elm),	\
+		    __FILE__, __LINE__);
+#define	QUEUEDEBUG_TAILQ_PREREMOVE(head, elm, field)			\
+	if ((elm)->field.tqe_next == NULL &&				\
+	    (head)->tqh_last != &(elm)->field.tqe_next)			\
+		QUEUEDEBUG_ABORT("TAILQ_PREREMOVE head %p elm %p %s:%d",\
+		    (head), (elm), __FILE__, __LINE__);
+#define	QUEUEDEBUG_TAILQ_POSTREMOVE(elm, field)				\
+	(elm)->field.tqe_next = (void *)1L;				\
+	(elm)->field.tqe_prev = (void *)1L;
+#else
+#define	QUEUEDEBUG_TAILQ_INSERT_HEAD(head, elm, field)
+#define	QUEUEDEBUG_TAILQ_INSERT_TAIL(head, elm, field)
+#define	QUEUEDEBUG_TAILQ_OP(elm, field)
+#define	QUEUEDEBUG_TAILQ_PREREMOVE(head, elm, field)
+#define	QUEUEDEBUG_TAILQ_POSTREMOVE(elm, field)
+#endif
+
+#define	TAILQ_INIT(head) do {						\
+	(head)->tqh_first = TAILQ_END(head);				\
+	(head)->tqh_last = &(head)->tqh_first;				\
+} while (/*CONSTCOND*/0)
+
+#define	TAILQ_INSERT_HEAD(head, elm, field) do {			\
+	QUEUEDEBUG_TAILQ_INSERT_HEAD((head), (elm), field)		\
+	if (((elm)->field.tqe_next = (head)->tqh_first) != TAILQ_END(head))\
+		(head)->tqh_first->field.tqe_prev =			\
+		    &(elm)->field.tqe_next;				\
+	else								\
+		(head)->tqh_last = &(elm)->field.tqe_next;		\
+	(head)->tqh_first = (elm);					\
+	(elm)->field.tqe_prev = &(head)->tqh_first;			\
+} while (/*CONSTCOND*/0)
+
+#define	TAILQ_INSERT_TAIL(head, elm, field) do {			\
+	QUEUEDEBUG_TAILQ_INSERT_TAIL((head), (elm), field)		\
+	(elm)->field.tqe_next = TAILQ_END(head);			\
+	(elm)->field.tqe_prev = (head)->tqh_last;			\
+	*(head)->tqh_last = (elm);					\
+	(head)->tqh_last = &(elm)->field.tqe_next;			\
+} while (/*CONSTCOND*/0)
+
+#define	TAILQ_INSERT_AFTER(head, listelm, elm, field) do {		\
+	QUEUEDEBUG_TAILQ_OP((listelm), field)				\
+	if (((elm)->field.tqe_next = (listelm)->field.tqe_next) != 	\
+	    TAILQ_END(head))						\
+		(elm)->field.tqe_next->field.tqe_prev = 		\
+		    &(elm)->field.tqe_next;				\
+	else								\
+		(head)->tqh_last = &(elm)->field.tqe_next;		\
+	(listelm)->field.tqe_next = (elm);				\
+	(elm)->field.tqe_prev = &(listelm)->field.tqe_next;		\
+} while (/*CONSTCOND*/0)
+
+#define	TAILQ_INSERT_BEFORE(listelm, elm, field) do {			\
+	QUEUEDEBUG_TAILQ_OP((listelm), field)				\
+	(elm)->field.tqe_prev = (listelm)->field.tqe_prev;		\
+	(elm)->field.tqe_next = (listelm);				\
+	*(listelm)->field.tqe_prev = (elm);				\
+	(listelm)->field.tqe_prev = &(elm)->field.tqe_next;		\
+} while (/*CONSTCOND*/0)
+
+#define	TAILQ_REMOVE(head, elm, field) do {				\
+	QUEUEDEBUG_TAILQ_PREREMOVE((head), (elm), field)		\
+	QUEUEDEBUG_TAILQ_OP((elm), field)				\
+	if (((elm)->field.tqe_next) != TAILQ_END(head))			\
+		(elm)->field.tqe_next->field.tqe_prev = 		\
+		    (elm)->field.tqe_prev;				\
+	else								\
+		(head)->tqh_last = (elm)->field.tqe_prev;		\
+	*(elm)->field.tqe_prev = (elm)->field.tqe_next;			\
+	QUEUEDEBUG_TAILQ_POSTREMOVE((elm), field);			\
+} while (/*CONSTCOND*/0)
+
+#define TAILQ_REPLACE(head, elm, elm2, field) do {			\
+        if (((elm2)->field.tqe_next = (elm)->field.tqe_next) != 	\
+	    TAILQ_END(head))   						\
+                (elm2)->field.tqe_next->field.tqe_prev =		\
+                    &(elm2)->field.tqe_next;				\
+        else								\
+                (head)->tqh_last = &(elm2)->field.tqe_next;		\
+        (elm2)->field.tqe_prev = (elm)->field.tqe_prev;			\
+        *(elm2)->field.tqe_prev = (elm2);				\
+	QUEUEDEBUG_TAILQ_POSTREMOVE((elm), field);			\
+} while (/*CONSTCOND*/0)
+
+#define	TAILQ_CONCAT(head1, head2, field) do {				\
+	if (!TAILQ_EMPTY(head2)) {					\
+		*(head1)->tqh_last = (head2)->tqh_first;		\
+		(head2)->tqh_first->field.tqe_prev = (head1)->tqh_last;	\
+		(head1)->tqh_last = (head2)->tqh_last;			\
+		TAILQ_INIT((head2));					\
+	}								\
+} while (/*CONSTCOND*/0)
+
+/*
+ * Singly-linked Tail queue declarations.
+ */
+#define	STAILQ_HEAD(name, type)						\
+struct name {								\
+	struct type *stqh_first;	/* first element */		\
+	struct type **stqh_last;	/* addr of last next element */	\
+}
+
+#define	STAILQ_HEAD_INITIALIZER(head)					\
+	{ NULL, &(head).stqh_first }
+
+#define	STAILQ_ENTRY(type)						\
+struct {								\
+	struct type *stqe_next;	/* next element */			\
+}
+
+/*
+ * Singly-linked Tail queue access methods.
+ */
+#define	STAILQ_FIRST(head)	((head)->stqh_first)
+#define	STAILQ_END(head)	NULL
+#define	STAILQ_NEXT(elm, field)	((elm)->field.stqe_next)
+#define	STAILQ_EMPTY(head)	(STAILQ_FIRST(head) == STAILQ_END(head))
+
+/*
+ * Singly-linked Tail queue functions.
+ */
+#define	STAILQ_INIT(head) do {						\
+	(head)->stqh_first = NULL;					\
+	(head)->stqh_last = &(head)->stqh_first;				\
+} while (/*CONSTCOND*/0)
+
+#define	STAILQ_INSERT_HEAD(head, elm, field) do {			\
+	if (((elm)->field.stqe_next = (head)->stqh_first) == NULL)	\
+		(head)->stqh_last = &(elm)->field.stqe_next;		\
+	(head)->stqh_first = (elm);					\
+} while (/*CONSTCOND*/0)
+
+#define	STAILQ_INSERT_TAIL(head, elm, field) do {			\
+	(elm)->field.stqe_next = NULL;					\
+	*(head)->stqh_last = (elm);					\
+	(head)->stqh_last = &(elm)->field.stqe_next;			\
+} while (/*CONSTCOND*/0)
+
+#define	STAILQ_INSERT_AFTER(head, listelm, elm, field) do {		\
+	if (((elm)->field.stqe_next = (listelm)->field.stqe_next) == NULL)\
+		(head)->stqh_last = &(elm)->field.stqe_next;		\
+	(listelm)->field.stqe_next = (elm);				\
+} while (/*CONSTCOND*/0)
+
+#define	STAILQ_REMOVE_HEAD(head, field) do {				\
+	if (((head)->stqh_first = (head)->stqh_first->field.stqe_next) == NULL) \
+		(head)->stqh_last = &(head)->stqh_first;			\
+} while (/*CONSTCOND*/0)
+
+#define	STAILQ_REMOVE(head, elm, type, field) do {			\
+	if ((head)->stqh_first == (elm)) {				\
+		STAILQ_REMOVE_HEAD((head), field);			\
+	} else {							\
+		struct type *curelm = (head)->stqh_first;		\
+		while (curelm->field.stqe_next != (elm))			\
+			curelm = curelm->field.stqe_next;		\
+		if ((curelm->field.stqe_next =				\
+			curelm->field.stqe_next->field.stqe_next) == NULL) \
+			    (head)->stqh_last = &(curelm)->field.stqe_next; \
+	}								\
+} while (/*CONSTCOND*/0)
+
+#define	STAILQ_FOREACH(var, head, field)				\
+	for ((var) = ((head)->stqh_first);				\
+		(var);							\
+		(var) = ((var)->field.stqe_next))
+
+#define	STAILQ_FOREACH_SAFE(var, head, field, tvar)			\
+	for ((var) = STAILQ_FIRST((head));				\
+	    (var) && ((tvar) = STAILQ_NEXT((var), field), 1);		\
+	    (var) = (tvar))
+
+#define	STAILQ_CONCAT(head1, head2) do {				\
+	if (!STAILQ_EMPTY((head2))) {					\
+		*(head1)->stqh_last = (head2)->stqh_first;		\
+		(head1)->stqh_last = (head2)->stqh_last;		\
+		STAILQ_INIT((head2));					\
+	}								\
+} while (/*CONSTCOND*/0)
+
+#define	STAILQ_LAST(head, type, field)					\
+	(STAILQ_EMPTY((head)) ?						\
+		NULL :							\
+	        ((struct type *)(void *)				\
+		((char *)((head)->stqh_last) - offsetof(struct type, field))))
+
+
+#ifndef _KERNEL
+/*
+ * Circular queue definitions. Do not use. We still keep the macros
+ * for compatibility but because of pointer aliasing issues their use
+ * is discouraged!
+ */
+
+/*
+ * __launder_type():  We use this ugly hack to work around the compiler
+ * noticing that two types may not alias each other and eliding tests in code.
+ * We hit this in the CIRCLEQ macros when comparing 'struct name *' and
+ * 'struct type *' (see CIRCLEQ_HEAD()).  Modern compilers (such as GCC
+ * 4.8) declare these comparisons as always false, causing the code to
+ * not run as designed.
+ *
+ * This hack is only to be used for comparisons and thus can be fully const.
+ * Do not use for assignment.
+ *
+ * If we ever choose to change the ABI of the CIRCLEQ macros, we could fix
+ * this by changing the head/tail sentinel values, but see the note above
+ * this one.
+ */
+#ifdef _MSC_VER
+#define __launder_type(x)  ((const void *)(x))
+#else
+static inline const void * __launder_type(const void *);
+static inline const void *
+__launder_type(const void *__x)
+{
+	__asm __volatile("" : "+r" (__x));
+	return __x;
+}
+#endif
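For orientation, CIRCLEQ_FOREACH() further down expands to roughly

    for ((var) = ((head)->cqh_first);
         (var) != CIRCLEQ_ENDC(head);        /* element ptr vs. head ptr */
         (var) = ((var)->field.cqe_next))

so the end-of-list test compares a 'struct type *' element pointer against the (laundered) head pointer. Without the asm barrier in __launder_type() above, an aliasing-aware compiler may conclude the two pointers can never be equal and drop that test entirely.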
+
+#if defined(QUEUEDEBUG)
+#define QUEUEDEBUG_CIRCLEQ_HEAD(head, field)				\
+	if ((head)->cqh_first != CIRCLEQ_ENDC(head) &&			\
+	    (head)->cqh_first->field.cqe_prev != CIRCLEQ_ENDC(head))	\
+		QUEUEDEBUG_ABORT("CIRCLEQ head forw %p %s:%d", (head),	\
+		      __FILE__, __LINE__);				\
+	if ((head)->cqh_last != CIRCLEQ_ENDC(head) &&			\
+	    (head)->cqh_last->field.cqe_next != CIRCLEQ_ENDC(head))	\
+		QUEUEDEBUG_ABORT("CIRCLEQ head back %p %s:%d", (head),	\
+		      __FILE__, __LINE__);
+#define QUEUEDEBUG_CIRCLEQ_ELM(head, elm, field)			\
+	if ((elm)->field.cqe_next == CIRCLEQ_ENDC(head)) {		\
+		if ((head)->cqh_last != (elm))				\
+			QUEUEDEBUG_ABORT("CIRCLEQ elm last %p %s:%d",	\
+			    (elm), __FILE__, __LINE__);			\
+	} else {							\
+		if ((elm)->field.cqe_next->field.cqe_prev != (elm))	\
+			QUEUEDEBUG_ABORT("CIRCLEQ elm forw %p %s:%d",	\
+			    (elm), __FILE__, __LINE__);			\
+	}								\
+	if ((elm)->field.cqe_prev == CIRCLEQ_ENDC(head)) {		\
+		if ((head)->cqh_first != (elm))				\
+			QUEUEDEBUG_ABORT("CIRCLEQ elm first %p %s:%d",	\
+			    (elm), __FILE__, __LINE__);			\
+	} else {							\
+		if ((elm)->field.cqe_prev->field.cqe_next != (elm))	\
+			QUEUEDEBUG_ABORT("CIRCLEQ elm prev %p %s:%d",	\
+			    (elm), __FILE__, __LINE__);			\
+	}
+#define QUEUEDEBUG_CIRCLEQ_POSTREMOVE(elm, field)			\
+	(elm)->field.cqe_next = (void *)1L;				\
+	(elm)->field.cqe_prev = (void *)1L;
+#else
+#define QUEUEDEBUG_CIRCLEQ_HEAD(head, field)
+#define QUEUEDEBUG_CIRCLEQ_ELM(head, elm, field)
+#define QUEUEDEBUG_CIRCLEQ_POSTREMOVE(elm, field)
+#endif
+
+#define	CIRCLEQ_HEAD(name, type)					\
+struct name {								\
+	struct type *cqh_first;		/* first element */		\
+	struct type *cqh_last;		/* last element */		\
+}
+
+#define	CIRCLEQ_HEAD_INITIALIZER(head)					\
+	{ CIRCLEQ_END(&head), CIRCLEQ_END(&head) }
+
+#define	CIRCLEQ_ENTRY(type)						\
+struct {								\
+	struct type *cqe_next;		/* next element */		\
+	struct type *cqe_prev;		/* previous element */		\
+}
+
+/*
+ * Circular queue functions.
+ */
+#define	CIRCLEQ_INIT(head) do {						\
+	(head)->cqh_first = CIRCLEQ_END(head);				\
+	(head)->cqh_last = CIRCLEQ_END(head);				\
+} while (/*CONSTCOND*/0)
+
+#define	CIRCLEQ_INSERT_AFTER(head, listelm, elm, field) do {		\
+	QUEUEDEBUG_CIRCLEQ_HEAD((head), field)				\
+	QUEUEDEBUG_CIRCLEQ_ELM((head), (listelm), field)		\
+	(elm)->field.cqe_next = (listelm)->field.cqe_next;		\
+	(elm)->field.cqe_prev = (listelm);				\
+	if ((listelm)->field.cqe_next == CIRCLEQ_ENDC(head))		\
+		(head)->cqh_last = (elm);				\
+	else								\
+		(listelm)->field.cqe_next->field.cqe_prev = (elm);	\
+	(listelm)->field.cqe_next = (elm);				\
+} while (/*CONSTCOND*/0)
+
+#define	CIRCLEQ_INSERT_BEFORE(head, listelm, elm, field) do {		\
+	QUEUEDEBUG_CIRCLEQ_HEAD((head), field)				\
+	QUEUEDEBUG_CIRCLEQ_ELM((head), (listelm), field)		\
+	(elm)->field.cqe_next = (listelm);				\
+	(elm)->field.cqe_prev = (listelm)->field.cqe_prev;		\
+	if ((listelm)->field.cqe_prev == CIRCLEQ_ENDC(head))		\
+		(head)->cqh_first = (elm);				\
+	else								\
+		(listelm)->field.cqe_prev->field.cqe_next = (elm);	\
+	(listelm)->field.cqe_prev = (elm);				\
+} while (/*CONSTCOND*/0)
+
+#define	CIRCLEQ_INSERT_HEAD(head, elm, field) do {			\
+	QUEUEDEBUG_CIRCLEQ_HEAD((head), field)				\
+	(elm)->field.cqe_next = (head)->cqh_first;			\
+	(elm)->field.cqe_prev = CIRCLEQ_END(head);			\
+	if ((head)->cqh_last == CIRCLEQ_ENDC(head))			\
+		(head)->cqh_last = (elm);				\
+	else								\
+		(head)->cqh_first->field.cqe_prev = (elm);		\
+	(head)->cqh_first = (elm);					\
+} while (/*CONSTCOND*/0)
+
+#define	CIRCLEQ_INSERT_TAIL(head, elm, field) do {			\
+	QUEUEDEBUG_CIRCLEQ_HEAD((head), field)				\
+	(elm)->field.cqe_next = CIRCLEQ_END(head);			\
+	(elm)->field.cqe_prev = (head)->cqh_last;			\
+	if ((head)->cqh_first == CIRCLEQ_ENDC(head))			\
+		(head)->cqh_first = (elm);				\
+	else								\
+		(head)->cqh_last->field.cqe_next = (elm);		\
+	(head)->cqh_last = (elm);					\
+} while (/*CONSTCOND*/0)
+
+#define	CIRCLEQ_REMOVE(head, elm, field) do {				\
+	QUEUEDEBUG_CIRCLEQ_HEAD((head), field)				\
+	QUEUEDEBUG_CIRCLEQ_ELM((head), (elm), field)			\
+	if ((elm)->field.cqe_next == CIRCLEQ_ENDC(head))		\
+		(head)->cqh_last = (elm)->field.cqe_prev;		\
+	else								\
+		(elm)->field.cqe_next->field.cqe_prev =			\
+		    (elm)->field.cqe_prev;				\
+	if ((elm)->field.cqe_prev == CIRCLEQ_ENDC(head))		\
+		(head)->cqh_first = (elm)->field.cqe_next;		\
+	else								\
+		(elm)->field.cqe_prev->field.cqe_next =			\
+		    (elm)->field.cqe_next;				\
+	QUEUEDEBUG_CIRCLEQ_POSTREMOVE((elm), field)			\
+} while (/*CONSTCOND*/0)
+
+#define	CIRCLEQ_FOREACH(var, head, field)				\
+	for ((var) = ((head)->cqh_first);				\
+		(var) != CIRCLEQ_ENDC(head);				\
+		(var) = ((var)->field.cqe_next))
+
+#define	CIRCLEQ_FOREACH_REVERSE(var, head, field)			\
+	for ((var) = ((head)->cqh_last);				\
+		(var) != CIRCLEQ_ENDC(head);				\
+		(var) = ((var)->field.cqe_prev))
+
+/*
+ * Circular queue access methods.
+ */
+#define	CIRCLEQ_FIRST(head)		((head)->cqh_first)
+#define	CIRCLEQ_LAST(head)		((head)->cqh_last)
+/* For comparisons */
+#define	CIRCLEQ_ENDC(head)		(__launder_type(head))
+/* For assignments */
+#define	CIRCLEQ_END(head)		((void *)(head))
+#define	CIRCLEQ_NEXT(elm, field)	((elm)->field.cqe_next)
+#define	CIRCLEQ_PREV(elm, field)	((elm)->field.cqe_prev)
+#define	CIRCLEQ_EMPTY(head)						\
+    (CIRCLEQ_FIRST(head) == CIRCLEQ_ENDC(head))
+
+#define CIRCLEQ_LOOP_NEXT(head, elm, field)				\
+	(((elm)->field.cqe_next == CIRCLEQ_ENDC(head))			\
+	    ? ((head)->cqh_first)					\
+	    : (elm->field.cqe_next))
+#define CIRCLEQ_LOOP_PREV(head, elm, field)				\
+	(((elm)->field.cqe_prev == CIRCLEQ_ENDC(head))			\
+	    ? ((head)->cqh_last)					\
+	    : (elm->field.cqe_prev))
+#endif /* !_KERNEL */
+
+#endif	/* !_SYS_QUEUE_H_ */

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/9f66960e/thirdparty/librdkafka-0.11.1/src/rd.h
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rd.h b/thirdparty/librdkafka-0.11.1/src/rd.h
new file mode 100644
index 0000000..9c2700a
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.1/src/rd.h
@@ -0,0 +1,455 @@
+/*
+ * librd - Rapid Development C library
+ *
+ * Copyright (c) 2012, Magnus Edenhill
+ * All rights reserved.
+ * 
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met: 
+ * 
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer. 
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution. 
+ * 
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+
+#pragma once
+
+#ifndef _MSC_VER
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE  /* for strndup() */
+#endif
+#define __need_IOV_MAX
+#ifndef _POSIX_C_SOURCE
+#define _POSIX_C_SOURCE 200809L  /* for timespec on solaris */
+#endif
+#endif
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <time.h>
+#include <assert.h>
+#include <limits.h>
+
+#include "tinycthread.h"
+#include "rdsysqueue.h"
+
+#ifdef _MSC_VER
+/* Visual Studio */
+#include "win32_config.h"
+#else
+/* POSIX / UNIX based systems */
+#include "../config.h" /* mklove output */
+#endif
+
+#ifdef _MSC_VER
+/* Win32/Visual Studio */
+#include "rdwin32.h"
+
+#else
+/* POSIX / UNIX based systems */
+#include "rdposix.h"
+#endif
+
+#include "rdtypes.h"
+
+
+/* Debug assert, only enabled with --enable-devel */
+#if ENABLE_DEVEL == 1
+#define rd_dassert(cond) rd_assert(cond)
+#else
+#define rd_dassert(cond)  do {} while (0)
+#endif
+
+
+/** Assert if reached */
+#define RD_NOTREACHED() rd_kafka_assert(NULL, !*"/* NOTREACHED */ violated")
+
+
+
+/**
+* Allocator wrappers.
+* We serve under the premise that if a (small) memory
+* allocation fails all hope is lost and the application
+* will fail anyway, so no need to handle it handsomely.
+*/
+static RD_INLINE RD_UNUSED void *rd_calloc(size_t num, size_t sz) {
+	void *p = calloc(num, sz);
+	rd_assert(p);
+	return p;
+}
+
+static RD_INLINE RD_UNUSED void *rd_malloc(size_t sz) {
+	void *p = malloc(sz);
+	rd_assert(p);
+	return p;
+}
+
+static RD_INLINE RD_UNUSED void *rd_realloc(void *ptr, size_t sz) {
+	void *p = realloc(ptr, sz);
+	rd_assert(p);
+	return p;
+}
+
+static RD_INLINE RD_UNUSED void rd_free(void *ptr) {
+	free(ptr);
+}
+
+static RD_INLINE RD_UNUSED char *rd_strdup(const char *s) {
+#ifndef _MSC_VER
+	char *n = strdup(s);
+#else
+	char *n = _strdup(s);
+#endif
+	rd_assert(n);
+	return n;
+}
+
+static RD_INLINE RD_UNUSED char *rd_strndup(const char *s, size_t len) {
+#if HAVE_STRNDUP
+	char *n = strndup(s, len);
+	rd_assert(n);
+#else
+	char *n = malloc(len + 1);
+	rd_assert(n);
+	memcpy(n, s, len);
+	n[len] = '\0';
+#endif
+	return n;
+}
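A short illustrative sketch of the wrappers above: since every one of them asserts on allocation failure, call sites skip NULL checks entirely.

    char *name = rd_strdup("broker-1");       /* asserts if allocation fails */
    int  *tbl  = rd_calloc(16, sizeof(*tbl));

    /* ... use name and tbl ... */

    rd_free(tbl);
    rd_free(name);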
+
+
+
+/*
+ * Portability
+ */
+
+#ifdef strndupa
+#define rd_strndupa(DESTPTR,PTR,LEN)  (*(DESTPTR) = strndupa(PTR,LEN))
+#else
+#define rd_strndupa(DESTPTR,PTR,LEN) (*(DESTPTR) = rd_alloca(LEN+1), \
+      memcpy(*(DESTPTR), (PTR), LEN), *((*(DESTPTR))+(LEN)) = 0)
+#endif
+
+#ifdef strdupa
+#define rd_strdupa(DESTPTR,PTR)  (*(DESTPTR) = strdupa(PTR))
+#else
+#define rd_strdupa(DESTPTR,PTR)  rd_strndupa(DESTPTR,PTR,strlen(PTR))
+#endif
+
+#ifndef IOV_MAX
+#ifdef __APPLE__
+/* Some versions of Mac OS X don't have IOV_MAX */
+#define IOV_MAX 1024
+#elif defined(_MSC_VER)
+/* There is no IOV_MAX on MSVC but it is used internally in librdkafka */
+#define IOV_MAX 1024
+#else
+#error "IOV_MAX not defined"
+#endif
+#endif
+
+
+/* Round/align X upwards to STRIDE, which must be power of 2. */
+#define RD_ROUNDUP(X,STRIDE) (((X) + ((STRIDE) - 1)) & ~(STRIDE-1))
+
+#define RD_ARRAY_SIZE(A)          (sizeof((A)) / sizeof(*(A)))
+#define RD_ARRAYSIZE(A)           RD_ARRAY_SIZE(A)
+#define RD_SIZEOF(TYPE,MEMBER)    sizeof(((TYPE *)NULL)->MEMBER)
+#define RD_OFFSETOF(TYPE,MEMBER)  ((size_t) &(((TYPE *)NULL)->MEMBER))
+
+/**
+ * Returns the 'I'th array element from static sized array 'A'
+ * or NULL if 'I' is out of range.
+ * var-args is an optional prefix to provide the correct return type.
+ */
+#define RD_ARRAY_ELEM(A,I,...)				\
+	((unsigned int)(I) < RD_ARRAY_SIZE(A) ? __VA_ARGS__ (A)[(I)] : NULL)
+
+
+#define RD_STRINGIFY(X)  # X
+
+
+
+#define RD_MIN(a,b) ((a) < (b) ? (a) : (b))
+#define RD_MAX(a,b) ((a) > (b) ? (a) : (b))
+
+
+/**
+ * Cap an integer (of any type) to reside within the defined limit.
+ */
+#define RD_INT_CAP(val,low,hi) \
+	((val) < (low) ? low : ((val) > (hi) ? (hi) : (val)))
+
+
+
+/**
+ * Allocate 'size' bytes, copy 'src', return pointer to new memory.
+ *
+ * Use rd_free() to free the returned pointer.
+*/
+static RD_INLINE RD_UNUSED void *rd_memdup (const void *src, size_t size) {
+	void *dst = rd_malloc(size);
+	memcpy(dst, src, size);
+	return dst;
+}
+
+/**
+ * @brief Memset &OBJ to 0, does automatic sizeof(OBJ).
+ */
+#define RD_MEMZERO(OBJ) memset(&(OBJ), 0, sizeof(OBJ))
+
+
+/**
+ * Generic refcnt interface
+ */
+#ifndef _MSC_VER
+/* Mutexes (critical sections) are slow, even when uncontended, on Windows */
+#define RD_REFCNT_USE_LOCKS 1
+#endif
+
+#ifdef RD_REFCNT_USE_LOCKS
+typedef struct rd_refcnt_t {
+        mtx_t lock;
+        int v;
+} rd_refcnt_t;
+#else
+typedef rd_atomic32_t rd_refcnt_t;
+#endif
+
+#ifdef RD_REFCNT_USE_LOCKS
+static RD_INLINE RD_UNUSED int rd_refcnt_init (rd_refcnt_t *R, int v) {
+        int r;
+        mtx_init(&R->lock, mtx_plain);
+        mtx_lock(&R->lock);
+        r = R->v = v;
+        mtx_unlock(&R->lock);
+        return r;
+}
+#else
+#define rd_refcnt_init(R,v)  rd_atomic32_init(R, v)
+#endif
+
+#ifdef RD_REFCNT_USE_LOCKS
+static RD_INLINE RD_UNUSED void rd_refcnt_destroy (rd_refcnt_t *R) {
+        mtx_lock(&R->lock);
+        rd_assert(R->v == 0);
+        mtx_unlock(&R->lock);
+
+        mtx_destroy(&R->lock);
+}
+#else
+#define rd_refcnt_destroy(R) do { } while (0)
+#endif
+
+
+#ifdef RD_REFCNT_USE_LOCKS
+static RD_INLINE RD_UNUSED int rd_refcnt_set (rd_refcnt_t *R, int v) {
+        int r;
+        mtx_lock(&R->lock);
+        r = R->v = v;
+        mtx_unlock(&R->lock);
+        return r;
+}
+#else
+#define rd_refcnt_set(R,v)  rd_atomic32_set(R, v)
+#endif
+
+
+#ifdef RD_REFCNT_USE_LOCKS
+static RD_INLINE RD_UNUSED int rd_refcnt_add0 (rd_refcnt_t *R) {
+        int r;
+        mtx_lock(&R->lock);
+        r = ++(R->v);
+        mtx_unlock(&R->lock);
+        return r;
+}
+#else
+#define rd_refcnt_add0(R)  rd_atomic32_add(R, 1)
+#endif
+
+static RD_INLINE RD_UNUSED int rd_refcnt_sub0 (rd_refcnt_t *R) {
+        int r;
+#ifdef RD_REFCNT_USE_LOCKS
+        mtx_lock(&R->lock);
+        r = --(R->v);
+        mtx_unlock(&R->lock);
+#else
+        r = rd_atomic32_sub(R, 1);
+#endif
+        if (r < 0)
+                rd_assert(!*"refcnt sub-zero");
+        return r;
+}
+
+#ifdef RD_REFCNT_USE_LOCKS
+static RD_INLINE RD_UNUSED int rd_refcnt_get (rd_refcnt_t *R) {
+        int r;
+        mtx_lock(&R->lock);
+        r = R->v;
+        mtx_unlock(&R->lock);
+        return r;
+}
+#else
+#define rd_refcnt_get(R)   rd_atomic32_get(R)
+#endif
+
+/**
+ * A wrapper for decreasing refcount and calling a destroy function
+ * when refcnt reaches 0.
+ */
+#define rd_refcnt_destroywrapper(REFCNT,DESTROY_CALL) do {      \
+                if (rd_refcnt_sub(REFCNT) > 0)                  \
+                        break;                                  \
+                DESTROY_CALL;                                   \
+        } while (0)
+
+
+#define rd_refcnt_destroywrapper2(REFCNT,WHAT,DESTROY_CALL) do {        \
+                if (rd_refcnt_sub2(REFCNT,WHAT) > 0)                        \
+                        break;                                  \
+                DESTROY_CALL;                                   \
+        } while (0)
+
+#if ENABLE_REFCNT_DEBUG
+#define rd_refcnt_add(R)                                                \
+        (                                                               \
+                printf("REFCNT DEBUG: %-35s %d +1: %16p: %s:%d\n",      \
+                       #R, rd_refcnt_get(R), (R), __FUNCTION__,__LINE__), \
+                rd_refcnt_add0(R)                                       \
+                )
+
+#define rd_refcnt_add2(R,WHAT)  do {                                        \
+                printf("REFCNT DEBUG: %-35s %d +1: %16p: %16s: %s:%d\n",      \
+                       #R, rd_refcnt_get(R), (R), WHAT, __FUNCTION__,__LINE__), \
+                rd_refcnt_add0(R);                                      \
+        } while (0)
+
+
+#define rd_refcnt_sub2(R,WHAT) (                                            \
+                printf("REFCNT DEBUG: %-35s %d -1: %16p: %16s: %s:%d\n",      \
+                       #R, rd_refcnt_get(R), (R), WHAT, __FUNCTION__,__LINE__), \
+                rd_refcnt_sub0(R) )
+
+#define rd_refcnt_sub(R) (                                              \
+                printf("REFCNT DEBUG: %-35s %d -1: %16p: %s:%d\n",      \
+                       #R, rd_refcnt_get(R), (R), __FUNCTION__,__LINE__), \
+                rd_refcnt_sub0(R) )
+
+#else
+#define rd_refcnt_add(R)  rd_refcnt_add0(R)
+#define rd_refcnt_sub(R)  rd_refcnt_sub0(R)
+#endif
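A hypothetical usage sketch of the refcount interface above ('my_obj_t' is a placeholder type): the creator holds one reference, every additional holder takes one with rd_refcnt_add(), and each holder releases through rd_refcnt_destroywrapper(), which runs the destructor only when the count reaches zero.

    typedef struct my_obj_s {
            rd_refcnt_t refcnt;
            char *name;
    } my_obj_t;

    static void my_obj_destroy (my_obj_t *obj) {
            rd_refcnt_destroy(&obj->refcnt);
            rd_free(obj->name);
            rd_free(obj);
    }

    static void example (void) {
            my_obj_t *obj = rd_calloc(1, sizeof(*obj));
            obj->name = rd_strdup("example");
            rd_refcnt_init(&obj->refcnt, 1);   /* creator's reference */

            rd_refcnt_add(&obj->refcnt);       /* hand out a second reference */

            /* each holder releases; the second call reaches zero and destroys */
            rd_refcnt_destroywrapper(&obj->refcnt, my_obj_destroy(obj));
            rd_refcnt_destroywrapper(&obj->refcnt, my_obj_destroy(obj));
    }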
+
+
+
+#if !ENABLE_SHAREDPTR_DEBUG
+
+/**
+ * The non-debug version of shared_ptr is simply a reference counting interface
+ * without any additional costs and no indirections.
+ */
+
+#define RD_SHARED_PTR_TYPE(STRUCT_NAME,WRAPPED_TYPE) WRAPPED_TYPE
+
+
+#define rd_shared_ptr_get_src(FUNC,LINE,OBJ,REFCNT,SPTR_TYPE)	\
+        (rd_refcnt_add(REFCNT), (OBJ))
+#define rd_shared_ptr_get(OBJ,REFCNT,SPTR_TYPE)          \
+        (rd_refcnt_add(REFCNT), (OBJ))
+
+#define rd_shared_ptr_obj(SPTR) (SPTR)
+
+#define rd_shared_ptr_put(SPTR,REF,DESTRUCTOR)                  \
+                rd_refcnt_destroywrapper(REF,DESTRUCTOR)
+
+
+#else
+
+#define RD_SHARED_PTR_TYPE(STRUCT_NAME, WRAPPED_TYPE) \
+        struct STRUCT_NAME {                          \
+                LIST_ENTRY(rd_shptr0_s) link;         \
+                WRAPPED_TYPE *obj;                     \
+                rd_refcnt_t *ref;                     \
+                const char *typename;                 \
+                const char *func;                     \
+                int line;                             \
+        }
+
+
+
+/* Common backing struct compatible with RD_SHARED_PTR_TYPE() types */
+typedef RD_SHARED_PTR_TYPE(rd_shptr0_s, void) rd_shptr0_t;
+
+LIST_HEAD(rd_shptr0_head, rd_shptr0_s);
+extern struct rd_shptr0_head rd_shared_ptr_debug_list;
+extern mtx_t rd_shared_ptr_debug_mtx;
+
+static RD_INLINE RD_UNUSED RD_WARN_UNUSED_RESULT __attribute__((warn_unused_result))
+rd_shptr0_t *rd_shared_ptr_get0 (const char *func, int line,
+                                 const char *typename,
+                                 rd_refcnt_t *ref, void *obj) {
+        rd_shptr0_t *sptr = rd_calloc(1, sizeof(*sptr));
+        sptr->obj = obj;
+        sptr->ref = ref;
+        sptr->typename = typename;
+        sptr->func = func;
+        sptr->line = line;
+
+        mtx_lock(&rd_shared_ptr_debug_mtx);
+        LIST_INSERT_HEAD(&rd_shared_ptr_debug_list, sptr, link);
+        mtx_unlock(&rd_shared_ptr_debug_mtx);
+        return sptr;
+}
+
+#define rd_shared_ptr_get_src(FUNC,LINE,OBJ,REF,SPTR_TYPE)		\
+        (rd_refcnt_add(REF),                                            \
+         (SPTR_TYPE *)rd_shared_ptr_get0(FUNC,LINE, #SPTR_TYPE,REF,OBJ))
+#define rd_shared_ptr_get(OBJ,REF,SPTR_TYPE)	\
+	rd_shared_ptr_get_src(__FUNCTION__, __LINE__, OBJ, REF, SPTR_TYPE)
+
+
+
+#define rd_shared_ptr_obj(SPTR) (SPTR)->obj
+
+#define rd_shared_ptr_put(SPTR,REF,DESTRUCTOR) do {               \
+                if (rd_refcnt_sub(REF) == 0)                      \
+                        DESTRUCTOR;                               \
+                mtx_lock(&rd_shared_ptr_debug_mtx);               \
+                LIST_REMOVE(SPTR, link);                          \
+                mtx_unlock(&rd_shared_ptr_debug_mtx);             \
+                rd_free(SPTR);                                    \
+        } while (0)
+
+void rd_shared_ptrs_dump (void);
+#endif
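A hedged sketch of how the shared-pointer interface above is typically wired up ('my_obj_t' again a placeholder); in the non-debug build the shared pointer is simply the wrapped object plus its refcount, while the debug build additionally tracks every outstanding pointer on rd_shared_ptr_debug_list.

    typedef struct my_obj_s {
            rd_refcnt_t refcnt;
            int payload;
    } my_obj_t;

    typedef RD_SHARED_PTR_TYPE(my_obj_shptr_s, my_obj_t) my_obj_shptr_t;

    static void my_obj_destroy (my_obj_t *obj) {
            rd_refcnt_destroy(&obj->refcnt);
            rd_free(obj);
    }

    static void example (my_obj_t *obj) {
            /* acquire: bumps the refcount and returns a shared pointer */
            my_obj_shptr_t *shptr =
                    rd_shared_ptr_get(obj, &obj->refcnt, my_obj_shptr_t);

            rd_shared_ptr_obj(shptr)->payload = 42;

            /* release: drops the refcount, destroying the object at zero */
            rd_shared_ptr_put(shptr, &obj->refcnt, my_obj_destroy(obj));
    }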
+
+
+#define RD_IF_FREE(PTR,FUNC) do { if ((PTR)) FUNC(PTR); } while (0)
+
+
+/**
+ * @brief Utility types to hold memory,size tuple.
+ */
+
+typedef struct rd_chariov_s {
+        char  *ptr;
+        size_t size;
+} rd_chariov_t;

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/9f66960e/thirdparty/librdkafka-0.11.1/src/rdaddr.c
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdaddr.c b/thirdparty/librdkafka-0.11.1/src/rdaddr.c
new file mode 100644
index 0000000..69625e4
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.1/src/rdaddr.c
@@ -0,0 +1,220 @@
+/*
+ * librd - Rapid Development C library
+ *
+ * Copyright (c) 2012, Magnus Edenhill
+ * All rights reserved.
+ * 
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met: 
+ * 
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer. 
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution. 
+ * 
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#include "rd.h"
+#include "rdaddr.h"
+#include "rdrand.h"
+
+#ifdef _MSC_VER
+#include <WS2tcpip.h>
+#endif
+
+const char *rd_sockaddr2str (const void *addr, int flags) {
+	const rd_sockaddr_inx_t *a = (const rd_sockaddr_inx_t *)addr;
+	static RD_TLS char ret[32][INET6_ADDRSTRLEN + 16];
+	static RD_TLS int  reti = 0;
+	char portstr[64];
+	int of = 0;
+	int niflags = NI_NUMERICSERV;
+
+	reti = (reti + 1) % 32;
+	
+	switch (a->sinx_family)
+	{
+	case AF_INET:
+	case AF_INET6:
+		if (flags & RD_SOCKADDR2STR_F_FAMILY)
+			of += rd_snprintf(&ret[reti][of], sizeof(ret[reti])-of, "ipv%i#",
+				      a->sinx_family == AF_INET ? 4 : 6);
+
+		if ((flags & RD_SOCKADDR2STR_F_PORT) &&
+		    a->sinx_family == AF_INET6)
+			ret[reti][of++] = '[';
+
+		if (!(flags & RD_SOCKADDR2STR_F_RESOLVE))
+			niflags |= NI_NUMERICHOST;
+
+		if (getnameinfo((const struct sockaddr *)a,
+				RD_SOCKADDR_INX_LEN(a),
+				ret[reti]+of, sizeof(ret[reti])-of,
+				(flags & RD_SOCKADDR2STR_F_PORT) ?
+				portstr : NULL,
+				(flags & RD_SOCKADDR2STR_F_PORT) ?
+				sizeof(portstr) : 0,
+				niflags))
+			break;
+
+		
+		if (flags & RD_SOCKADDR2STR_F_PORT) {
+			size_t len = strlen(ret[reti]);
+			rd_snprintf(ret[reti]+len, sizeof(ret[reti])-len,
+				 "%s:%s",
+				 a->sinx_family == AF_INET6 ? "]" : "",
+				 portstr);
+		}
+	
+		return ret[reti];
+	}
+	
+
+	/* Error-case */
+	rd_snprintf(ret[reti], sizeof(ret[reti]), "<unsupported:%s>",
+		 rd_family2str(a->sinx_family));
+	
+	return ret[reti];
+}
+
+
+const char *rd_addrinfo_prepare (const char *nodesvc,
+				 char **node, char **svc) {
+	static RD_TLS char snode[256];
+	static RD_TLS char ssvc[64];
+	const char *t;
+	const char *svct = NULL;
+	size_t nodelen = 0;
+
+	*snode = '\0';
+	*ssvc = '\0';
+
+	if (*nodesvc == '[') {
+		/* "[host]".. (enveloped node name) */
+		if  (!(t = strchr(nodesvc, ']')))
+			return "Missing close-']'";
+		nodesvc++;
+		nodelen = t-nodesvc;
+		svct = t+1;
+
+	} else if (*nodesvc == ':' && *(nodesvc+1) != ':') {
+		/* ":"..  (port only) */
+		nodelen = 0;
+		svct = nodesvc;
+	}
+		
+	if ((svct = strrchr(svct ? svct : nodesvc, ':')) && (*(svct-1) != ':') &&
+	    *(++svct)) {
+		/* Optional ":service" definition. */
+		if (strlen(svct) >= sizeof(ssvc))
+			return "Service name too long";
+		strcpy(ssvc, svct);
+		if (!nodelen)
+			nodelen = svct - nodesvc - 1;
+
+	} else if (!nodelen)
+		nodelen = strlen(nodesvc);
+
+	if (nodelen) {
+		/* Truncate nodename if necessary. */
+		nodelen = RD_MIN(nodelen, sizeof(snode)-1);
+		strncpy(snode, nodesvc, nodelen);
+		snode[nodelen] = '\0';
+	}
+
+	*node = snode;
+	*svc = ssvc;
+
+	return NULL;
+}
+
+
+
+rd_sockaddr_list_t *rd_getaddrinfo (const char *nodesvc, const char *defsvc,
+				    int flags, int family,
+				    int socktype, int protocol,
+				    const char **errstr) {
+	struct addrinfo hints = { .ai_family = family,
+				  .ai_socktype = socktype,
+				  .ai_protocol = protocol,
+				  .ai_flags = flags };
+	struct addrinfo *ais, *ai;
+	char *node, *svc;
+	int r;
+	int cnt = 0;
+	rd_sockaddr_list_t *rsal;
+
+	if ((*errstr = rd_addrinfo_prepare(nodesvc, &node, &svc))) {
+		errno = EINVAL;
+		return NULL;
+	}
+
+	if (*svc)
+		defsvc = svc;
+		
+	if ((r = getaddrinfo(node, defsvc, &hints, &ais))) {
+#ifdef EAI_SYSTEM
+		if (r == EAI_SYSTEM)
+#else
+		if (0)
+#endif
+			*errstr = rd_strerror(errno);
+		else {
+#ifdef _MSC_VER
+			*errstr = gai_strerrorA(r);
+#else
+			*errstr = gai_strerror(r);
+#endif
+			errno = EFAULT;
+		}
+		return NULL;
+	}
+	
+	/* Count number of addresses */
+	for (ai = ais ; ai != NULL ; ai = ai->ai_next)
+		cnt++;
+
+	if (cnt == 0) {
+		/* unlikely? */
+		freeaddrinfo(ais);
+		errno = ENOENT;
+		*errstr = "No addresses";
+		return NULL;
+	}
+
+
+	rsal = rd_calloc(1, sizeof(*rsal) + (sizeof(*rsal->rsal_addr) * cnt));
+
+	for (ai = ais ; ai != NULL ; ai = ai->ai_next)
+		memcpy(&rsal->rsal_addr[rsal->rsal_cnt++],
+		       ai->ai_addr, ai->ai_addrlen);
+
+	freeaddrinfo(ais);
+
+	/* Shuffle address list for proper round-robin */
+	if (!(flags & RD_AI_NOSHUFFLE))
+		rd_array_shuffle(rsal->rsal_addr, rsal->rsal_cnt,
+				 sizeof(*rsal->rsal_addr));
+
+	return rsal;
+}
+
+
+
+void rd_sockaddr_list_destroy (rd_sockaddr_list_t *rsal) {
+	rd_free(rsal);
+}
+

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/9f66960e/thirdparty/librdkafka-0.11.1/src/rdaddr.h
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdaddr.h b/thirdparty/librdkafka-0.11.1/src/rdaddr.h
new file mode 100644
index 0000000..dd1c419
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.1/src/rdaddr.h
@@ -0,0 +1,184 @@
+/*
+ * librd - Rapid Development C library
+ *
+ * Copyright (c) 2012, Magnus Edenhill
+ * All rights reserved.
+ * 
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met: 
+ * 
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer. 
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution. 
+ * 
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#ifndef _MSC_VER
+#include <netinet/in.h>
+#include <arpa/inet.h>
+#include <netdb.h>
+#else
+#define WIN32_MEAN_AND_LEAN
+#include <WinSock2.h>
+#include <ws2ipdef.h>
+#endif
+
+#if defined(__FreeBSD__) || defined(_AIX)
+#include <sys/socket.h>
+#endif
+
+/**
+ * rd_sockaddr_inx_t is a union for either ipv4 or ipv6 sockaddrs.
+ * It provides a convenient abstraction for AF_INET*-agnostic operations.
+ */
+typedef union {
+	struct sockaddr_in in;
+	struct sockaddr_in6 in6;
+} rd_sockaddr_inx_t;
+#define sinx_family in.sin_family
+#define sinx_addr   in.sin_addr
+#define RD_SOCKADDR_INX_LEN(sinx) \
+	((sinx)->sinx_family == AF_INET ? sizeof(struct sockaddr_in) :	\
+	 (sinx)->sinx_family == AF_INET6 ? sizeof(struct sockaddr_in6):	\
+	 sizeof(rd_sockaddr_inx_t))
+#define RD_SOCKADDR_INX_PORT(sinx)					\
+	((sinx)->sinx_family == AF_INET ? (sinx)->in.sin_port :		\
+	 (sinx)->sinx_family == AF_INET6 ? (sinx)->in6.sin6_port : 0)
+
+#define RD_SOCKADDR_INX_PORT_SET(sinx,port) do {		\
+	if ((sinx)->sinx_family == AF_INET)			\
+		(sinx)->in.sin_port = port;			\
+	else if ((sinx)->sinx_family == AF_INET6)		\
+		(sinx)->in6.sin6_port = port;			\
+ } while (0)
+
+
+
+/**
+ * Returns a thread-local temporary string (may be called up to 32 times
+ * without buffer wrapping) containing the human string representation
+ * of the sockaddr (which should be AF_INET or AF_INET6 at this point).
+ * If the RD_SOCKADDR2STR_F_PORT is provided the port number will be
+ * appended to the string.
+ * IPv6 address enveloping ("[addr]:port") will also be performed
+ * if .._F_PORT is set.
+ */
+#define RD_SOCKADDR2STR_F_PORT    0x1  /* Append the port. */
+#define RD_SOCKADDR2STR_F_RESOLVE 0x2  /* Try to resolve address to hostname. */
+#define RD_SOCKADDR2STR_F_FAMILY  0x4  /* Prepend address family. */
+#define RD_SOCKADDR2STR_F_NICE         /* Nice and friendly output */ \
+	(RD_SOCKADDR2STR_F_PORT | RD_SOCKADDR2STR_F_RESOLVE)
+const char *rd_sockaddr2str (const void *addr, int flags);
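A hypothetical call, assuming 'sinx' points at an AF_INET or AF_INET6 rd_sockaddr_inx_t:

    const char *s = rd_sockaddr2str(sinx,
                                    RD_SOCKADDR2STR_F_FAMILY |
                                    RD_SOCKADDR2STR_F_PORT);
    /* e.g. "ipv4#127.0.0.1:9092"; the string lives in a 32-entry
     * thread-local ring buffer, so copy it if it must outlive
     * further rd_sockaddr2str() calls. */
    printf("peer: %s\n", s);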
+
+
+/**
+ * Splits a node:service definition up into their node and svc counterparts
+ * suitable for passing to getaddrinfo().
+ * Returns NULL on success (and temporarily available pointers in '*node'
+ * and '*svc') or error string on failure.
+ *
+ * Thread-safe but returned buffers in '*node' and '*svc' are only
+ * usable until the next call to rd_addrinfo_prepare() in the same thread.
+ */
+const char *rd_addrinfo_prepare (const char *nodesvc,
+				 char **node, char **svc);
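A small illustrative call, splitting a combined node:service string:

    char *node, *svc;
    const char *err = rd_addrinfo_prepare("[::1]:9092", &node, &svc);
    if (err) {
            fprintf(stderr, "bad address: %s\n", err);
            return;
    }
    /* node -> "::1", svc -> "9092" (thread-local buffers,
     * valid until the next call in this thread) */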
+
+
+
+typedef struct rd_sockaddr_list_s {
+	int rsal_cnt;
+	int rsal_curr;
+	rd_sockaddr_inx_t rsal_addr[];
+} rd_sockaddr_list_t;
+
+
+/**
+ * Returns the next address from a sockaddr list and updates
+ * the current-index to point to it.
+ *
+ * Typical usage is for round-robin connection attempts or similar:
+ *   while (1) {
+ *       rd_sockaddr_inx_t *sinx = rd_sockaddr_list_next(my_server_list);
+ *       if (do_connect((struct sockaddr *)sinx) == -1) {
+ *          sleep(1);
+ *          continue;
+ *       }
+ *       ...
+ *   }
+ * 
+ */
+ 
+static RD_INLINE rd_sockaddr_inx_t *
+rd_sockaddr_list_next (rd_sockaddr_list_t *rsal) RD_UNUSED;
+static RD_INLINE rd_sockaddr_inx_t *
+rd_sockaddr_list_next (rd_sockaddr_list_t *rsal) {
+	rsal->rsal_curr = (rsal->rsal_curr + 1) % rsal->rsal_cnt;
+	return &rsal->rsal_addr[rsal->rsal_curr];
+}
+
+
+#define RD_SOCKADDR_LIST_FOREACH(sinx, rsal)			\
+	for  ((sinx) = &(rsal)->rsal_addr[0] ;			\
+	      (sinx) < &(rsal)->rsal_addr[(rsal)->rsal_cnt] ;	\
+	      (sinx)++)
+
+/**
+ * Wrapper for getaddrinfo(3) that performs these additional tasks:
+ *  - Input is a combined "<node>[:<svc>]" string, with support for
+ *    IPv6 enveloping ("[addr]:port").
+ *  - Returns a rd_sockaddr_list_t which must be freed with
+ *    rd_sockaddr_list_destroy() when done with it.
+ *  - Automatically shuffles the returned address list to provide
+ *    round-robin (unless RD_AI_NOSHUFFLE is provided in 'flags').
+ *
+ * Thread-safe.
+ */
+#define RD_AI_NOSHUFFLE  0x10000000 /* Don't shuffle returned address list.
+				     * FIXME: Guessing non-used bits like this
+				     *        is a bad idea. */
+
+rd_sockaddr_list_t *rd_getaddrinfo (const char *nodesvc, const char *defsvc,
+				    int flags, int family,
+				    int socktype, int protocol,
+				    const char **errstr);
+
+
+
+/**
+ * Frees a sockaddr list.
+ *
+ * Thread-safe.
+ */
+void rd_sockaddr_list_destroy (rd_sockaddr_list_t *rsal);
+
+
+
+/**
+ * Returns the human-readable name of a socket family.
+ */
+static const char *rd_family2str (int af) RD_UNUSED;
+static const char *rd_family2str (int af) {
+	switch (af) {
+		case AF_INET:
+			return "inet";
+		case AF_INET6:
+			return "inet6";
+		default:
+			return "af?";
+	}
+}

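For reference, a minimal usage sketch of the address helpers declared above: resolve a
node:service string, walk the (shuffled) address list round-robin, and print each address.
The broker string "localhost:9092", the wrapping function name and the printf output are
illustrative assumptions, not part of the upstream sources.

    /* Sketch only. Assumes the rdaddr.h declarations above plus
     * <stdio.h>, <sys/socket.h> and <netinet/in.h>. */
    static void resolve_example (void) {
            const char *errstr;
            rd_sockaddr_list_t *rsal;
            int i;

            rsal = rd_getaddrinfo("localhost:9092", "9092", 0 /* flags */,
                                  AF_UNSPEC, SOCK_STREAM, IPPROTO_TCP,
                                  &errstr);
            if (!rsal) {
                    fprintf(stderr, "resolve failed: %s\n", errstr);
                    return;
            }

            for (i = 0; i < rsal->rsal_cnt; i++) {
                    /* Advances the current index and returns that address. */
                    rd_sockaddr_inx_t *sinx = rd_sockaddr_list_next(rsal);
                    printf("address: %s\n",
                           rd_sockaddr2str(sinx, RD_SOCKADDR2STR_F_NICE));
            }

            rd_sockaddr_list_destroy(rsal);
    }
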
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/9f66960e/thirdparty/librdkafka-0.11.1/src/rdatomic.h
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdatomic.h b/thirdparty/librdkafka-0.11.1/src/rdatomic.h
new file mode 100644
index 0000000..99099f7
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.1/src/rdatomic.h
@@ -0,0 +1,188 @@
+/*
+ * librdkafka - The Apache Kafka C/C++ library
+ *
+ * Copyright (c) 2014-2016 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#pragma once
+
+#include "tinycthread.h"
+
+typedef struct {
+	int32_t val;
+#ifndef HAVE_ATOMICS_32
+	mtx_t lock;
+#endif
+} rd_atomic32_t;
+
+typedef struct {
+	int64_t val;
+#ifndef HAVE_ATOMICS_64
+	mtx_t lock;
+#endif
+} rd_atomic64_t;
+
+
+static RD_INLINE RD_UNUSED void rd_atomic32_init (rd_atomic32_t *ra, int32_t v) {
+	ra->val = v;
+#if !defined(_MSC_VER) && !defined(HAVE_ATOMICS_32)
+	mtx_init(&ra->lock, mtx_plain);
+#endif
+}
+
+
+static RD_INLINE int32_t RD_UNUSED rd_atomic32_add (rd_atomic32_t *ra, int32_t v) {
+#ifdef __SUNPRO_C
+	return atomic_add_32_nv(&ra->val, v);
+#elif defined(_MSC_VER)
+	return InterlockedAdd(&ra->val, v);
+#elif !defined(HAVE_ATOMICS_32)
+	int32_t r;
+	mtx_lock(&ra->lock);
+	ra->val += v;
+	r = ra->val;
+	mtx_unlock(&ra->lock);
+	return r;
+#else
+	return ATOMIC_OP32(add, fetch, &ra->val, v);
+#endif
+}
+
+static RD_INLINE int32_t RD_UNUSED rd_atomic32_sub(rd_atomic32_t *ra, int32_t v) {
+#ifdef __SUNPRO_C
+	return atomic_add_32_nv(&ra->val, -v);
+#elif defined(_MSC_VER)
+	return InterlockedAdd(&ra->val, -v);
+#elif !defined(HAVE_ATOMICS_32)
+	int32_t r;
+	mtx_lock(&ra->lock);
+	ra->val -= v;
+	r = ra->val;
+	mtx_unlock(&ra->lock);
+	return r;
+#else
+	return ATOMIC_OP32(sub, fetch, &ra->val, v);
+#endif
+}
+
+static RD_INLINE int32_t RD_UNUSED rd_atomic32_get(rd_atomic32_t *ra) {
+#if defined(_MSC_VER) || defined(__SUNPRO_C)
+	return ra->val;
+#elif !defined(HAVE_ATOMICS_32)
+	int32_t r;
+	mtx_lock(&ra->lock);
+	r = ra->val;
+	mtx_unlock(&ra->lock);
+	return r;
+#else
+	return ATOMIC_OP32(fetch, add, &ra->val, 0);
+#endif
+}
+
+static RD_INLINE int32_t RD_UNUSED rd_atomic32_set(rd_atomic32_t *ra, int32_t v) {
+#ifdef _MSC_VER
+	return InterlockedExchange(&ra->val, v);
+#elif !defined(HAVE_ATOMICS_32)
+	int32_t r;
+	mtx_lock(&ra->lock);
+	r = ra->val = v;
+	mtx_unlock(&ra->lock);
+	return r;
+#else
+	return ra->val = v; // FIXME
+#endif
+}
+
+
+
+static RD_INLINE RD_UNUSED void rd_atomic64_init (rd_atomic64_t *ra, int64_t v) {
+	ra->val = v;
+#if !defined(_MSC_VER) && !defined(HAVE_ATOMICS_64)
+	mtx_init(&ra->lock, mtx_plain);
+#endif
+}
+
+static RD_INLINE int64_t RD_UNUSED rd_atomic64_add (rd_atomic64_t *ra, int64_t v) {
+#ifdef __SUNPRO_C
+	return atomic_add_64_nv(&ra->val, v);
+#elif defined(_MSC_VER)
+	return InterlockedAdd64(&ra->val, v);
+#elif !defined(HAVE_ATOMICS_64)
+	int64_t r;
+	mtx_lock(&ra->lock);
+	ra->val += v;
+	r = ra->val;
+	mtx_unlock(&ra->lock);
+	return r;
+#else
+	return ATOMIC_OP64(add, fetch, &ra->val, v);
+#endif
+}
+
+static RD_INLINE int64_t RD_UNUSED rd_atomic64_sub(rd_atomic64_t *ra, int64_t v) {
+#ifdef __SUNPRO_C
+	return atomic_add_64_nv(&ra->val, -v);
+#elif defined(_MSC_VER)
+	return InterlockedAdd64(&ra->val, -v);
+#elif !defined(HAVE_ATOMICS_64)
+	int64_t r;
+	mtx_lock(&ra->lock);
+	ra->val -= v;
+	r = ra->val;
+	mtx_unlock(&ra->lock);
+	return r;
+#else
+	return ATOMIC_OP64(sub, fetch, &ra->val, v);
+#endif
+}
+
+static RD_INLINE int64_t RD_UNUSED rd_atomic64_get(rd_atomic64_t *ra) {
+#if defined(_MSC_VER) || defined(__SUNPRO_C)
+	return ra->val;
+#elif !defined(HAVE_ATOMICS_64)
+	int64_t r;
+	mtx_lock(&ra->lock);
+	r = ra->val;
+	mtx_unlock(&ra->lock);
+	return r;
+#else
+	return ATOMIC_OP64(fetch, add, &ra->val, 0);
+#endif
+}
+
+
+static RD_INLINE int64_t RD_UNUSED rd_atomic64_set(rd_atomic64_t *ra, int64_t v) {
+#ifdef _MSC_VER
+	return InterlockedExchange64(&ra->val, v);
+#elif !defined(HAVE_ATOMICS_64)
+	int64_t r;
+	mtx_lock(&ra->lock);
+	ra->val = v;
+	r = ra->val;
+	mtx_unlock(&ra->lock);
+	return r;
+#else
+	return ra->val = v; // FIXME
+#endif
+}

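For reference, a minimal sketch of the 32-bit atomic API above, used here as a shared
delivery counter; the counter name and the three call sites are illustrative assumptions,
not part of the upstream sources.

    /* Sketch only. Assumes the rdatomic.h declarations above. */
    static rd_atomic32_t msgs_delivered;

    static void counters_init (void) {
            rd_atomic32_init(&msgs_delivered, 0);
    }

    /* May be called concurrently from any delivery thread. */
    static void on_delivery (void) {
            rd_atomic32_add(&msgs_delivered, 1);
    }

    /* Called from a stats thread. */
    static int32_t delivered_so_far (void) {
            return rd_atomic32_get(&msgs_delivered);
    }
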
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/9f66960e/thirdparty/librdkafka-0.11.1/src/rdavg.h
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdavg.h b/thirdparty/librdkafka-0.11.1/src/rdavg.h
new file mode 100644
index 0000000..98661d8
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.1/src/rdavg.h
@@ -0,0 +1,95 @@
+#pragma once
+
+
+typedef struct rd_avg_s {
+        struct {
+                int64_t maxv;
+                int64_t minv;
+                int64_t avg;
+                int64_t sum;
+                int     cnt;
+                rd_ts_t start;
+        } ra_v;
+        mtx_t ra_lock;
+        enum {
+                RD_AVG_GAUGE,
+                RD_AVG_COUNTER,
+        } ra_type;
+} rd_avg_t;
+
+
+/**
+ * Add value 'v' to averager 'ra'.
+ */
+static RD_UNUSED void rd_avg_add (rd_avg_t *ra, int64_t v) {
+        mtx_lock(&ra->ra_lock);
+	if (v > ra->ra_v.maxv)
+		ra->ra_v.maxv = v;
+	if (ra->ra_v.minv == 0 || v < ra->ra_v.minv)
+		ra->ra_v.minv = v;
+	ra->ra_v.sum += v;
+	ra->ra_v.cnt++;
+        mtx_unlock(&ra->ra_lock);
+}
+
+
+/**
+ * @brief Calculate the average
+ */
+static RD_UNUSED void rd_avg_calc (rd_avg_t *ra, rd_ts_t now) {
+        if (ra->ra_type == RD_AVG_GAUGE) {
+                if (ra->ra_v.cnt)
+                        ra->ra_v.avg = ra->ra_v.sum / ra->ra_v.cnt;
+                else
+                        ra->ra_v.avg = 0;
+        } else {
+                rd_ts_t elapsed = now - ra->ra_v.start;
+
+                if (elapsed)
+                        ra->ra_v.avg = (ra->ra_v.sum * 1000000llu) / elapsed;
+                else
+                        ra->ra_v.avg = 0;
+
+                ra->ra_v.start = elapsed;
+        }
+}
+
+
+/**
+ * Rolls over statistics in 'src' and stores the average in 'dst'.
+ * 'src' is cleared and ready to be reused.
+ */
+static RD_UNUSED void rd_avg_rollover (rd_avg_t *dst,
+					     rd_avg_t *src) {
+        rd_ts_t now = rd_clock();
+
+        mtx_lock(&src->ra_lock);
+        dst->ra_type = src->ra_type;
+	dst->ra_v    = src->ra_v;
+	memset(&src->ra_v, 0, sizeof(src->ra_v));
+        src->ra_v.start = now;
+        mtx_unlock(&src->ra_lock);
+
+        rd_avg_calc(dst, now);
+}
+
+
+/**
+ * Initialize an averager
+ */
+static RD_UNUSED void rd_avg_init (rd_avg_t *ra, int type) {
+        rd_avg_t dummy;
+        memset(ra, 0, sizeof(*ra));
+        mtx_init(&ra->ra_lock, 0);
+        ra->ra_type = type;
+
+        rd_avg_rollover(&dummy, ra);
+}
+
+/**
+ * Destroy averager
+ */
+static RD_UNUSED void rd_avg_destroy (rd_avg_t *ra) {
+        mtx_destroy(&ra->ra_lock);
+}
+

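For reference, a minimal sketch of the averager rollover pattern above: samples are added
on the hot path, then the stats path snapshots and resets the window with rd_avg_rollover().
The function names and the latency/stats framing are illustrative assumptions, not part of
the upstream sources.

    /* Sketch only. Assumes the rdavg.h declarations above plus
     * <stdio.h> and <inttypes.h>. */
    static rd_avg_t latency_avg;

    static void stats_init (void) {
            rd_avg_init(&latency_avg, RD_AVG_GAUGE);
    }

    /* Hot path: record one latency sample (microseconds). */
    static void record_latency (int64_t latency_us) {
            rd_avg_add(&latency_avg, latency_us);
    }

    /* Stats interval: snapshot the current window and reset the averager. */
    static void emit_stats (void) {
            rd_avg_t snap;
            rd_avg_rollover(&snap, &latency_avg);
            printf("latency us: min=%"PRId64" avg=%"PRId64" max=%"PRId64" (n=%d)\n",
                   snap.ra_v.minv, snap.ra_v.avg, snap.ra_v.maxv,
                   snap.ra_v.cnt);
    }
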
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/9f66960e/thirdparty/librdkafka-0.11.1/src/rdavl.c
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdavl.c b/thirdparty/librdkafka-0.11.1/src/rdavl.c
new file mode 100644
index 0000000..2f58dd4
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.1/src/rdavl.c
@@ -0,0 +1,214 @@
+/*
+ * librd - Rapid Development C library
+ *
+ * Copyright (c) 2012-2016, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "rdkafka_int.h"
+#include "rdavl.h"
+
+/*
+ * AVL tree.
+ * Inspired by Ian Piumarta's tree.h implementation.
+ */
+
+#define RD_AVL_NODE_HEIGHT(ran) ((ran) ? (ran)->ran_height : 0)
+
+#define RD_AVL_NODE_DELTA(ran) \
+        (RD_AVL_NODE_HEIGHT((ran)->ran_p[RD_AVL_LEFT]) - \
+         RD_AVL_NODE_HEIGHT((ran)->ran_p[RD_AVL_RIGHT]))
+
+#define RD_DELTA_MAX 1
+
+
+static rd_avl_node_t *rd_avl_balance_node (rd_avl_node_t *ran);
+
+static rd_avl_node_t *rd_avl_rotate (rd_avl_node_t *ran, rd_avl_dir_t dir) {
+        rd_avl_node_t *n;
+        static const rd_avl_dir_t odirmap[] = { /* opposite direction map */
+                [RD_AVL_RIGHT] = RD_AVL_LEFT,
+                [RD_AVL_LEFT]  = RD_AVL_RIGHT
+        };
+        const int odir = odirmap[dir];
+
+        n = ran->ran_p[odir];
+        ran->ran_p[odir] = n->ran_p[dir];
+        n->ran_p[dir] = rd_avl_balance_node(ran);
+
+        return rd_avl_balance_node(n);
+}
+
+static rd_avl_node_t *rd_avl_balance_node (rd_avl_node_t *ran) {
+        const int d = RD_AVL_NODE_DELTA(ran);
+        int h;
+
+        if (d < -RD_DELTA_MAX) {
+                if (RD_AVL_NODE_DELTA(ran->ran_p[RD_AVL_RIGHT]) > 0)
+                        ran->ran_p[RD_AVL_RIGHT] =
+                                rd_avl_rotate(ran->ran_p[RD_AVL_RIGHT],
+                                              RD_AVL_RIGHT);
+                return rd_avl_rotate(ran, RD_AVL_LEFT);
+
+        } else if (d > RD_DELTA_MAX) {
+                if (RD_AVL_NODE_DELTA(ran->ran_p[RD_AVL_LEFT]) < 0)
+                        ran->ran_p[RD_AVL_LEFT] =
+                                rd_avl_rotate(ran->ran_p[RD_AVL_LEFT],
+                                              RD_AVL_LEFT);
+
+                return rd_avl_rotate(ran, RD_AVL_RIGHT);
+        }
+
+        ran->ran_height = 0;
+
+        if ((h = RD_AVL_NODE_HEIGHT(ran->ran_p[RD_AVL_LEFT])) > ran->ran_height)
+                ran->ran_height = h;
+
+        if ((h = RD_AVL_NODE_HEIGHT(ran->ran_p[RD_AVL_RIGHT])) > ran->ran_height)
+                ran->ran_height = h;
+
+        ran->ran_height++;
+
+        return ran;
+}
+
+rd_avl_node_t *rd_avl_insert_node (rd_avl_t *ravl,
+                                   rd_avl_node_t *parent,
+                                   rd_avl_node_t *ran,
+                                   rd_avl_node_t **existing) {
+        rd_avl_dir_t dir;
+        int r;
+
+        if (!parent)
+                return ran;
+
+        if ((r = ravl->ravl_cmp(ran->ran_elm, parent->ran_elm)) == 0) {
+                /* Replace existing node with new one. */
+                ran->ran_p[RD_AVL_LEFT] = parent->ran_p[RD_AVL_LEFT];
+                ran->ran_p[RD_AVL_RIGHT] = parent->ran_p[RD_AVL_RIGHT];
+                ran->ran_height = parent->ran_height;
+                *existing = parent;
+                return ran;
+        }
+
+        if (r < 0)
+                dir = RD_AVL_LEFT;
+        else
+                dir = RD_AVL_RIGHT;
+
+        parent->ran_p[dir] = rd_avl_insert_node(ravl, parent->ran_p[dir],
+                                                ran, existing);
+        return rd_avl_balance_node(parent);
+}
+
+
+static rd_avl_node_t *rd_avl_move (rd_avl_node_t *dst, rd_avl_node_t *src,
+                                   rd_avl_dir_t dir) {
+
+        if (!dst)
+                return src;
+
+        dst->ran_p[dir] = rd_avl_move(dst->ran_p[dir], src, dir);
+
+        return rd_avl_balance_node(dst);
+}
+
+static rd_avl_node_t *rd_avl_remove_node0 (rd_avl_node_t *ran) {
+        rd_avl_node_t *tmp;
+
+        tmp = rd_avl_move(ran->ran_p[RD_AVL_LEFT],
+                          ran->ran_p[RD_AVL_RIGHT],
+                          RD_AVL_RIGHT);
+
+        ran->ran_p[RD_AVL_LEFT] = ran->ran_p[RD_AVL_RIGHT] = NULL;
+        return tmp;
+}
+
+
+rd_avl_node_t *rd_avl_remove_elm0 (rd_avl_t *ravl, rd_avl_node_t *parent,
+                                   const void *elm) {
+        rd_avl_dir_t dir;
+        int r;
+
+        if (!parent)
+                return NULL;
+
+
+        if ((r = ravl->ravl_cmp(elm, parent->ran_elm)) == 0)
+                return rd_avl_remove_node0(parent);
+        else if  (r < 0)
+                dir = RD_AVL_LEFT;
+        else /* > 0 */
+                dir = RD_AVL_RIGHT;
+
+        parent->ran_p[dir] =
+                rd_avl_remove_elm0(ravl, parent->ran_p[dir], elm);
+
+        return rd_avl_balance_node(parent);
+}
+
+
+
+rd_avl_node_t *rd_avl_find_node (const rd_avl_t *ravl,
+                                 const rd_avl_node_t *begin,
+                                 const void *elm) {
+        int r;
+
+        if (!begin)
+                return NULL;
+        else if (!(r = ravl->ravl_cmp(elm, begin->ran_elm)))
+                return (rd_avl_node_t *)begin;
+        else if (r < 0)
+                return rd_avl_find_node(ravl, begin->ran_p[RD_AVL_LEFT], elm);
+        else /* r > 0 */
+                return rd_avl_find_node(ravl, begin->ran_p[RD_AVL_RIGHT], elm);
+}
+
+
+
+void rd_avl_destroy (rd_avl_t *ravl) {
+        if (ravl->ravl_flags & RD_AVL_F_LOCKS)
+                rwlock_destroy(&ravl->ravl_rwlock);
+
+        if (ravl->ravl_flags & RD_AVL_F_OWNER)
+                free(ravl);
+}
+
+rd_avl_t *rd_avl_init (rd_avl_t *ravl, rd_avl_cmp_t cmp, int flags) {
+
+        if (!ravl) {
+                ravl = calloc(1, sizeof(*ravl));
+                flags |= RD_AVL_F_OWNER;
+        } else {
+                memset(ravl, 0, sizeof(*ravl));
+        }
+
+        ravl->ravl_flags = flags;
+        ravl->ravl_cmp = cmp;
+
+        if (flags & RD_AVL_F_LOCKS)
+                rwlock_init(&ravl->ravl_rwlock);
+
+        return ravl;
+}

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/9f66960e/thirdparty/librdkafka-0.11.1/src/rdavl.h
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/src/rdavl.h b/thirdparty/librdkafka-0.11.1/src/rdavl.h
new file mode 100644
index 0000000..ffd33dd
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.1/src/rdavl.h
@@ -0,0 +1,253 @@
+/*
+ * librd - Rapid Development C library
+ *
+ * Copyright (c) 2012-2016, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+/*
+ * AVL tree.
+ * Inspired by Ian Piumarta's tree.h implementation.
+ */
+
+#pragma once
+
+#include "tinycthread.h"
+
+
+typedef enum {
+        RD_AVL_LEFT,
+        RD_AVL_RIGHT,
+} rd_avl_dir_t;
+
+/**
+ * AVL tree node.
+ * Add 'rd_avl_node_t ..' as field to your element's struct and
+ * provide it as the 'field' argument in the API below.
+ */
+typedef struct rd_avl_node_s {
+        struct rd_avl_node_s *ran_p[2];    /* RD_AVL_LEFT and RD_AVL_RIGHT */
+        int                   ran_height;  /* Sub-tree height */
+        void                 *ran_elm;     /* Backpointer to the containing
+                                            * element. This could be considered
+                                            * costly but is convenient for the
+                                            * caller: RAM is cheap,
+                                            * development time isn't. */
+} rd_avl_node_t;
+
+
+
+/**
+ * Per-AVL application-provided element comparator.
+ */
+typedef int (*rd_avl_cmp_t) (const void *, const void *);
+
+
+/**
+ * AVL tree
+ */
+typedef struct rd_avl_s {
+        rd_avl_node_t *ravl_root;   /* Root node */
+        rd_avl_cmp_t   ravl_cmp;    /* Comparator */
+        int            ravl_flags;  /* Flags */
+#define RD_AVL_F_LOCKS      0x1     /* Enable thread-safeness */
+#define RD_AVL_F_OWNER      0x2     /* internal: rd_avl_init() allocated ravl */
+        rwlock_t       ravl_rwlock; /* rwlock used when .._F_LOCKS is set. */
+} rd_avl_t;
+
+
+
+
+/**
+ *
+ *
+ * Public API
+ *
+ *
+ */
+
+/**
+ * Insert 'elm' into AVL tree.
+ * In case of collision the previous entry is overwritten by the
+ * new one and the previous element is returned, else NULL.
+ */
+#define RD_AVL_INSERT(ravl,elm,field)           \
+        rd_avl_insert(ravl, elm, &(elm)->field)
+
+
+/**
+ * Remove element by matching value 'elm' using compare function.
+ */
+#define RD_AVL_REMOVE_ELM(ravl,elm)     \
+        rd_avl_remove_elm(ravl, elm)
+
+/**
+ * Search for (by value using compare function) and return matching elm.
+ */
+#define RD_AVL_FIND(ravl,elm)           \
+        rd_avl_find(ravl, elm, 1)
+
+
+/**
+ * Search (by value using compare function) for and return matching elm.
+ * Same as RD_AVL_FIND() but assumes 'ravl' is already locked
+ * by 'rd_avl_*lock()'.
+ *
+ * NOTE: rd_avl_wrlock() must be held.
+ */
+#define RD_AVL_FIND_NL(ravl,elm)                \
+        rd_avl_find(ravl, elm, 0)
+
+
+/**
+ * Search (by value using compare function) for elm and return its AVL node.
+ *
+ * NOTE: rd_avl_wrlock() must be held.
+ */
+#define RD_AVL_FIND_NODE_NL(ravl,elm)           \
+        rd_avl_find_node(ravl, (ravl)->ravl_root, elm)
+
+
+/**
+ * Changes the element pointer for an existing AVL node in the tree.
+ * The new element must be identical (according to the comparator) 
+ * to the previous element.
+ *
+ * NOTE: rd_avl_wrlock() must be held.
+ */
+#define RD_AVL_ELM_SET_NL(ran,elm)  ((ran)->ran_elm = (elm))
+
+/**
+ * Returns the current element pointer for an existing AVL node in the tree
+ * 
+ * NOTE: rd_avl_*lock() must be held.
+ */
+#define RD_AVL_ELM_GET_NL(ran)      ((ran)->ran_elm)
+
+
+
+/**
+ * Destroy previously initialized (by rd_avl_init()) AVL tree.
+ */
+void      rd_avl_destroy (rd_avl_t *ravl);
+
+/**
+ * Initialize (and optionally allocate if 'ravl' is NULL) AVL tree.
+ * 'cmp' is the comparison function that takes two const pointers
+ * pointing to the elements being compared (rather than the avl_nodes).
+ * 'flags' is zero or more of the RD_AVL_F_.. flags.
+ *
+ * For thread-safe AVL trees supply RD_AVL_F_LOCKS in 'flags'.
+ */
+rd_avl_t *rd_avl_init (rd_avl_t *ravl, rd_avl_cmp_t cmp, int flags);
+
+
+/**
+ * 'ravl' locking functions.
+ * Locking is performed automatically for all methods except for
+ * those with the "_NL"/"_nl" suffix ("not locked") which expects
+ * either read or write lock to be held.
+ *
+ * rdavl utilizes rwlocks to allow multiple concurrent read threads.
+ */
+static RD_INLINE RD_UNUSED void rd_avl_rdlock (rd_avl_t *ravl) {
+        if (ravl->ravl_flags & RD_AVL_F_LOCKS)
+                rwlock_rdlock(&ravl->ravl_rwlock);
+}
+
+static RD_INLINE RD_UNUSED void rd_avl_wrlock (rd_avl_t *ravl) {
+        if (ravl->ravl_flags & RD_AVL_F_LOCKS)
+                rwlock_wrlock(&ravl->ravl_rwlock);
+}
+
+static RD_INLINE RD_UNUSED void rd_avl_rdunlock (rd_avl_t *ravl) {
+        if (ravl->ravl_flags & RD_AVL_F_LOCKS)
+                rwlock_rdunlock(&ravl->ravl_rwlock);
+}
+
+static RD_INLINE RD_UNUSED void rd_avl_wrunlock (rd_avl_t *ravl) {
+        if (ravl->ravl_flags & RD_AVL_F_LOCKS)
+                rwlock_wrunlock(&ravl->ravl_rwlock);
+}
+
+
+
+
+/**
+ * Private API, don't use directly.
+ */
+
+rd_avl_node_t *rd_avl_insert_node (rd_avl_t *ravl,
+                                   rd_avl_node_t *parent,
+                                   rd_avl_node_t *ran,
+                                   rd_avl_node_t **existing);
+
+static RD_UNUSED void *rd_avl_insert (rd_avl_t *ravl, void *elm,
+                            rd_avl_node_t *ran) {
+        rd_avl_node_t *existing = NULL;
+
+        memset(ran, 0, sizeof(*ran));
+        ran->ran_elm = elm;
+
+        rd_avl_wrlock(ravl);
+        ravl->ravl_root = rd_avl_insert_node(ravl, ravl->ravl_root,
+                                             ran, &existing);
+        rd_avl_wrunlock(ravl);
+
+        return existing ? existing->ran_elm : NULL;
+}
+
+rd_avl_node_t *rd_avl_remove_elm0 (rd_avl_t *ravl, rd_avl_node_t *parent,
+                                   const void *elm);
+
+static RD_INLINE RD_UNUSED
+void rd_avl_remove_elm (rd_avl_t *ravl, const void *elm) {
+        rd_avl_wrlock(ravl);
+        ravl->ravl_root = rd_avl_remove_elm0(ravl, ravl->ravl_root, elm);
+        rd_avl_wrunlock(ravl);
+}
+
+
+rd_avl_node_t *rd_avl_find_node (const rd_avl_t *ravl,
+                                 const rd_avl_node_t *begin,
+                                 const void *elm);
+
+
+static RD_INLINE RD_UNUSED void *rd_avl_find (rd_avl_t *ravl, const void *elm,
+                                              int dolock) {
+        const rd_avl_node_t *ran;
+        void *ret;
+
+        if (dolock)
+                rd_avl_rdlock(ravl);
+
+        ran = rd_avl_find_node(ravl, ravl->ravl_root, elm);
+        ret = ran ? ran->ran_elm : NULL;
+
+        if (dolock)
+                rd_avl_rdunlock(ravl);
+
+        return ret;
+}
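
For reference, a minimal sketch of the rdavl.h public API above: an element struct with an
embedded rd_avl_node_t, a comparator, and insert/find/remove through the wrapper macros.
The broker struct, comparator, ids and function name are illustrative assumptions, not part
of the upstream sources.

    /* Sketch only. Assumes the rdavl.h declarations above. */
    typedef struct broker_s {
            int           id;
            rd_avl_node_t avl_node;   /* embedded AVL node */
    } broker_t;

    static int broker_cmp (const void *a, const void *b) {
            return ((const broker_t *)a)->id - ((const broker_t *)b)->id;
    }

    static void avl_example (void) {
            rd_avl_t  brokers;
            broker_t  b1  = { .id = 1 };
            broker_t  key = { .id = 1 };
            broker_t *found;

            rd_avl_init(&brokers, broker_cmp, RD_AVL_F_LOCKS);

            /* Insert; returns the replaced element on collision, else NULL. */
            RD_AVL_INSERT(&brokers, &b1, avl_node);

            /* Look up by value using the comparator. */
            found = RD_AVL_FIND(&brokers, &key);
            (void)found;  /* unused in this sketch */

            /* Remove by value and tear down the tree. */
            RD_AVL_REMOVE_ELM(&brokers, &key);
            rd_avl_destroy(&brokers);
    }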