Posted to commits@nifi.apache.org by ph...@apache.org on 2018/06/06 14:14:39 UTC

[11/51] [partial] nifi-minifi-cpp git commit: MINIFICPP-512 - upgrade to librdkafka 0.11.4

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/win32/wingetopt.c
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/win32/wingetopt.c b/thirdparty/librdkafka-0.11.1/win32/wingetopt.c
deleted file mode 100644
index 50ed2f0..0000000
--- a/thirdparty/librdkafka-0.11.1/win32/wingetopt.c
+++ /dev/null
@@ -1,564 +0,0 @@
-/*	$OpenBSD: getopt_long.c,v 1.23 2007/10/31 12:34:57 chl Exp $	*/
-/*	$NetBSD: getopt_long.c,v 1.15 2002/01/31 22:43:40 tv Exp $	*/
-
-/*
- * Copyright (c) 2002 Todd C. Miller <To...@courtesan.com>
- *
- * Permission to use, copy, modify, and distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- *
- * Sponsored in part by the Defense Advanced Research Projects
- * Agency (DARPA) and Air Force Research Laboratory, Air Force
- * Materiel Command, USAF, under agreement number F39502-99-1-0512.
- */
-/*-
- * Copyright (c) 2000 The NetBSD Foundation, Inc.
- * All rights reserved.
- *
- * This code is derived from software contributed to The NetBSD Foundation
- * by Dieter Baron and Thomas Klausner.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
- * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <errno.h>
-#include <stdlib.h>
-#include <string.h>
-#include "wingetopt.h"
-#include <stdarg.h>
-#include <stdio.h>
-#include <windows.h>
-
-#define	REPLACE_GETOPT		/* use this getopt as the system getopt(3) */
-
-#ifdef REPLACE_GETOPT
-int	opterr = 1;		/* if error message should be printed */
-int	optind = 1;		/* index into parent argv vector */
-int	optopt = '?';		/* character checked for validity */
-#undef	optreset		/* see getopt.h */
-#define	optreset		__mingw_optreset
-int	optreset;		/* reset getopt */
-char    *optarg;		/* argument associated with option */
-#endif
-
-#define PRINT_ERROR	((opterr) && (*options != ':'))
-
-#define FLAG_PERMUTE	0x01	/* permute non-options to the end of argv */
-#define FLAG_ALLARGS	0x02	/* treat non-options as args to option "-1" */
-#define FLAG_LONGONLY	0x04	/* operate as getopt_long_only */
-
-/* return values */
-#define	BADCH		(int)'?'
-#define	BADARG		((*options == ':') ? (int)':' : (int)'?')
-#define	INORDER 	(int)1
-
-#ifndef __CYGWIN__
-#define __progname __argv[0]
-#else
-extern char __declspec(dllimport) *__progname;
-#endif
-
-#ifdef __CYGWIN__
-static char EMSG[] = "";
-#else
-#define	EMSG		""
-#endif
-
-static int getopt_internal(int, char * const *, const char *,
-			   const struct option *, int *, int);
-static int parse_long_options(char * const *, const char *,
-			      const struct option *, int *, int);
-static int gcd(int, int);
-static void permute_args(int, int, int, char * const *);
-
-static char *place = EMSG; /* option letter processing */
-
-/* XXX: set optreset to 1 rather than these two */
-static int nonopt_start = -1; /* first non option argument (for permute) */
-static int nonopt_end = -1;   /* first option after non options (for permute) */
-
-/* Error messages */
-static const char recargchar[] = "option requires an argument -- %c";
-static const char recargstring[] = "option requires an argument -- %s";
-static const char ambig[] = "ambiguous option -- %.*s";
-static const char noarg[] = "option doesn't take an argument -- %.*s";
-static const char illoptchar[] = "unknown option -- %c";
-static const char illoptstring[] = "unknown option -- %s";
-
-static void
-_vwarnx(const char *fmt,va_list ap)
-{
-  (void)fprintf(stderr,"%s: ",__progname);
-  if (fmt != NULL)
-    (void)vfprintf(stderr,fmt,ap);
-  (void)fprintf(stderr,"\n");
-}
-
-static void
-warnx(const char *fmt,...)
-{
-  va_list ap;
-  va_start(ap,fmt);
-  _vwarnx(fmt,ap);
-  va_end(ap);
-}
-
-/*
- * Compute the greatest common divisor of a and b.
- */
-static int
-gcd(int a, int b)
-{
-	int c;
-
-	c = a % b;
-	while (c != 0) {
-		a = b;
-		b = c;
-		c = a % b;
-	}
-
-	return (b);
-}
-
-/*
- * Exchange the block from nonopt_start to nonopt_end with the block
- * from nonopt_end to opt_end (keeping the same order of arguments
- * in each block).
- */
-static void
-permute_args(int panonopt_start, int panonopt_end, int opt_end,
-	char * const *nargv)
-{
-	int cstart, cyclelen, i, j, ncycle, nnonopts, nopts, pos;
-	char *swap;
-
-	/*
-	 * compute lengths of blocks and number and size of cycles
-	 */
-	nnonopts = panonopt_end - panonopt_start;
-	nopts = opt_end - panonopt_end;
-	ncycle = gcd(nnonopts, nopts);
-	cyclelen = (opt_end - panonopt_start) / ncycle;
-
-	for (i = 0; i < ncycle; i++) {
-		cstart = panonopt_end+i;
-		pos = cstart;
-		for (j = 0; j < cyclelen; j++) {
-			if (pos >= panonopt_end)
-				pos -= nnonopts;
-			else
-				pos += nopts;
-			swap = nargv[pos];
-			/* LINTED const cast */
-			((char **) nargv)[pos] = nargv[cstart];
-			/* LINTED const cast */
-			((char **)nargv)[cstart] = swap;
-		}
-	}
-}
-
-/*
- * parse_long_options --
- *	Parse long options in argc/argv argument vector.
- * Returns -1 if short_too is set and the option does not match long_options.
- */
-static int
-parse_long_options(char * const *nargv, const char *options,
-	const struct option *long_options, int *idx, int short_too)
-{
-	char *current_argv, *has_equal;
-	size_t current_argv_len;
-	int i, ambiguous, match;
-
-#define IDENTICAL_INTERPRETATION(_x, _y)                                \
-	(long_options[(_x)].has_arg == long_options[(_y)].has_arg &&    \
-	 long_options[(_x)].flag == long_options[(_y)].flag &&          \
-	 long_options[(_x)].val == long_options[(_y)].val)
-
-	current_argv = place;
-	match = -1;
-	ambiguous = 0;
-
-	optind++;
-
-	if ((has_equal = strchr(current_argv, '=')) != NULL) {
-		/* argument found (--option=arg) */
-		current_argv_len = has_equal - current_argv;
-		has_equal++;
-	} else
-		current_argv_len = strlen(current_argv);
-
-	for (i = 0; long_options[i].name; i++) {
-		/* find matching long option */
-		if (strncmp(current_argv, long_options[i].name,
-		    current_argv_len))
-			continue;
-
-		if (strlen(long_options[i].name) == current_argv_len) {
-			/* exact match */
-			match = i;
-			ambiguous = 0;
-			break;
-		}
-		/*
-		 * If this is a known short option, don't allow
-		 * a partial match of a single character.
-		 */
-		if (short_too && current_argv_len == 1)
-			continue;
-
-		if (match == -1)	/* partial match */
-			match = i;
-		else if (!IDENTICAL_INTERPRETATION(i, match))
-			ambiguous = 1;
-	}
-	if (ambiguous) {
-		/* ambiguous abbreviation */
-		if (PRINT_ERROR)
-			warnx(ambig, (int)current_argv_len,
-			     current_argv);
-		optopt = 0;
-		return (BADCH);
-	}
-	if (match != -1) {		/* option found */
-		if (long_options[match].has_arg == no_argument
-		    && has_equal) {
-			if (PRINT_ERROR)
-				warnx(noarg, (int)current_argv_len,
-				     current_argv);
-			/*
-			 * XXX: GNU sets optopt to val regardless of flag
-			 */
-			if (long_options[match].flag == NULL)
-				optopt = long_options[match].val;
-			else
-				optopt = 0;
-			return (BADARG);
-		}
-		if (long_options[match].has_arg == required_argument ||
-		    long_options[match].has_arg == optional_argument) {
-			if (has_equal)
-				optarg = has_equal;
-			else if (long_options[match].has_arg ==
-			    required_argument) {
-				/*
-				 * optional argument doesn't use next nargv
-				 */
-				optarg = nargv[optind++];
-			}
-		}
-		if ((long_options[match].has_arg == required_argument)
-		    && (optarg == NULL)) {
-			/*
-			 * Missing argument; leading ':' indicates no error
-			 * should be generated.
-			 */
-			if (PRINT_ERROR)
-				warnx(recargstring,
-				    current_argv);
-			/*
-			 * XXX: GNU sets optopt to val regardless of flag
-			 */
-			if (long_options[match].flag == NULL)
-				optopt = long_options[match].val;
-			else
-				optopt = 0;
-			--optind;
-			return (BADARG);
-		}
-	} else {			/* unknown option */
-		if (short_too) {
-			--optind;
-			return (-1);
-		}
-		if (PRINT_ERROR)
-			warnx(illoptstring, current_argv);
-		optopt = 0;
-		return (BADCH);
-	}
-	if (idx)
-		*idx = match;
-	if (long_options[match].flag) {
-		*long_options[match].flag = long_options[match].val;
-		return (0);
-	} else
-		return (long_options[match].val);
-#undef IDENTICAL_INTERPRETATION
-}
-
-/*
- * getopt_internal --
- *	Parse argc/argv argument vector.  Called by user level routines.
- */
-static int
-getopt_internal(int nargc, char * const *nargv, const char *options,
-	const struct option *long_options, int *idx, int flags)
-{
-	char *oli;				/* option letter list index */
-	int optchar, short_too;
-	static int posixly_correct = -1;
-
-	if (options == NULL)
-		return (-1);
-
-	/*
-	 * XXX Some GNU programs (like cvs) set optind to 0 instead of
-	 * XXX using optreset.  Work around this braindamage.
-	 */
-	if (optind == 0)
-		optind = optreset = 1;
-
-	/*
-	 * Disable GNU extensions if POSIXLY_CORRECT is set or options
-	 * string begins with a '+'.
-	 *
-	 * CV, 2009-12-14: Check POSIXLY_CORRECT anew if optind == 0 or
-	 *                 optreset != 0 for GNU compatibility.
-	 */
-#ifndef _MSC_VER
-	if (posixly_correct == -1 || optreset != 0)
-		posixly_correct = (getenv("POSIXLY_CORRECT") != NULL);
-#endif
-	if (*options == '-')
-		flags |= FLAG_ALLARGS;
-	else if (posixly_correct || *options == '+')
-		flags &= ~FLAG_PERMUTE;
-	if (*options == '+' || *options == '-')
-		options++;
-
-	optarg = NULL;
-	if (optreset)
-		nonopt_start = nonopt_end = -1;
-start:
-	if (optreset || !*place) {		/* update scanning pointer */
-		optreset = 0;
-		if (optind >= nargc) {          /* end of argument vector */
-			place = EMSG;
-			if (nonopt_end != -1) {
-				/* do permutation, if we have to */
-				permute_args(nonopt_start, nonopt_end,
-				    optind, nargv);
-				optind -= nonopt_end - nonopt_start;
-			}
-			else if (nonopt_start != -1) {
-				/*
-				 * If we skipped non-options, set optind
-				 * to the first of them.
-				 */
-				optind = nonopt_start;
-			}
-			nonopt_start = nonopt_end = -1;
-			return (-1);
-		}
-		if (*(place = nargv[optind]) != '-' ||
-		    (place[1] == '\0' && strchr(options, '-') == NULL)) {
-			place = EMSG;		/* found non-option */
-			if (flags & FLAG_ALLARGS) {
-				/*
-				 * GNU extension:
-				 * return non-option as argument to option 1
-				 */
-				optarg = nargv[optind++];
-				return (INORDER);
-			}
-			if (!(flags & FLAG_PERMUTE)) {
-				/*
-				 * If no permutation wanted, stop parsing
-				 * at first non-option.
-				 */
-				return (-1);
-			}
-			/* do permutation */
-			if (nonopt_start == -1)
-				nonopt_start = optind;
-			else if (nonopt_end != -1) {
-				permute_args(nonopt_start, nonopt_end,
-				    optind, nargv);
-				nonopt_start = optind -
-				    (nonopt_end - nonopt_start);
-				nonopt_end = -1;
-			}
-			optind++;
-			/* process next argument */
-			goto start;
-		}
-		if (nonopt_start != -1 && nonopt_end == -1)
-			nonopt_end = optind;
-
-		/*
-		 * If we have "-" do nothing, if "--" we are done.
-		 */
-		if (place[1] != '\0' && *++place == '-' && place[1] == '\0') {
-			optind++;
-			place = EMSG;
-			/*
-			 * We found an option (--), so if we skipped
-			 * non-options, we have to permute.
-			 */
-			if (nonopt_end != -1) {
-				permute_args(nonopt_start, nonopt_end,
-				    optind, nargv);
-				optind -= nonopt_end - nonopt_start;
-			}
-			nonopt_start = nonopt_end = -1;
-			return (-1);
-		}
-	}
-
-	/*
-	 * Check long options if:
-	 *  1) we were passed some
-	 *  2) the arg is not just "-"
-	 *  3) either the arg starts with -- or we are getopt_long_only()
-	 */
-	if (long_options != NULL && place != nargv[optind] &&
-	    (*place == '-' || (flags & FLAG_LONGONLY))) {
-		short_too = 0;
-		if (*place == '-')
-			place++;		/* --foo long option */
-		else if (*place != ':' && strchr(options, *place) != NULL)
-			short_too = 1;		/* could be short option too */
-
-		optchar = parse_long_options(nargv, options, long_options,
-		    idx, short_too);
-		if (optchar != -1) {
-			place = EMSG;
-			return (optchar);
-		}
-	}
-
-	if ((optchar = (int)*place++) == (int)':' ||
-	    (optchar == (int)'-' && *place != '\0') ||
-	    (oli = strchr(options, optchar)) == NULL) {
-		/*
-		 * If the user specified "-" and  '-' isn't listed in
-		 * options, return -1 (non-option) as per POSIX.
-		 * Otherwise, it is an unknown option character (or ':').
-		 */
-		if (optchar == (int)'-' && *place == '\0')
-			return (-1);
-		if (!*place)
-			++optind;
-		if (PRINT_ERROR)
-			warnx(illoptchar, optchar);
-		optopt = optchar;
-		return (BADCH);
-	}
-	if (long_options != NULL && optchar == 'W' && oli[1] == ';') {
-		/* -W long-option */
-		if (*place)			/* no space */
-			/* NOTHING */;
-		else if (++optind >= nargc) {	/* no arg */
-			place = EMSG;
-			if (PRINT_ERROR)
-				warnx(recargchar, optchar);
-			optopt = optchar;
-			return (BADARG);
-		} else				/* white space */
-			place = nargv[optind];
-		optchar = parse_long_options(nargv, options, long_options,
-		    idx, 0);
-		place = EMSG;
-		return (optchar);
-	}
-	if (*++oli != ':') {			/* doesn't take argument */
-		if (!*place)
-			++optind;
-	} else {				/* takes (optional) argument */
-		optarg = NULL;
-		if (*place)			/* no white space */
-			optarg = place;
-		else if (oli[1] != ':') {	/* arg not optional */
-			if (++optind >= nargc) {	/* no arg */
-				place = EMSG;
-				if (PRINT_ERROR)
-					warnx(recargchar, optchar);
-				optopt = optchar;
-				return (BADARG);
-			} else
-				optarg = nargv[optind];
-		}
-		place = EMSG;
-		++optind;
-	}
-	/* dump back option letter */
-	return (optchar);
-}
-
-#ifdef REPLACE_GETOPT
-/*
- * getopt --
- *	Parse argc/argv argument vector.
- *
- * [eventually this will replace the BSD getopt]
- */
-int
-getopt(int nargc, char * const *nargv, const char *options)
-{
-
-	/*
-	 * We don't pass FLAG_PERMUTE to getopt_internal() since
-	 * the BSD getopt(3) (unlike GNU) has never done this.
-	 *
-	 * Furthermore, since many privileged programs call getopt()
-	 * before dropping privileges it makes sense to keep things
-	 * as simple (and bug-free) as possible.
-	 */
-	return (getopt_internal(nargc, nargv, options, NULL, NULL, 0));
-}
-#endif /* REPLACE_GETOPT */
-
-/*
- * getopt_long --
- *	Parse argc/argv argument vector.
- */
-int
-getopt_long(int nargc, char * const *nargv, const char *options,
-    const struct option *long_options, int *idx)
-{
-
-	return (getopt_internal(nargc, nargv, options, long_options, idx,
-	    FLAG_PERMUTE));
-}
-
-/*
- * getopt_long_only --
- *	Parse argc/argv argument vector.
- */
-int
-getopt_long_only(int nargc, char * const *nargv, const char *options,
-    const struct option *long_options, int *idx)
-{
-
-	return (getopt_internal(nargc, nargv, options, long_options, idx,
-	    FLAG_PERMUTE|FLAG_LONGONLY));
-}

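The file above is a vendored copy of the BSD getopt_long(3). As a worked check of the permutation logic: with 2 skipped non-options and 4 options, permute_args() performs gcd(2,4) = 2 cycles of (2+4)/2 = 3 swaps each. Below is a minimal sketch (not part of the commit) of how a Windows port would drive the shim; the option names and the "b:v" optstring are hypothetical.

    #include <stdio.h>
    #include "wingetopt.h"

    int main(int argc, char **argv)
    {
        /* Hypothetical options, for illustration only. */
        static const struct option longopts[] = {
            { "broker",  required_argument, NULL, 'b' },
            { "verbose", no_argument,       NULL, 'v' },
            { NULL, 0, NULL, 0 }
        };
        int opt;

        /* getopt_long() runs with FLAG_PERMUTE, so non-options are
         * moved to the end of argv by permute_args() while parsing. */
        while ((opt = getopt_long(argc, argv, "b:v", longopts, NULL)) != -1) {
            switch (opt) {
            case 'b': printf("broker = %s\n", optarg); break;
            case 'v': printf("verbose\n");             break;
            default:  return 1; /* '?' on unknown option or missing arg */
            }
        }
        return 0;
    }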
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/win32/wingetopt.h
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/win32/wingetopt.h b/thirdparty/librdkafka-0.11.1/win32/wingetopt.h
deleted file mode 100644
index 260915b..0000000
--- a/thirdparty/librdkafka-0.11.1/win32/wingetopt.h
+++ /dev/null
@@ -1,95 +0,0 @@
-#ifndef __GETOPT_H__
-/**
- * DISCLAIMER
- * This file has no copyright assigned and is placed in the Public Domain.
- * This file is a part of the w64 mingw-runtime package.
- *
- * The w64 mingw-runtime package and its code is distributed in the hope that it 
- * will be useful but WITHOUT ANY WARRANTY.  ALL WARRANTIES, EXPRESSED OR 
- * IMPLIED ARE HEREBY DISCLAIMED.  This includes but is not limited to 
- * warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- */
-
-#define __GETOPT_H__
-
-/* All the headers include this file. */
-#include <crtdefs.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-extern int optind;		/* index of first non-option in argv      */
-extern int optopt;		/* single option character, as parsed     */
-extern int opterr;		/* flag to enable built-in diagnostics... */
-				/* (user may set to zero, to suppress)    */
-
-extern char *optarg;		/* pointer to argument of current option  */
-
-extern int getopt(int nargc, char * const *nargv, const char *options);
-
-#ifdef _BSD_SOURCE
-/*
- * BSD adds the non-standard `optreset' feature, for reinitialisation
- * of `getopt' parsing.  We support this feature, for applications which
- * proclaim their BSD heritage, before including this header; however,
- * to maintain portability, developers are advised to avoid it.
- */
-# define optreset  __mingw_optreset
-extern int optreset;
-#endif
-#ifdef __cplusplus
-}
-#endif
-/*
- * POSIX requires the `getopt' API to be specified in `unistd.h';
- * thus, `unistd.h' includes this header.  However, we do not want
- * to expose the `getopt_long' or `getopt_long_only' APIs, when
- * included in this manner.  Thus, close the standard __GETOPT_H__
- * declarations block, and open an additional __GETOPT_LONG_H__
- * specific block, only when *not* __UNISTD_H_SOURCED__, in which
- * to declare the extended API.
- */
-#endif /* !defined(__GETOPT_H__) */
-
-#if !defined(__UNISTD_H_SOURCED__) && !defined(__GETOPT_LONG_H__)
-#define __GETOPT_LONG_H__
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-struct option		/* specification for a long form option...	*/
-{
-  const char *name;		/* option name, without leading hyphens */
-  int         has_arg;		/* does it take an argument?		*/
-  int        *flag;		/* where to save its status, or NULL	*/
-  int         val;		/* its associated status value		*/
-};
-
-enum    		/* permitted values for its `has_arg' field...	*/
-{
-  no_argument = 0,      	/* option never takes an argument	*/
-  required_argument,		/* option always requires an argument	*/
-  optional_argument		/* option may take an argument		*/
-};
-
-extern int getopt_long(int nargc, char * const *nargv, const char *options,
-    const struct option *long_options, int *idx);
-extern int getopt_long_only(int nargc, char * const *nargv, const char *options,
-    const struct option *long_options, int *idx);
-/*
- * Previous MinGW implementation had...
- */
-#ifndef HAVE_DECL_GETOPT
-/*
- * ...for the long form API only; keep this for compatibility.
- */
-# define HAVE_DECL_GETOPT	1
-#endif
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* !defined(__UNISTD_H_SOURCED__) && !defined(__GETOPT_LONG_H__) */

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.1/win32/wintime.h
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.1/win32/wintime.h b/thirdparty/librdkafka-0.11.1/win32/wintime.h
deleted file mode 100644
index 9db7c7e..0000000
--- a/thirdparty/librdkafka-0.11.1/win32/wintime.h
+++ /dev/null
@@ -1,29 +0,0 @@
-#pragma once
-
-/**
- * gettimeofday() for Win32 from http://stackoverflow.com/questions/10905892/equivalent-of-gettimeday-for-windows
- */
-#define WIN32_LEAN_AND_MEAN
-#include <Windows.h>
-#include <stdint.h> // portable: uint64_t   MSVC: __int64 
-
-static int gettimeofday(struct timeval * tp, struct timezone * tzp)
-{
-        // Note: some broken versions only have 8 trailing zeros, the correct epoch has 9 trailing zeros
-        // This magic number is the number of 100 nanosecond intervals since January 1, 1601 (UTC)
-        // until 00:00:00 January 1, 1970 
-        static const uint64_t EPOCH = ((uint64_t)116444736000000000ULL);
-
-        SYSTEMTIME  system_time;
-        FILETIME    file_time;
-        uint64_t    time;
-
-        GetSystemTime(&system_time);
-        SystemTimeToFileTime(&system_time, &file_time);
-        time = ((uint64_t)file_time.dwLowDateTime);
-        time += ((uint64_t)file_time.dwHighDateTime) << 32;
-
-        tp->tv_sec = (long)((time - EPOCH) / 10000000L);
-        tp->tv_usec = (long)(system_time.wMilliseconds * 1000);
-        return 0;
-}
\ No newline at end of file

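As a sanity check on the shim above (not part of the commit): there are 134774 days between 1601-01-01 and 1970-01-01, so the offset is 134774 days * 86400 s/day * 10^7 (100 ns ticks per second) = 116444736000000000, matching the EPOCH constant. A minimal usage sketch follows, assuming the includer supplies struct timeval (e.g. via winsock2.h, which WIN32_LEAN_AND_MEAN keeps Windows.h itself from pulling in):

    #include <stdio.h>
    #include <winsock2.h>  /* struct timeval; an assumption, see note above */
    #include "wintime.h"

    int main(void)
    {
        struct timeval tv;
        /* The shim always returns 0 and fills in seconds/microseconds
         * relative to the Unix epoch. */
        if (gettimeofday(&tv, NULL) == 0)
            printf("unix time: %ld.%06ld\n", (long)tv.tv_sec, (long)tv.tv_usec);
        return 0;
    }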
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/.appveyor.yml
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/.appveyor.yml b/thirdparty/librdkafka-0.11.4/.appveyor.yml
new file mode 100644
index 0000000..2cb8722
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/.appveyor.yml
@@ -0,0 +1,88 @@
+version: 0.11.4-R-pre{build}
+pull_requests:
+  do_not_increment_build_number: true
+image: Visual Studio 2013
+configuration: Release
+environment:
+  matrix:
+  - platform: x64
+  - platform: win32
+install:
+- ps: "$OpenSSLVersion = \"1_0_2o\"\n$OpenSSLExe = \"OpenSSL-$OpenSSLVersion.exe\"\n\nRemove-Item C:\\OpenSSL-Win32 -recurse\nRemove-Item C:\\OpenSSL-Win64 -recurse\n\nWrite-Host \"Installing OpenSSL v1.0 32-bit ...\" -ForegroundColor Cyan\nWrite-Host \"Downloading...\"\n$exePath = \"$($env:USERPROFILE)\\Win32OpenSSL-1_0_2o.exe\"\n(New-Object Net.WebClient).DownloadFile('https://slproweb.com/download/Win32OpenSSL-1_0_2o.exe', $exePath)\nWrite-Host \"Installing...\"\ncmd /c start /wait $exePath /silent /verysilent /sp- /suppressmsgboxes /DIR=C:\\OpenSSL-Win32\nWrite-Host \"Installed\" -ForegroundColor Green\n\nWrite-Host \"Installing OpenSSL v1.0 64-bit ...\" -ForegroundColor Cyan\nWrite-Host \"Downloading...\"\n$exePath = \"$($env:USERPROFILE)\\Win64OpenSSL-1_0_2o.exe\"\n(New-Object Net.WebClient).DownloadFile('https://slproweb.com/download/Win64OpenSSL-1_0_2o.exe', $exePath)\nWrite-Host \"Installing...\"\ncmd /c start /wait $exePath /silent /verysilent /sp- /suppressmsgboxes /DIR=C:\\OpenSSL-Win64\nWrite-Host \"Installed\" -ForegroundColor Green\n\nif (!(Test-Path(\"C:\\OpenSSL-Win32\"))) {\n  echo \"Downloading https://slproweb.com/download/Win32$OpenSSLExe\"\n  Start-FileDownload 'https://slproweb.com/download/Win32$OpenSSLExe'\n  Start-Process \"Win32$OpenSSLExe\" -ArgumentList \"/silent /verysilent /sp- /suppressmsgboxes\" -Wait\n} else {\n   echo \"OpenSSL-Win32 already exists: not downloading\"\n}\n\nif (!(Test-Path(\"C:\\OpenSSL-Win64\"))) {\n  echo \"Downloading https://slproweb.com/download/Win64$OpenSSLExe\"\n  Start-FileDownload 'https://slproweb.com/download/Win64$OpenSSLExe' \n  Start-Process \"Win64$OpenSSLExe\" -ArgumentList \"/silent /verysilent /sp- /suppressmsgboxes\" -Wait\n} else {\n   echo \"OpenSSL-Win64 already exists: not downloading\"\n}\n\n\n\n# Download the CoApp tools.\n$msiPath = \"$($env:USERPROFILE)\\CoApp.Tools.Powershell.msi\"\n(New-Object Net.WebClient).DownloadFile('http://coapp.org/files/CoApp.Tools.Powershell.msi', $msiPath)\n\n# Install the CoApp tools from the downloaded .msi.\nStart-Process -FilePath msiexec -ArgumentList /i, $msiPath, /quiet -Wait\n\n# Make the tools available for later PS scripts to use.\n$env:PSModulePath = $env:PSModulePath + ';C:\\Program Files (x86)\\Outercurve Foundation\\Modules'\nImport-Module CoApp\n\n# Install NuGet\n#Install-PackageProvider NuGet -MinimumVersion '2.8.5.201' -Force\n#Import-PackageProvider NuGet -MinimumVersion '2.8.5.201' -Force\n\n# Install CoApp for creating nuget packages\n#$msiPath = \"$($env:USERPROFILE)\\CoApp.Tools.Powershell.msi\"\n#(New-Object #Net.WebClient).DownloadFile('http://downloads.coapp.org/files/CoApp.Tools.Powershell.msi', $msiPath)\n#cmd /c start /wait msiexec /i \"$msiPath\" /quiet\n\n# Install CoApp module\n#Install-Module CoApp -Force"
+cache:
+- c:\OpenSSL-Win32
+- c:\OpenSSL-Win64
+nuget:
+  account_feed: true
+  project_feed: true
+  disable_publish_on_pr: true
+before_build:
+- cmd: nuget restore win32/librdkafka.sln
+build:
+  project: win32/librdkafka.sln
+  publish_nuget: true
+  publish_nuget_symbols: true
+  include_nuget_references: true
+  parallel: true
+  verbosity: normal
+test_script:
+- cmd: if exist DISABLED\win32\outdir\v140 ( win32\outdir\v140\%PLATFORM%\%CONFIGURATION%\tests.exe -l -p1 ) else ( win32\outdir\v120\%PLATFORM%\%CONFIGURATION%\tests.exe -l -p1 )
+artifacts:
+- path: test_report*.json
+  name: Test report
+- path: '*.nupkg'
+  name: Packages
+- path: '**\*.dll'
+  name: Libraries
+- path: '**\*.lib'
+  name: Libraries
+- path: '**\*.pdb'
+  name: Libraries
+- path: '**\*.exe'
+  name: Executables
+before_deploy:
+- ps: >-
+    # FIXME: Add to Deployment condition above:
+
+    # APPVEYOR_REPO_TAG = true
+
+
+
+    # This is the CoApp .autopkg file to create.
+
+    $autopkgFile = "win32/librdkafka.autopkg"
+
+
+    # Get the ".autopkg.template" file, replace "@version" with the Appveyor version number, then save to the ".autopkg" file.
+
+    cat ($autopkgFile + ".template") | % { $_ -replace "@version", $env:appveyor_build_version } > $autopkgFile
+
+
+    # Use the CoApp tools to create NuGet native packages from the .autopkg.
+
+    Write-NuGetPackage $autopkgFile
+
+
+    # Push all newly created .nupkg files as Appveyor artifacts for later deployment.
+
+    Get-ChildItem .\*.nupkg | % { Push-AppveyorArtifact $_.FullName -FileName $_.Name }
+deploy:
+- provider: S3
+  access_key_id:
+    secure: t+Xo4x1mYVbqzvUDlnuMgFGp8LjQJNOfsDUAMxBsVH4=
+  secret_access_key:
+    secure: SNziQPPJs4poCHM7dk6OxufUYcGQhMWiNPx6Y1y6DYuWGjPc3K0APGeousLHsbLv
+  region: us-west-1
+  bucket: librdkafka-ci-packages
+  folder: librdkafka/p-librdkafka__bld-appveyor__plat-windows__arch-$(platform)__bldtype-$(configuration)__tag-$(APPVEYOR_REPO_TAG_NAME)__sha-$(APPVEYOR_REPO_COMMIT)__bid-$(APPVEYOR_BUILD_ID)
+  artifact: /.*\.(nupkg)/
+  max_error_retry: 3
+  on:
+    APPVEYOR_REPO_TAG: true
+notifications:
+- provider: Email
+  to:
+  - magnus@edenhill.se
+  on_build_success: false
+  on_build_failure: true
+  on_build_status_changed: true

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/.dir-locals.el
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/.dir-locals.el b/thirdparty/librdkafka-0.11.4/.dir-locals.el
new file mode 100644
index 0000000..22ca922
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/.dir-locals.el
@@ -0,0 +1,3 @@
+( (c-mode . ((c-file-style . "linux"))) )
+((nil . ((compile-command . "LC_ALL=C make -C $(git rev-parse --show-toplevel) -k"))))
+

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/.doozer.json
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/.doozer.json b/thirdparty/librdkafka-0.11.4/.doozer.json
new file mode 100644
index 0000000..27252da
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/.doozer.json
@@ -0,0 +1,110 @@
+{
+  "targets": {
+    "xenial-amd64": {
+
+      "buildenv": "xenial-amd64",
+      "builddeps": [
+        "build-essential",
+        "python",
+        "zlib1g-dev",
+        "libssl-dev",
+        "libsasl2-dev"
+      ],
+      "buildcmd": [
+        "./configure",
+        "make -j ${PARALLEL}",
+        "make -C tests build"
+      ],
+      "testcmd": [
+        "make -C tests run_local"
+      ]
+    },
+
+    "xenial-i386": {
+      "_comment": "including liblz4-dev here to verify that WITH_LZ4_EXT works",
+      "buildenv": "xenial-i386",
+      "builddeps": [
+        "build-essential",
+        "python",
+        "zlib1g-dev",
+        "libssl-dev",
+        "libsasl2-dev",
+        "liblz4-dev"
+      ],
+      "buildcmd": [
+        "./configure",
+        "make -j ${PARALLEL}",
+        "make -C tests build"
+      ],
+      "testcmd": [
+        "make -C tests run_local"
+      ]
+    },
+
+    "xenial-armhf": {
+
+      "buildenv": "xenial-armhf",
+      "builddeps": [
+        "build-essential",
+        "python",
+        "zlib1g-dev",
+        "libssl-dev",
+        "libsasl2-dev"
+      ],
+      "buildcmd": [
+        "./configure",
+        "make -j ${PARALLEL}",
+        "make -j ${PARALLEL} -C tests build"
+      ],
+      "testcmd": [
+        "cd tests",
+        "./run-test.sh -p1 -l ./merged",
+        "cd .."
+      ]
+    },
+
+    "stretch-mips": {
+
+      "buildenv": "stretch-mips",
+      "builddeps": [
+        "build-essential",
+        "python",
+        "zlib1g-dev",
+        "libssl-dev",
+        "libsasl2-dev"
+      ],
+      "buildcmd": [
+        "./configure",
+        "make -j ${PARALLEL}",
+        "make -j ${PARALLEL} -C tests build"
+      ],
+      "testcmd": [
+        "cd tests",
+        "./run-test.sh -p1 -l ./merged",
+        "cd .."
+      ]
+    },
+
+    "cmake-xenial-amd64": {
+
+      "buildenv": "xenial-amd64",
+      "builddeps": [
+        "build-essential",
+        "python",
+        "zlib1g-dev",
+        "libssl-dev",
+        "libsasl2-dev",
+        "cmake"
+      ],
+      "buildcmd": [
+        "cmake -H. -B_builds -DCMAKE_VERBOSE_MAKEFILE=ON -DCMAKE_BUILD_TYPE=Debug",
+        "cmake --build _builds"
+      ],
+      "testcmd": [
+        "cd _builds",
+        "ctest -VV -R RdKafkaTestBrokerLess"
+      ]
+    }
+  },
+  "artifacts": ["config.log", "Makefile.config", "config.h"]
+}

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/.github/ISSUE_TEMPLATE
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/.github/ISSUE_TEMPLATE b/thirdparty/librdkafka-0.11.4/.github/ISSUE_TEMPLATE
new file mode 100644
index 0000000..eb538b3
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/.github/ISSUE_TEMPLATE
@@ -0,0 +1,32 @@
+Read the FAQ first: https://github.com/edenhill/librdkafka/wiki/FAQ
+
+
+
+Description
+===========
+<your issue description goes here>
+
+
+How to reproduce
+================
+<your steps to reproduce go here, or remove this section if not relevant>
+
+
+**IMPORTANT**: Always try to reproduce the issue on the latest released version (see https://github.com/edenhill/librdkafka/releases); if it can't be reproduced on the latest version, the issue has already been fixed.
+
+
+Checklist
+=========
+
+**IMPORTANT**: We will close issues where the checklist has not been completed.
+
+Please provide the following information:
+
+ - [x] librdkafka version (release number or git tag): `<REPLACE with e.g., v0.10.5 or a git sha. NOT "latest" or "current">`
+ - [ ] Apache Kafka version: `<REPLACE with e.g., 0.10.2.3>`
+ - [ ] librdkafka client configuration: `<REPLACE with e.g., message.timeout.ms=123, auto.reset.offset=earliest, ..>`
+ - [ ] Operating system: `<REPLACE with e.g., Centos 5 (x64)>`
+ - [ ] Provide logs (with `debug=..` as necessary) from librdkafka
+ - [ ] Provide broker log excerpts
+ - [ ] Critical issue
+

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/.gitignore
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/.gitignore b/thirdparty/librdkafka-0.11.4/.gitignore
new file mode 100644
index 0000000..0598bca
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/.gitignore
@@ -0,0 +1,28 @@
+config.h
+config.log*
+config.cache
+Makefile.config
+rdkafka*.pc
+*~
+\#*
+*.o
+*.so
+*.so.?
+*.dylib
+*.a
+*.d
+librdkafka*.lds
+core
+vgcore.*
+*dSYM/
+*.offset
+SOURCES
+gmon.out
+*.gz
+*.bz2
+*.deb
+*.rpm
+staging-docs
+tmp
+stats*.json
+test_report*.json

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/.travis.yml
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/.travis.yml b/thirdparty/librdkafka-0.11.4/.travis.yml
new file mode 100644
index 0000000..4154de5
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/.travis.yml
@@ -0,0 +1,42 @@
+language: c
+cache: ccache
+env:
+- ARCH=x64
+compiler:
+- gcc
+- clang
+os:
+- linux
+- osx
+dist: trusty
+sudo: false
+before_install:
+  - if [[ "${TRAVIS_OS_NAME}_$CC" == "linux_gcc" ]]; then sudo make -C packaging/rpm MOCK_CONFIG=el7-x86_64 prepare_ubuntu ; fi
+before_script:
+ - ccache -s || echo "CCache is not available."
+script:
+- rm -rf artifacts dest
+- mkdir dest artifacts
+- if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then CPPFLAGS="-I/usr/local/opt/openssl/include
+  -L/usr/local/opt/openssl/lib" ./configure --enable-static --disable-lz4 --prefix="$PWD/dest" ; else ./configure --enable-static --disable-lz4 --prefix="$PWD/dest" ; fi
+- make -j2 all examples check && make -C tests run_local
+- make install
+- (cd dest && tar cvzf ../artifacts/librdkafka.tar.gz .)
+- if [[ "${TRAVIS_OS_NAME}_$CC" == "linux_gcc" ]]; then sudo make -C packaging/rpm MOCK_CONFIG=el7-x86_64 all copy-artifacts ; fi
+- if [[ "${TRAVIS_OS_NAME}_$CC" == "linux_gcc" ]]; then docker run -it -v $PWD:/v microsoft/dotnet:2-sdk /v/packaging/tools/build-debian.sh /v /v/artifacts/librdkafka-debian9.tgz; fi
+deploy:
+  provider: s3
+  access_key_id:
+    secure: "nGcknL5JZ5XYCEJ96UeDtnLOOidWsfXrk2x91Z9Ip2AyrUtdfZBc8BX16C7SAQbBeb4PQu/OjRBQWTIRqU64ZEQU1Z0lHjxCiGEt5HO0YlXWvZ8OJGAQ0wSmrQED850lWjGW2z5MpDqqxbZyATE8VksW5dtGiHgNuITinVW8Lok="
+  secret_access_key:
+    secure: "J+LygNeoXQImN9E7EARNmcgLpqm6hoRjxwHJaen9opeuSDowKDpZxP7ixSml3BEn2pJJ4kpsdj5A8t5uius+qC4nu9mqSAZcmdKeSmliCbH7kj4J9MR7LBcXk3Uf515QGm7y4nzw+c1PmpteYL5S06Kgqp+KkPRLKTS2NevVZuY="
+  bucket: librdkafka-ci-packages
+  region: us-west-1
+  skip_cleanup: true
+  local-dir: artifacts
+  upload-dir: librdkafka/p-librdkafka__bld-travis__plat-${TRAVIS_OS_NAME}__arch-${ARCH}__tag-${TRAVIS_TAG}__sha-${TRAVIS_COMMIT}__bid-${TRAVIS_JOB_NUMBER}
+  on:
+    condition: "$CC = gcc"
+    repo: edenhill/librdkafka
+    all_branches: true
+    tags: true

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/CMakeLists.txt
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/CMakeLists.txt b/thirdparty/librdkafka-0.11.4/CMakeLists.txt
new file mode 100644
index 0000000..93379e2
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/CMakeLists.txt
@@ -0,0 +1,182 @@
+cmake_minimum_required(VERSION 3.2)
+project(RdKafka)
+
+# Options. No 'RDKAFKA_' prefix to match old C++ code. {
+
+# This option does not actually affect the build, only the C code
+# (see 'rd_kafka_version_str'). With CMake, optimization is normally
+# controlled through the build type (Debug, Release, etc.) instead.
+option(WITHOUT_OPTIMIZATION "Disable optimization" OFF)
+
+option(ENABLE_DEVEL "Enable development asserts, checks, etc" OFF)
+option(ENABLE_REFCNT_DEBUG "Enable refcnt debugging" OFF)
+option(ENABLE_SHAREDPTR_DEBUG "Enable sharedptr debugging" OFF)
+
+set(TRYCOMPILE_SRC_DIR "${CMAKE_CURRENT_LIST_DIR}/packaging/cmake/try_compile")
+
+# ZLIB {
+find_package(ZLIB QUIET)
+if(ZLIB_FOUND)
+  set(with_zlib_default ON)
+else()
+  set(with_zlib_default OFF)
+endif()
+option(WITH_ZLIB "With ZLIB" ${with_zlib_default})
+# }
+
+# LibDL {
+try_compile(
+    WITH_LIBDL
+    "${CMAKE_CURRENT_BINARY_DIR}/try_compile"
+    "${TRYCOMPILE_SRC_DIR}/dlopen_test.c"
+    LINK_LIBRARIES "${CMAKE_DL_LIBS}"
+)
+# }
+
+# WITH_PLUGINS {
+if(WITH_LIBDL)
+  set(with_plugins_default ON)
+else()
+  set(with_plugins_default OFF)
+endif()
+option(WITH_PLUGINS "With plugin support" ${with_plugins_default})
+# }
+
+# OpenSSL {
+if(WITH_BUNDLED_SSL) # option from 'h2o' parent project
+  set(with_ssl_default ON)
+else()
+  find_package(OpenSSL QUIET)
+  if(OpenSSL_FOUND)
+    set(with_ssl_default ON)
+  else()
+    set(with_ssl_default OFF)
+  endif()
+endif()
+option(WITH_SSL "With SSL" ${with_ssl_default})
+# }
+
+# SASL {
+if(WIN32)
+  set(with_sasl_default ON)
+else()
+  include(FindPkgConfig)
+  pkg_check_modules(SASL libsasl2)
+  if(SASL_FOUND)
+    set(with_sasl_default ON)
+  else()
+    try_compile(
+        WITH_SASL_CYRUS_BOOL
+        "${CMAKE_CURRENT_BINARY_DIR}/try_compile"
+        "${TRYCOMPILE_SRC_DIR}/libsasl2_test.c"
+        LINK_LIBRARIES "-lsasl2"
+        )
+     if(WITH_SASL_CYRUS_BOOL)
+        set(with_sasl_default ON)
+        set(SASL_LIBRARIES "-lsasl2")
+     else()
+        set(with_sasl_default OFF)
+     endif()
+  endif()
+endif()
+option(WITH_SASL "With SASL" ${with_sasl_default})
+if(WITH_SASL)
+  if(WITH_SSL)
+    set(WITH_SASL_SCRAM ON)
+  endif()
+  if(NOT WIN32)
+    set(WITH_SASL_CYRUS ON)
+  endif()
+endif()
+# }
+
+# }
+
+option(RDKAFKA_BUILD_EXAMPLES "Build examples" OFF)
+option(RDKAFKA_BUILD_TESTS "Build tests" OFF)
+if(WIN32)
+    option(WITHOUT_WIN32_CONFIG "Avoid including win32_config.h on cmake builds" ON)
+endif(WIN32)
+
+# In:
+# * TRYCOMPILE_SRC_DIR
+# Out:
+# * HAVE_ATOMICS_32
+# * HAVE_ATOMICS_32_SYNC
+# * HAVE_ATOMICS_64
+# * HAVE_ATOMICS_64_SYNC
+# * HAVE_REGEX
+# * HAVE_STRNDUP
+# * LINK_ATOMIC
+include("packaging/cmake/try_compile/rdkafka_setup.cmake")
+
+set(GENERATED_DIR "${CMAKE_CURRENT_BINARY_DIR}/generated")
+
+# In:
+# * WITHOUT_OPTIMIZATION
+# * ENABLE_DEVEL
+# * ENABLE_REFCNT_DEBUG
+# * ENABLE_SHAREDPTR_DEBUG
+# * HAVE_ATOMICS_32
+# * HAVE_ATOMICS_32_SYNC
+# * HAVE_ATOMICS_64
+# * HAVE_ATOMICS_64_SYNC
+# * WITH_ZLIB
+# * WITH_SSL
+# * WITH_SASL
+# * HAVE_REGEX
+# * HAVE_STRNDUP
+configure_file("packaging/cmake/config.h.in" "${GENERATED_DIR}/config.h")
+
+# Installation (https://github.com/forexample/package-example) {
+
+include(GNUInstallDirs)
+
+set(config_install_dir "lib/cmake/${PROJECT_NAME}")
+
+set(generated_dir "${CMAKE_CURRENT_BINARY_DIR}/generated")
+
+set(project_config "${generated_dir}/${PROJECT_NAME}Config.cmake")
+set(targets_export_name "${PROJECT_NAME}Targets")
+set(namespace "${PROJECT_NAME}::")
+
+include(CMakePackageConfigHelpers)
+
+# In:
+#   * targets_export_name
+#   * PROJECT_NAME
+configure_package_config_file(
+    "packaging/cmake/Config.cmake.in"
+    "${project_config}"
+    INSTALL_DESTINATION "${config_install_dir}"
+)
+
+install(
+    FILES "${project_config}"
+    DESTINATION "${config_install_dir}"
+)
+
+install(
+    EXPORT "${targets_export_name}"
+    NAMESPACE "${namespace}"
+    DESTINATION "${config_install_dir}"
+)
+
+install(
+    FILES LICENSES.txt
+    DESTINATION "share/licenses/librdkafka"
+)
+
+# }
+
+add_subdirectory(src)
+add_subdirectory(src-cpp)
+
+if(RDKAFKA_BUILD_EXAMPLES)
+  add_subdirectory(examples)
+endif()
+
+if(RDKAFKA_BUILD_TESTS)
+  enable_testing()
+  add_subdirectory(tests)
+endif()

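The LibDL block in this CMakeLists works by compiling a tiny probe program against ${CMAKE_DL_LIBS}; the probe source lives at packaging/cmake/try_compile/dlopen_test.c, which is not shown in this commit. A plausible minimal form of such a probe (an assumption, for illustration only) is:

    /* Hypothetical sketch of dlopen_test.c: if this compiles and links
     * against ${CMAKE_DL_LIBS}, try_compile() sets WITH_LIBDL to TRUE. */
    #include <dlfcn.h>

    int main(void)
    {
        void *h = dlopen(NULL, RTLD_NOW | RTLD_GLOBAL); /* handle to self */
        if (h)
            dlclose(h);
        return 0;
    }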
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/CODE_OF_CONDUCT.md
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/CODE_OF_CONDUCT.md b/thirdparty/librdkafka-0.11.4/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000..dbbde19
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/CODE_OF_CONDUCT.md
@@ -0,0 +1,46 @@
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation.
+
+## Our Standards
+
+Examples of behavior that contributes to creating a positive environment include:
+
+* Using welcoming and inclusive language
+* Being respectful of differing viewpoints and experiences
+* Gracefully accepting constructive criticism
+* Focusing on what is best for the community
+* Showing empathy towards other community members
+
+Examples of unacceptable behavior by participants include:
+
+* The use of sexualized language or imagery and unwelcome sexual attention or advances
+* Trolling, insulting/derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or electronic address, without explicit permission
+* Other conduct which could reasonably be considered inappropriate in a professional setting
+
+## Our Responsibilities
+
+Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.
+
+## Scope
+
+This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at rdkafka@edenhill.se. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
+
+Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]
+
+[homepage]: http://contributor-covenant.org
+[version]: http://contributor-covenant.org/version/1/4/

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/CONFIGURATION.md
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/CONFIGURATION.md b/thirdparty/librdkafka-0.11.4/CONFIGURATION.md
new file mode 100644
index 0000000..7bc060f
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/CONFIGURATION.md
@@ -0,0 +1,138 @@
+//@file
+## Global configuration properties
+
+Property                                 | C/P | Range           |       Default | Description              
+-----------------------------------------|-----|-----------------|--------------:|--------------------------
+builtin.features                         |  *  |                 | gzip, snappy, ssl, sasl, regex, lz4, sasl_gssapi, sasl_plain, sasl_scram, plugins | Indicates the builtin features for this build of librdkafka. An application can either query this value or attempt to set it with its list of required features to check for library support. <br>*Type: CSV flags*
+client.id                                |  *  |                 |       rdkafka | Client identifier. <br>*Type: string*
+metadata.broker.list                     |  *  |                 |               | Initial list of brokers as a CSV list of broker host or host:port. The application may also use `rd_kafka_brokers_add()` to add brokers during runtime. <br>*Type: string*
+bootstrap.servers                        |  *  |                 |               | Alias for `metadata.broker.list`
+message.max.bytes                        |  *  | 1000 .. 1000000000 |       1000000 | Maximum Kafka protocol request message size. <br>*Type: integer*
+message.copy.max.bytes                   |  *  | 0 .. 1000000000 |         65535 | Maximum size for message to be copied to buffer. Messages larger than this will be passed by reference (zero-copy) at the expense of larger iovecs. <br>*Type: integer*
+receive.message.max.bytes                |  *  | 1000 .. 2147483647 |     100000000 | Maximum Kafka protocol response message size. This serves as a safety precaution to avoid memory exhaustion in case of protocol hiccups. This value is automatically adjusted upwards to be at least `fetch.max.bytes` + 512 to allow for protocol overhead. <br>*Type: integer*
+max.in.flight.requests.per.connection    |  *  | 1 .. 1000000    |       1000000 | Maximum number of in-flight requests per broker connection. This is a generic property applied to all broker communication, however it is primarily relevant to produce requests. In particular, note that other mechanisms limit the number of outstanding consumer fetch request per broker to one. <br>*Type: integer*
+max.in.flight                            |  *  |                 |               | Alias for `max.in.flight.requests.per.connection`
+metadata.request.timeout.ms              |  *  | 10 .. 900000    |         60000 | Non-topic request timeout in milliseconds. This is for metadata requests, etc. <br>*Type: integer*
+topic.metadata.refresh.interval.ms       |  *  | -1 .. 3600000   |        300000 | Topic metadata refresh interval in milliseconds. The metadata is automatically refreshed on error and connect. Use -1 to disable the intervalled refresh. <br>*Type: integer*
+metadata.max.age.ms                      |  *  | 1 .. 86400000   |            -1 | Metadata cache max age. Defaults to metadata.refresh.interval.ms * 3 <br>*Type: integer*
+topic.metadata.refresh.fast.interval.ms  |  *  | 1 .. 60000      |           250 | When a topic loses its leader a new metadata request will be enqueued with this initial interval, exponentially increasing until the topic metadata has been refreshed. This is used to recover quickly from transitioning leader brokers. <br>*Type: integer*
+topic.metadata.refresh.fast.cnt          |  *  | 0 .. 1000       |            10 | *Deprecated: No longer used.* <br>*Type: integer*
+topic.metadata.refresh.sparse            |  *  | true, false     |          true | Sparse metadata requests (consumes less network bandwidth) <br>*Type: boolean*
+topic.blacklist                          |  *  |                 |               | Topic blacklist, a comma-separated list of regular expressions for matching topic names that should be ignored in broker metadata information as if the topics did not exist. <br>*Type: pattern list*
+debug                                    |  *  | generic, broker, topic, metadata, feature, queue, msg, protocol, cgrp, security, fetch, interceptor, plugin, consumer, all |               | A comma-separated list of debug contexts to enable. Detailed Producer debugging: broker,topic,msg. Consumer: consumer,cgrp,topic,fetch <br>*Type: CSV flags*
+socket.timeout.ms                        |  *  | 10 .. 300000    |         60000 | Default timeout for network requests. Producer: ProduceRequests will use the lesser value of socket.timeout.ms and remaining message.timeout.ms for the first message in the batch. Consumer: FetchRequests will use fetch.wait.max.ms + socket.timeout.ms.  <br>*Type: integer*
+socket.blocking.max.ms                   |  *  | 1 .. 60000      |          1000 | Maximum time a broker socket operation may block. A lower value improves responsiveness at the expense of slightly higher CPU usage. **Deprecated** <br>*Type: integer*
+socket.send.buffer.bytes                 |  *  | 0 .. 100000000  |             0 | Broker socket send buffer size. System default is used if 0. <br>*Type: integer*
+socket.receive.buffer.bytes              |  *  | 0 .. 100000000  |             0 | Broker socket receive buffer size. System default is used if 0. <br>*Type: integer*
+socket.keepalive.enable                  |  *  | true, false     |         false | Enable TCP keep-alives (SO_KEEPALIVE) on broker sockets <br>*Type: boolean*
+socket.nagle.disable                     |  *  | true, false     |         false | Disable the Nagle algorithm (TCP_NODELAY). <br>*Type: boolean*
+socket.max.fails                         |  *  | 0 .. 1000000    |             1 | Disconnect from broker when this number of send failures (e.g., timed out requests) is reached. Disable with 0. NOTE: The connection is automatically re-established. <br>*Type: integer*
+broker.address.ttl                       |  *  | 0 .. 86400000   |          1000 | How long to cache the broker address resolving results (milliseconds). <br>*Type: integer*
+broker.address.family                    |  *  | any, v4, v6     |           any | Allowed broker IP address families: any, v4, v6 <br>*Type: enum value*
+reconnect.backoff.jitter.ms              |  *  | 0 .. 3600000    |           500 | Throttle broker reconnection attempts by this value +-50%. <br>*Type: integer*
+statistics.interval.ms                   |  *  | 0 .. 86400000   |             0 | librdkafka statistics emit interval. The application also needs to register a stats callback using `rd_kafka_conf_set_stats_cb()`. The granularity is 1000ms. A value of 0 disables statistics. <br>*Type: integer*
+enabled_events                           |  *  | 0 .. 2147483647 |             0 | See `rd_kafka_conf_set_events()` <br>*Type: integer*
+error_cb                                 |  *  |                 |               | Error callback (set with rd_kafka_conf_set_error_cb()) <br>*Type: pointer*
+throttle_cb                              |  *  |                 |               | Throttle callback (set with rd_kafka_conf_set_throttle_cb()) <br>*Type: pointer*
+stats_cb                                 |  *  |                 |               | Statistics callback (set with rd_kafka_conf_set_stats_cb()) <br>*Type: pointer*
+log_cb                                   |  *  |                 |               | Log callback (set with rd_kafka_conf_set_log_cb()) <br>*Type: pointer*
+log_level                                |  *  | 0 .. 7          |             6 | Logging level (syslog(3) levels) <br>*Type: integer*
+log.queue                                |  *  | true, false     |         false | Disable spontaneous log_cb from internal librdkafka threads, instead enqueue log messages on queue set with `rd_kafka_set_log_queue()` and serve log callbacks or events through the standard poll APIs. **NOTE**: Log messages will linger in a temporary queue until the log queue has been set. <br>*Type: boolean*
+log.thread.name                          |  *  | true, false     |          true | Print internal thread name in log messages (useful for debugging librdkafka internals) <br>*Type: boolean*
+log.connection.close                     |  *  | true, false     |          true | Log broker disconnects. It might be useful to turn this off when interacting with 0.9 brokers with an aggressive `connection.max.idle.ms` value. <br>*Type: boolean*
+socket_cb                                |  *  |                 |               | Socket creation callback to provide race-free CLOEXEC <br>*Type: pointer*
+connect_cb                               |  *  |                 |               | Socket connect callback <br>*Type: pointer*
+closesocket_cb                           |  *  |                 |               | Socket close callback <br>*Type: pointer*
+open_cb                                  |  *  |                 |               | File open callback to provide race-free CLOEXEC <br>*Type: pointer*
+opaque                                   |  *  |                 |               | Application opaque (set with rd_kafka_conf_set_opaque()) <br>*Type: pointer*
+default_topic_conf                       |  *  |                 |               | Default topic configuration for automatically subscribed topics <br>*Type: pointer*
+internal.termination.signal              |  *  | 0 .. 128        |             0 | Signal that librdkafka will use to quickly terminate on rd_kafka_destroy(). If this signal is not set then there will be a delay before rd_kafka_wait_destroyed() returns true as internal threads are timing out their system calls. If this signal is set however the delay will be minimal. The application should mask this signal as an internal signal handler is installed. <br>*Type: integer*
+api.version.request                      |  *  | true, false     |          true | Request broker's supported API versions to adjust functionality to available protocol features. If set to false, or the ApiVersionRequest fails, the fallback version `broker.version.fallback` will be used. **NOTE**: Depends on broker version >=0.10.0. If the request is not supported by (an older) broker the `broker.version.fallback` fallback is used. <br>*Type: boolean*
+api.version.request.timeout.ms           |  *  | 1 .. 300000     |         10000 | Timeout for broker API version requests. <br>*Type: integer*
+api.version.fallback.ms                  |  *  | 0 .. 604800000  |       1200000 | Dictates how long the `broker.version.fallback` fallback is used in the case the ApiVersionRequest fails. **NOTE**: The ApiVersionRequest is only issued when a new connection to the broker is made (such as after an upgrade). <br>*Type: integer*
+broker.version.fallback                  |  *  |                 |         0.9.0 | Older broker versions (<0.10.0) provide no way for a client to query for supported protocol features (ApiVersionRequest, see `api.version.request`) making it impossible for the client to know what features it may use. As a workaround a user may set this property to the expected broker version and the client will automatically adjust its feature set accordingly if the ApiVersionRequest fails (or is disabled). The fallback broker version will be used for `api.version.fallback.ms`. Valid values are: 0.9.0, 0.8.2, 0.8.1, 0.8.0. Any other value, such as 0.10.2.1, enables ApiVersionRequests. <br>*Type: string*
+security.protocol                        |  *  | plaintext, ssl, sasl_plaintext, sasl_ssl |     plaintext | Protocol used to communicate with brokers. <br>*Type: enum value*
+ssl.cipher.suites                        |  *  |                 |               | A cipher suite is a named combination of authentication, encryption, MAC and key exchange algorithm used to negotiate the security settings for a network connection using TLS or SSL network protocol. See manual page for `ciphers(1)` and `SSL_CTX_set_cipher_list(3)`. <br>*Type: string*
+ssl.key.location                         |  *  |                 |               | Path to client's private key (PEM) used for authentication. <br>*Type: string*
+ssl.key.password                         |  *  |                 |               | Private key passphrase <br>*Type: string*
+ssl.certificate.location                 |  *  |                 |               | Path to client's public key (PEM) used for authentication. <br>*Type: string*
+ssl.ca.location                          |  *  |                 |               | File or directory path to CA certificate(s) for verifying the broker's key. <br>*Type: string*
+ssl.crl.location                         |  *  |                 |               | Path to CRL for verifying broker's certificate validity. <br>*Type: string*
+ssl.keystore.location                    |  *  |                 |               | Path to client's keystore (PKCS#12) used for authentication. <br>*Type: string*
+ssl.keystore.password                    |  *  |                 |               | Client's keystore (PKCS#12) password. <br>*Type: string*
+sasl.mechanisms                          |  *  |                 |        GSSAPI | SASL mechanism to use for authentication. Supported: GSSAPI, PLAIN, SCRAM-SHA-256, SCRAM-SHA-512. **NOTE**: Despite the plural name, only a single mechanism can be configured. <br>*Type: string*
+sasl.mechanism                           |  *  |                 |               | Alias for `sasl.mechanisms`
+sasl.kerberos.service.name               |  *  |                 |         kafka | Kerberos principal name that Kafka runs as, not including /hostname@REALM <br>*Type: string*
+sasl.kerberos.principal                  |  *  |                 |   kafkaclient | This client's Kerberos principal name. (Not supported on Windows, will use the logon user's principal). <br>*Type: string*
+sasl.kerberos.kinit.cmd                  |  *  |                 | kinit -S "%{sasl.kerberos.service.name}/%{broker.name}" -k -t "%{sasl.kerberos.keytab}" %{sasl.kerberos.principal} | Full kerberos kinit command string, %{config.prop.name} is replaced by corresponding config object value, %{broker.name} returns the broker's hostname. <br>*Type: string*
+sasl.kerberos.keytab                     |  *  |                 |               | Path to Kerberos keytab file. Uses system default if not set. **NOTE**: This is not automatically used but must be added to the template in `sasl.kerberos.kinit.cmd` as ` ... -t %{sasl.kerberos.keytab}`. <br>*Type: string*
+sasl.kerberos.min.time.before.relogin    |  *  | 1 .. 86400000   |         60000 | Minimum time in milliseconds between key refresh attempts. <br>*Type: integer*
+sasl.username                            |  *  |                 |               | SASL username for use with the PLAIN and SASL-SCRAM-.. mechanisms <br>*Type: string*
+sasl.password                            |  *  |                 |               | SASL password for use with the PLAIN and SASL-SCRAM-.. mechanisms <br>*Type: string*
+plugin.library.paths                     |  *  |                 |               | List of plugin libraries to load (; separated). The library search path is platform dependent (see dlopen(3) for Unix and LoadLibrary() for Windows). If no filename extension is specified the platform-specific extension (such as .dll or .so) will be appended automatically. <br>*Type: string*
+interceptors                             |  *  |                 |               | Interceptors added through rd_kafka_conf_interceptor_add_..() and any configuration handled by interceptors. <br>*Type: *
+group.id                                 |  *  |                 |               | Client group id string. All clients sharing the same group.id belong to the same group. <br>*Type: string*
+partition.assignment.strategy            |  *  |                 | range,roundrobin | Name of partition assignment strategy to use when elected group leader assigns partitions to group members. <br>*Type: string*
+session.timeout.ms                       |  *  | 1 .. 3600000    |         30000 | Client group session and failure detection timeout. <br>*Type: integer*
+heartbeat.interval.ms                    |  *  | 1 .. 3600000    |          1000 | Group session keepalive heartbeat interval. <br>*Type: integer*
+group.protocol.type                      |  *  |                 |      consumer | Group protocol type <br>*Type: string*
+coordinator.query.interval.ms            |  *  | 1 .. 3600000    |        600000 | How often to query for the current client group coordinator. If the currently assigned coordinator is down the configured query interval will be divided by ten to more quickly recover in case of coordinator reassignment. <br>*Type: integer*
+enable.auto.commit                       |  C  | true, false     |          true | Automatically and periodically commit offsets in the background. Note: setting this to false does not prevent the consumer from fetching previously committed start offsets. To circumvent this behaviour set specific start offsets per partition in the call to assign(). <br>*Type: boolean*
+auto.commit.interval.ms                  |  C  | 0 .. 86400000   |          5000 | The frequency in milliseconds that the consumer offsets are committed (written) to offset storage. (0 = disable). This setting is used by the high-level consumer. <br>*Type: integer*
+enable.auto.offset.store                 |  C  | true, false     |          true | Automatically store offset of last message provided to application. <br>*Type: boolean*
+queued.min.messages                      |  C  | 1 .. 10000000   |        100000 | Minimum number of messages per topic+partition librdkafka tries to maintain in the local consumer queue. <br>*Type: integer*
+queued.max.messages.kbytes               |  C  | 1 .. 2097151    |       1048576 | Maximum number of kilobytes per topic+partition in the local consumer queue. This value may be overshot by fetch.message.max.bytes. This property has higher priority than queued.min.messages. <br>*Type: integer*
+fetch.wait.max.ms                        |  C  | 0 .. 300000     |           100 | Maximum time the broker may wait to fill the response with fetch.min.bytes. <br>*Type: integer*
+fetch.message.max.bytes                  |  C  | 1 .. 1000000000 |       1048576 | Initial maximum number of bytes per topic+partition to request when fetching messages from the broker. If the client encounters a message larger than this value it will gradually try to increase it until the entire message can be fetched. <br>*Type: integer*
+max.partition.fetch.bytes                |  C  |                 |               | Alias for `fetch.message.max.bytes`
+fetch.max.bytes                          |  C  | 0 .. 2147483135 |      52428800 | Maximum amount of data the broker shall return for a Fetch request. Messages are fetched in batches by the consumer and if the first message batch in the first non-empty partition of the Fetch request is larger than this value, then the message batch will still be returned to ensure the consumer can make progress. The maximum message batch size accepted by the broker is defined via `message.max.bytes` (broker config) or `max.message.bytes` (broker topic config). `fetch.max.bytes` is automatically adjusted upwards to be at least `message.max.bytes` (consumer config). <br>*Type: integer*
+fetch.min.bytes                          |  C  | 1 .. 100000000  |             1 | Minimum number of bytes the broker responds with. If fetch.wait.max.ms expires the accumulated data will be sent to the client regardless of this setting. <br>*Type: integer*
+fetch.error.backoff.ms                   |  C  | 0 .. 300000     |           500 | How long to postpone the next fetch request for a topic+partition in case of a fetch error. <br>*Type: integer*
+offset.store.method                      |  C  | none, file, broker |        broker | Offset commit store method: 'file' - local file store (offset.store.path, et al.), 'broker' - broker commit store (requires Apache Kafka 0.8.2 or later on the broker). <br>*Type: enum value*
+consume_cb                               |  C  |                 |               | Message consume callback (set with rd_kafka_conf_set_consume_cb()) <br>*Type: pointer*
+rebalance_cb                             |  C  |                 |               | Called after consumer group has been rebalanced (set with rd_kafka_conf_set_rebalance_cb()) <br>*Type: pointer*
+offset_commit_cb                         |  C  |                 |               | Offset commit result propagation callback. (set with rd_kafka_conf_set_offset_commit_cb()) <br>*Type: pointer*
+enable.partition.eof                     |  C  | true, false     |          true | Emit RD_KAFKA_RESP_ERR__PARTITION_EOF event whenever the consumer reaches the end of a partition. <br>*Type: boolean*
+check.crcs                               |  C  | true, false     |         false | Verify CRC32 of consumed messages, ensuring no on-the-wire or on-disk corruption to the messages occurred. This check incurs slightly increased CPU usage. <br>*Type: boolean*
+queue.buffering.max.messages             |  P  | 1 .. 10000000   |        100000 | Maximum number of messages allowed on the producer queue. <br>*Type: integer*
+queue.buffering.max.kbytes               |  P  | 1 .. 2097151    |       1048576 | Maximum total message size sum allowed on the producer queue. This property has higher priority than queue.buffering.max.messages. <br>*Type: integer*
+queue.buffering.max.ms                   |  P  | 0 .. 900000     |             0 | Delay in milliseconds to wait for messages in the producer queue to accumulate before constructing message batches (MessageSets) to transmit to brokers. A higher value allows larger and more effective (less overhead, improved compression) batches of messages to accumulate at the expense of increased message delivery latency. <br>*Type: integer*
+linger.ms                                |  P  |                 |               | Alias for `queue.buffering.max.ms`
+message.send.max.retries                 |  P  | 0 .. 10000000   |             2 | How many times to retry sending a failing MessageSet. **Note:** retrying may cause reordering. <br>*Type: integer*
+retries                                  |  P  |                 |               | Alias for `message.send.max.retries`
+retry.backoff.ms                         |  P  | 1 .. 300000     |           100 | The backoff time in milliseconds before retrying a protocol request. <br>*Type: integer*
+queue.buffering.backpressure.threshold   |  P  | 0 .. 1000000    |            10 | The threshold of outstanding not yet transmitted requests needed to backpressure the producer's message accumulator. A lower number yields larger and more effective batches. <br>*Type: integer*
+compression.codec                        |  P  | none, gzip, snappy, lz4 |          none | Compression codec to use for compressing message sets. This is the default value for all topics and may be overridden by the topic configuration property `compression.codec`. <br>*Type: enum value*
+compression.type                         |  P  |                 |               | Alias for `compression.codec`
+batch.num.messages                       |  P  | 1 .. 1000000    |         10000 | Maximum number of messages batched in one MessageSet. The total MessageSet size is also limited by message.max.bytes. <br>*Type: integer*
+delivery.report.only.error               |  P  | true, false     |         false | Only provide delivery reports for failed messages. <br>*Type: boolean*
+dr_cb                                    |  P  |                 |               | Delivery report callback (set with rd_kafka_conf_set_dr_cb()) <br>*Type: pointer*
+dr_msg_cb                                |  P  |                 |               | Delivery report callback (set with rd_kafka_conf_set_dr_msg_cb()) <br>*Type: pointer*
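+
+As a minimal sketch of how these global properties are applied (the helper
+name is illustrative; the property names are rows from the table above and
+the values are placeholders): properties are set as strings on an
+`rd_kafka_conf_t` object before the client handle is created.
+
+    #include <stdio.h>
+    #include <librdkafka/rdkafka.h>
+
+    static rd_kafka_conf_t *make_conf (void) {
+            rd_kafka_conf_t *conf = rd_kafka_conf_new();
+            char errstr[512];
+
+            /* Each call validates the property name and value. */
+            if (rd_kafka_conf_set(conf, "security.protocol", "sasl_ssl",
+                                  errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK ||
+                rd_kafka_conf_set(conf, "sasl.mechanisms", "PLAIN",
+                                  errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) {
+                    fprintf(stderr, "%% Config failed: %s\n", errstr);
+                    rd_kafka_conf_destroy(conf);
+                    return NULL;
+            }
+            /* Pass the conf to rd_kafka_new(), which takes ownership
+             * of it on success. */
+            return conf;
+    }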
+
+
+## Topic configuration properties
+
+Property                                 | C/P | Range           |       Default | Description              
+-----------------------------------------|-----|-----------------|--------------:|--------------------------
+request.required.acks                    |  P  | -1 .. 1000      |             1 | This field indicates how many acknowledgements the leader broker must receive from ISR brokers before responding to the request: *0*=Broker does not send any response/ack to the client, *1*=Only the leader broker needs to ack the message, *-1* or *all*=Broker will block until the message is committed by all in-sync replicas (ISRs), subject to the broker's `min.insync.replicas` setting, before sending a response. <br>*Type: integer*
+acks                                     |  P  |                 |               | Alias for `request.required.acks`
+request.timeout.ms                       |  P  | 1 .. 900000     |          5000 | The ack timeout of the producer request in milliseconds. This value is only enforced by the broker and relies on `request.required.acks` being != 0. <br>*Type: integer*
+message.timeout.ms                       |  P  | 0 .. 900000     |        300000 | Local message timeout. This value is only enforced locally and limits the time a produced message waits for successful delivery. A time of 0 is infinite. <br>*Type: integer*
+queuing.strategy                         |  P  | fifo, lifo      |          fifo | Producer queuing strategy. FIFO preserves produce ordering, while LIFO prioritizes new messages. WARNING: `lifo` is experimental and subject to change or removal. <br>*Type: enum value*
+produce.offset.report                    |  P  | true, false     |         false | Report offset of produced message back to application. The application must use the `dr_msg_cb` to retrieve the offset from `rd_kafka_message_t.offset`. <br>*Type: boolean*
+partitioner                              |  P  |                 | consistent_random | Partitioner: `random` - random distribution, `consistent` - CRC32 hash of key (Empty and NULL keys are mapped to single partition), `consistent_random` - CRC32 hash of key (Empty and NULL keys are randomly partitioned), `murmur2` - Java Producer compatible Murmur2 hash of key (NULL keys are mapped to single partition), `murmur2_random` - Java Producer compatible Murmur2 hash of key (NULL keys are randomly partitioned. This is functionally equivalent to the default partitioner in the Java Producer.). <br>*Type: string*
+partitioner_cb                           |  P  |                 |               | Custom partitioner callback (set with rd_kafka_topic_conf_set_partitioner_cb()) <br>*Type: pointer*
+msg_order_cmp                            |  P  |                 |               | Message queue ordering comparator (set with rd_kafka_topic_conf_set_msg_order_cmp()). Also see `queuing.strategy`. <br>*Type: pointer*
+opaque                                   |  *  |                 |               | Application opaque (set with rd_kafka_topic_conf_set_opaque()) <br>*Type: pointer*
+compression.codec                        |  P  | none, gzip, snappy, lz4, inherit |       inherit | Compression codec to use for compressing message sets. inherit = inherit global compression.codec configuration. <br>*Type: enum value*
+compression.type                         |  P  |                 |               | Alias for `compression.codec`
+auto.commit.enable                       |  C  | true, false     |          true | If true, periodically commit offset of the last message handed to the application. This committed offset will be used when the process restarts to pick up where it left off. If false, the application will have to call `rd_kafka_offset_store()` to store an offset (optional). **NOTE:** This property should only be used with the simple legacy consumer, when using the high-level KafkaConsumer the global `enable.auto.commit` property must be used instead. **NOTE:** There is currently no zookeeper integration, offsets will be written to broker or local file according to offset.store.method. <br>*Type: boolean*
+enable.auto.commit                       |  C  |                 |               | Alias for `auto.commit.enable`
+auto.commit.interval.ms                  |  C  | 10 .. 86400000  |         60000 | The frequency in milliseconds that the consumer offsets are committed (written) to offset storage. This setting is used by the low-level legacy consumer. <br>*Type: integer*
+auto.offset.reset                        |  C  | smallest, earliest, beginning, largest, latest, end, error |       largest | Action to take when there is no initial offset in offset store or the desired offset is out of range: 'smallest','earliest' - automatically reset the offset to the smallest offset, 'largest','latest' - automatically reset the offset to the largest offset, 'error' - trigger an error which is retrieved by consuming messages and checking 'message->err'. <br>*Type: enum value*
+offset.store.path                        |  C  |                 |             . | Path to local file for storing offsets. If the path is a directory a filename will be automatically generated in that directory based on the topic and partition. <br>*Type: string*
+offset.store.sync.interval.ms            |  C  | -1 .. 86400000  |            -1 | fsync() interval for the offset file, in milliseconds. Use -1 to disable syncing, and 0 for immediate sync after each write. <br>*Type: integer*
+offset.store.method                      |  C  | file, broker    |        broker | Offset commit store method: 'file' - local file store (offset.store.path, et al.), 'broker' - broker commit store (requires "group.id" to be configured and Apache Kafka 0.8.2 or later on the broker). <br>*Type: enum value*
+consume.callback.max.messages            |  C  | 0 .. 1000000    |             0 | Maximum number of messages to dispatch in one `rd_kafka_consume_callback*()` call (0 = unlimited) <br>*Type: integer*
+
+### C/P legend: C = Consumer, P = Producer, * = both
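+
+As a minimal sketch of how topic-level properties are applied (the helper
+name, topic name and values are illustrative): topic properties are set on
+an `rd_kafka_topic_conf_t`, which is then attached to a topic handle (or
+installed as `default_topic_conf` on the global configuration).
+
+    #include <librdkafka/rdkafka.h>
+
+    static rd_kafka_topic_t *make_topic (rd_kafka_t *rk) {
+            rd_kafka_topic_conf_t *tconf = rd_kafka_topic_conf_new();
+            char errstr[512];
+
+            /* "request.required.acks" is a row in the table above;
+             * -1/all waits for all in-sync replicas. */
+            if (rd_kafka_topic_conf_set(tconf, "request.required.acks",
+                                        "-1", errstr, sizeof(errstr))
+                != RD_KAFKA_CONF_OK) {
+                    rd_kafka_topic_conf_destroy(tconf);
+                    return NULL;
+            }
+
+            /* The topic handle takes ownership of tconf on success. */
+            return rd_kafka_topic_new(rk, "mytopic", tconf);
+    }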

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/7528d23e/thirdparty/librdkafka-0.11.4/CONTRIBUTING.md
----------------------------------------------------------------------
diff --git a/thirdparty/librdkafka-0.11.4/CONTRIBUTING.md b/thirdparty/librdkafka-0.11.4/CONTRIBUTING.md
new file mode 100644
index 0000000..5da7c77
--- /dev/null
+++ b/thirdparty/librdkafka-0.11.4/CONTRIBUTING.md
@@ -0,0 +1,271 @@
+# Contributing to librdkafka
+
+(This document is based on [curl's CONTRIBUTE.md](https://github.com/curl/curl/blob/master/docs/CONTRIBUTE.md) - thank you!)
+
+This document is intended to offer guidelines on how to best contribute to the
+librdkafka project. This concerns new features as well as bug fixes and
+general improvements.
+
+### License and copyright
+
+When contributing with code, you agree to put your changes and new code under
+the same license librdkafka is already using unless stated and agreed
+otherwise.
+
+When changing existing source code, you do not alter the copyright of the
+original file(s). The copyright will still be owned by the original creator(s)
+or those who have been assigned copyright by the original author(s).
+
+By submitting a patch to the librdkafka project, you are assumed to have the
+right to the code and to be allowed by your employer or other rights holders
+to hand over that patch/code to us. We will credit you for your changes as
+far as possible, to give credit but also to keep a trace back to who made
+what changes. Please always provide us with your full real name when
+contributing!
+
+Official librdkafka project maintainer(s) assume ownership of all accepted
+submissions.
+
+## Write a good patch
+
+### Follow code style
+
+When writing C code, follow the code style already established in
+the project. Consistent style makes code easier to read and mistakes less
+likely to happen.
+
+See the end of this document for the C style guide to use in librdkafka.
+
+
+### Write Separate Changes
+
+It is annoying when you get a huge patch from someone that is said to fix 511
+odd problems, but discussions and opinions don't agree with 510 of them - or
+509 of them were already fixed in a different way. Then the person merging
+this change needs to extract the single interesting patch from somewhere
+within the huge pile of source, which creates a lot of extra work.
+
+Preferably, each fix that corrects a problem should be in its own patch/commit
+with its own description/commit message stating exactly what it corrects so
+that all changes can be selectively applied by the maintainer or other
+interested parties.
+
+Also, separate changes enable bisecting much better when we track problems
+and regressions in the future.
+
+### Patch Against Recent Sources
+
+Please try to make your patches against the latest master branch.
+
+### Test Cases
+
+Bugfixes should also include a new test case in the regression test suite
+that verifies the bug is fixed.
+Create a new tests/00<freenumber>-<short_bug_description>.c file and
+try to reproduce the issue in its simplest form.
+Verify that the test case fails for earlier versions and passes with your
+bugfix in place.
+
+New features and APIs should also result in an added test case.
+
+Submitted patches must pass all existing tests.
+For more information on the test suite see [tests/README]
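+
+A sketch of what such a test's skeleton might look like (the entry-point
+convention and helpers come from `tests/test.h`; the number, name and
+property used below are illustrative only):
+
+    /* tests/0099-short_bug_description.c (hypothetical number/name) */
+    #include "test.h"
+
+    int main_0099_short_bug_description (int argc, char **argv) {
+            rd_kafka_conf_t *conf = rd_kafka_conf_new();
+            char errstr[512];
+
+            /* Reproduce the issue in its simplest form, then assert
+             * on the expected (fixed) behaviour. */
+            if (rd_kafka_conf_set(conf, "session.timeout.ms", "6000",
+                                  errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK)
+                    TEST_FAIL("%s", errstr);
+
+            rd_kafka_conf_destroy(conf);
+            return 0;
+    }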
+
+
+
+## How to get your changes into the main sources
+
+File a [pull request on github](https://github.com/edenhill/librdkafka/pulls)
+
+Your change will be reviewed and discussed there, and you will be
+expected to correct flaws pointed out and update accordingly, or the change
+risks stalling and eventually just getting deleted without action. As a
+submitter of a change, you are the owner of that change until it has been merged.
+
+Make sure to monitor your PR on github and answer questions and/or
+fix nits/flaws. This is very important. We will take lack of replies as a
+sign that you're not very eager to get your patch accepted, and we tend to
+simply drop such changes.
+
+When you adjust your pull requests after review, please squash the
+commits so that we can review the full updated version more easily
+and keep history cleaner.
+
+For example:
+
+    # Interactive rebase to let you squash/fixup commits
+    $ git rebase -i master
+
+    # Mark fixes-on-fixes commits as 'fixup' (or just 'f') in the
+    # first column. These will be silently integrated into the
+    # previous commit, so make sure to move the fixup-commit to
+    # the line beneath the parent commit.
+
+    # Since this probably rewrote the history of previously pushed
+    # commits you will need to make a force push, which is usually
+    # a bad idea but works well for pull requests.
+    $ git push --force origin your_feature_branch
+
+
+### Write good commit messages
+
+A short guide to writing commit messages in this project:
+
+    ---- start ----
+    [area]: [short line describing the main effect] [(#issuenumber)]
+           -- empty line --
+    [full description, no wider than 72 columns that describe as much as
+    possible as to why this change is made, and possibly what things
+    it fixes and everything else that is related]
+    ---- stop ----
+
+Example:
+
+    cgrp: restart query timer on all heartbeat failures (#10023)
+    
+    If unhandled errors were received in HeartbeatResponse
+    the cgrp could get stuck in a state where it would not
+    refresh its coordinator.
+
+
+
+# librdkafka C style guide
+
+## Function and globals naming
+
+Use self-explanatory hierarchical snake-case naming.
+Pretty much all symbols should start with `rd_kafka_`, followed by
+their subsystem (e.g., `cgrp`, `broker`, `buf`, etc.), followed by an
+action (e.g., `find`, `get`, `clear`, ..).
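+
+For example (illustrative declarations only, not actual librdkafka API):
+
+    /* subsystem "cgrp", action "find" */
+    rd_kafka_cgrp_t *rd_kafka_cgrp_find (rd_kafka_t *rk, const char *group);
+
+    /* subsystem "buf", action "clear" */
+    void rd_kafka_buf_clear (rd_kafka_buf_t *rkbuf);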
+
+
+## Variable naming
+
+For existing types use the type prefix as variable name.
+The type prefix is typically the first part of struct member fields.
+Example:
+
+  * `rd_kafka_broker_t` has field names starting with `rkb_..`, thus broker
+     variables should be named `rkb`
+
+
+For other types use reasonably concise but descriptive names.
+`i` and `j` are typical int iterators.
+
+## Variable declaration
+
+Variables must be declared at the head of a scope; no in-line variable
+declarations are allowed.
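+
+For example (a sketch; the function name is illustrative):
+
+    void rd_kafka_example (void) {
+            int cnt = 0;    /* OK: declared at the head of the scope */
+            int i;
+
+            for (i = 0 ; i < 10 ; i++)
+                    cnt += i;
+
+            /* Not allowed here: int j = cnt;  (in-line declaration) */
+    }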
+
+## Indenting
+
+Use an 8-space indent, same as the Linux kernel.
+In Emacs, use `c-set-style "linux"`.
+For C++, use Google's C++ style.
+
+## Comments
+
+Use `/* .. */` comments, not `// ..`
+
+For functions, use doxygen syntax, e.g.:
+
+    /**
+     * @brief <short description>
+     * ..
+     * @returns <something..>
+     */
+
+
+Make sure to comment non-obvious code and situations where the full
+context of an operation is not easily graspable.
+
+Also make sure to update existing comments when the code changes.
+
+
+## Line length
+
+Try hard to keep line length below 80 characters; when this is not possible,
+exceed it with good reason.
+
+
+## Braces
+
+Braces go on the same line as their enveloping statement:
+
+    int some_func (..) {
+            while (1) {
+                    if (1) {
+                            do something;
+                            ..
+                    } else {
+                            do something else;
+                            ..
+                    }
+            }
+
+            /* Single line scopes should not have braces */
+            if (1)
+                    hi();
+            else if (2)
+                    /* Say hello */
+                    hello();
+            else
+                    bye();
+    }
+
+
+## Spaces
+
+All expression parentheses should be prefixed and suffixed with a single space:
+
+    int some_func (int a) {
+
+        if (1)
+          ....;
+
+        for (i = 0 ; i < 19 ; i++) {
+
+
+        }
+    }
+
+
+Use space around operators:
+
+    int a = 2;
+
+    if (b >= 3)
+            c += 2;
+
+Except for these:
+
+    d++;
+    --e;
+
+
+## New block on new line
+
+New blocks should be on a new line:
+
+    if (1)
+            new();
+    else
+            old();
+
+
+## Parentheses
+
+Don't assume the reader knows C operator precedence by heart for complex
+statements; add parentheses to ease readability.
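+
+For example (a sketch):
+
+    /* Unclear without knowing precedence, and actually a bug:
+     * == binds tighter than &, so this parses as a & (mask == flag). */
+    if (a & mask == flag && cnt)
+            do_thing();
+
+    /* Parenthesized for readability and correctness: */
+    if (((a & mask) == flag) && cnt)
+            do_thing();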
+
+
+## ifdef hell
+
+Avoid ifdefs as much as possible.
+Platform support checking should be performed in configure.librdkafka.
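+
+For example (a sketch; `HAVE_STRNDUP` is assumed to be a macro set by the
+build-time checks in configure.librdkafka):
+
+    #include <stdlib.h>
+    #include <string.h>
+
+    /* One central fallback, defined once, instead of scattering
+     * #ifdef checks throughout the code: */
+    #ifndef HAVE_STRNDUP
+    static char *rd_strndup (const char *s, size_t len) {
+            char *d = malloc(len + 1);
+            if (d) {
+                    memcpy(d, s, len);
+                    d[len] = '\0';
+            }
+            return d;
+    }
+    #endif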
+
+
+
+
+
+# librdkafka C++ style guide
+
+Follow [Google's C++ style guide](https://google.github.io/styleguide/cppguide.html)