Posted to commits@arrow.apache.org by we...@apache.org on 2017/06/22 13:35:41 UTC

[1/6] arrow git commit: ARROW-1104: Integrate in-memory object store into arrow

Repository: arrow
Updated Branches:
  refs/heads/master ef579ca7e -> 5e3430981


http://git-wip-us.apache.org/repos/asf/arrow/blob/5e343098/cpp/src/plasma/thirdparty/xxhash.cc
----------------------------------------------------------------------
diff --git a/cpp/src/plasma/thirdparty/xxhash.cc b/cpp/src/plasma/thirdparty/xxhash.cc
new file mode 100644
index 0000000..f74880b
--- /dev/null
+++ b/cpp/src/plasma/thirdparty/xxhash.cc
@@ -0,0 +1,889 @@
+/*
+*  xxHash - Fast Hash algorithm
+*  Copyright (C) 2012-2016, Yann Collet
+*
+*  BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+*
+*  Redistribution and use in source and binary forms, with or without
+*  modification, are permitted provided that the following conditions are
+*  met:
+*
+*  * Redistributions of source code must retain the above copyright
+*  notice, this list of conditions and the following disclaimer.
+*  * Redistributions in binary form must reproduce the above
+*  copyright notice, this list of conditions and the following disclaimer
+*  in the documentation and/or other materials provided with the
+*  distribution.
+*
+*  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+*  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+*  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+*  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+*  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+*  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+*  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+*  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+*  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+*  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+*  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*
+*  You can contact the author at :
+*  - xxHash homepage: http://www.xxhash.com
+*  - xxHash source repository : https://github.com/Cyan4973/xxHash
+*/
+
+
+/* *************************************
+*  Tuning parameters
+***************************************/
+/*!XXH_FORCE_MEMORY_ACCESS :
+ * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
+ * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
+ * The switch below allows selecting a different access method for improved performance.
+ * Method 0 (default) : use `memcpy()`. Safe and portable.
+ * Method 1 : `__packed` statement. It depends on a compiler extension (i.e., not portable).
+ *            This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
+ * Method 2 : direct access. This method doesn't depend on the compiler but violates the C standard.
+ *            It can generate buggy code on targets which do not support unaligned memory accesses.
+ *            But in some circumstances, it's the only known way to get the most performance (e.g. GCC + ARMv6)
+ * See http://stackoverflow.com/a/32095106/646947 for details.
+ * Prefer these methods in priority order (0 > 1 > 2)
+ */
+#ifndef XXH_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */
+#  if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
+#    define XXH_FORCE_MEMORY_ACCESS 2
+#  elif defined(__INTEL_COMPILER) || \
+  (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) ))
+#    define XXH_FORCE_MEMORY_ACCESS 1
+#  endif
+#endif
+
+/*!XXH_ACCEPT_NULL_INPUT_POINTER :
+ * If the input pointer is a null pointer, xxHash's default behavior is to trigger a memory access error, since it is a bad pointer.
+ * When this option is enabled, xxHash output for null input pointers will be the same as for a zero-length input.
+ * By default, this option is disabled. To enable it, uncomment the define below :
+ */
+/* #define XXH_ACCEPT_NULL_INPUT_POINTER 1 */
+
+/*!XXH_FORCE_NATIVE_FORMAT :
+ * By default, the xxHash library provides endian-independent hash values, based on the little-endian convention.
+ * Results are therefore identical on little-endian and big-endian CPUs.
+ * This comes at a performance cost for big-endian CPUs, since some byte swapping is required to emulate the little-endian format.
+ * Should endian-independence be of no importance for your application, you may set the #define below to 1,
+ * to improve speed on big-endian CPUs.
+ * This option has no impact on little-endian CPUs.
+ */
+#ifndef XXH_FORCE_NATIVE_FORMAT   /* can be defined externally */
+#  define XXH_FORCE_NATIVE_FORMAT 0
+#endif
+
+/*!XXH_FORCE_ALIGN_CHECK :
+ * This is a minor performance trick, only useful with lots of very small keys.
+ * It means : check for aligned/unaligned input.
+ * The check costs one initial branch per hash; set to 0 when the input data
+ * is guaranteed to be aligned.
+ */
+#ifndef XXH_FORCE_ALIGN_CHECK /* can be defined externally */
+#  if defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64)
+#    define XXH_FORCE_ALIGN_CHECK 0
+#  else
+#    define XXH_FORCE_ALIGN_CHECK 1
+#  endif
+#endif
+
+
+/* *************************************
+*  Includes & Memory related functions
+***************************************/
+/*! Modify the local functions below should you wish to use some other memory routines
+*   for malloc(), free() */
+#include <stdlib.h>
+static void* XXH_malloc(size_t s) { return malloc(s); }
+static void  XXH_free  (void* p)  { free(p); }
+/*! and for memcpy() */
+#include <string.h>
+static void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcpy(dest,src,size); }
+
+#define XXH_STATIC_LINKING_ONLY
+#include "xxhash.h"
+
+
+/* *************************************
+*  Compiler Specific Options
+***************************************/
+#ifdef _MSC_VER    /* Visual Studio */
+#  pragma warning(disable : 4127)      /* disable: C4127: conditional expression is constant */
+#  define FORCE_INLINE static __forceinline
+#else
+#  if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */
+#    ifdef __GNUC__
+#      define FORCE_INLINE static inline __attribute__((always_inline))
+#    else
+#      define FORCE_INLINE static inline
+#    endif
+#  else
+#    define FORCE_INLINE static
+#  endif /* __STDC_VERSION__ */
+#endif
+
+
+/* *************************************
+*  Basic Types
+***************************************/
+#ifndef MEM_MODULE
+# if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
+#   include <stdint.h>
+    typedef uint8_t  BYTE;
+    typedef uint16_t U16;
+    typedef uint32_t U32;
+    typedef  int32_t S32;
+# else
+    typedef unsigned char      BYTE;
+    typedef unsigned short     U16;
+    typedef unsigned int       U32;
+    typedef   signed int       S32;
+# endif
+#endif
+
+#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))
+
+/* Force direct memory access. Only works on CPUs which support unaligned memory access in hardware */
+static U32 XXH_read32(const void* memPtr) { return *(const U32*) memPtr; }
+
+#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))
+
+/* `__packed` attributes are safer, but compiler-specific, hence potentially problematic for some compilers */
+/* currently only defined for gcc and icc */
+typedef union { U32 u32; } __attribute__((packed)) unalign;
+static U32 XXH_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }
+
+#else
+
+/* portable and safe solution. Generally efficient.
+ * see : http://stackoverflow.com/a/32095106/646947
+ */
+static U32 XXH_read32(const void* memPtr)
+{
+    U32 val;
+    memcpy(&val, memPtr, sizeof(val));
+    return val;
+}
+
+#endif   /* XXH_FORCE_DIRECT_MEMORY_ACCESS */
+
+
+/* ****************************************
+*  Compiler-specific Functions and Macros
+******************************************/
+#define XXH_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
+
+/* Note : although _rotl exists for MinGW (GCC under Windows), performance seems poor */
+#if defined(_MSC_VER)
+#  define XXH_rotl32(x,r) _rotl(x,r)
+#  define XXH_rotl64(x,r) _rotl64(x,r)
+#else
+#  define XXH_rotl32(x,r) ((x << r) | (x >> (32 - r)))
+#  define XXH_rotl64(x,r) ((x << r) | (x >> (64 - r)))
+#endif
+
+#if defined(_MSC_VER)     /* Visual Studio */
+#  define XXH_swap32 _byteswap_ulong
+#elif XXH_GCC_VERSION >= 403
+#  define XXH_swap32 __builtin_bswap32
+#else
+static U32 XXH_swap32 (U32 x)
+{
+    return  ((x << 24) & 0xff000000 ) |
+            ((x <<  8) & 0x00ff0000 ) |
+            ((x >>  8) & 0x0000ff00 ) |
+            ((x >> 24) & 0x000000ff );
+}
+#endif
+
+
+/* *************************************
+*  Architecture Macros
+***************************************/
+typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess;
+
+/* XXH_CPU_LITTLE_ENDIAN can be defined externally, for example on the compiler command line */
+#ifndef XXH_CPU_LITTLE_ENDIAN
+    static const int g_one = 1;
+#   define XXH_CPU_LITTLE_ENDIAN   (*(const char*)(&g_one))
+#endif
+
+
+/* ***************************
+*  Memory reads
+*****************************/
+typedef enum { XXH_aligned, XXH_unaligned } XXH_alignment;
+
+FORCE_INLINE U32 XXH_readLE32_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
+{
+    if (align==XXH_unaligned)
+        return endian==XXH_littleEndian ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr));
+    else
+        return endian==XXH_littleEndian ? *(const U32*)ptr : XXH_swap32(*(const U32*)ptr);
+}
+
+FORCE_INLINE U32 XXH_readLE32(const void* ptr, XXH_endianess endian)
+{
+    return XXH_readLE32_align(ptr, endian, XXH_unaligned);
+}
+
+static U32 XXH_readBE32(const void* ptr)
+{
+    return XXH_CPU_LITTLE_ENDIAN ? XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr);
+}
+
+
+/* *************************************
+*  Macros
+***************************************/
+#define XXH_STATIC_ASSERT(c)   { enum { XXH_static_assert = 1/(int)(!!(c)) }; }    /* use only *after* variable declarations */
+XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; }
+
+
+/* *******************************************************************
+*  32-bit hash functions
+*********************************************************************/
+static const U32 PRIME32_1 = 2654435761U;
+static const U32 PRIME32_2 = 2246822519U;
+static const U32 PRIME32_3 = 3266489917U;
+static const U32 PRIME32_4 =  668265263U;
+static const U32 PRIME32_5 =  374761393U;
+
+static U32 XXH32_round(U32 seed, U32 input)
+{
+    seed += input * PRIME32_2;
+    seed  = XXH_rotl32(seed, 13);
+    seed *= PRIME32_1;
+    return seed;
+}
+
+FORCE_INLINE U32 XXH32_endian_align(const void* input, size_t len, U32 seed, XXH_endianess endian, XXH_alignment align)
+{
+    const BYTE* p = (const BYTE*)input;
+    const BYTE* bEnd = p + len;
+    U32 h32;
+#define XXH_get32bits(p) XXH_readLE32_align(p, endian, align)
+
+#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
+    if (p==NULL) {
+        len=0;
+        bEnd=p=(const BYTE*)(size_t)16;
+    }
+#endif
+
+    if (len>=16) {
+        const BYTE* const limit = bEnd - 16;
+        U32 v1 = seed + PRIME32_1 + PRIME32_2;
+        U32 v2 = seed + PRIME32_2;
+        U32 v3 = seed + 0;
+        U32 v4 = seed - PRIME32_1;
+
+        do {
+            v1 = XXH32_round(v1, XXH_get32bits(p)); p+=4;
+            v2 = XXH32_round(v2, XXH_get32bits(p)); p+=4;
+            v3 = XXH32_round(v3, XXH_get32bits(p)); p+=4;
+            v4 = XXH32_round(v4, XXH_get32bits(p)); p+=4;
+        } while (p<=limit);
+
+        h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7) + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18);
+    } else {
+        h32  = seed + PRIME32_5;
+    }
+
+    h32 += (U32) len;
+
+    while (p+4<=bEnd) {
+        h32 += XXH_get32bits(p) * PRIME32_3;
+        h32  = XXH_rotl32(h32, 17) * PRIME32_4 ;
+        p+=4;
+    }
+
+    while (p<bEnd) {
+        h32 += (*p) * PRIME32_5;
+        h32 = XXH_rotl32(h32, 11) * PRIME32_1 ;
+        p++;
+    }
+
+    h32 ^= h32 >> 15;
+    h32 *= PRIME32_2;
+    h32 ^= h32 >> 13;
+    h32 *= PRIME32_3;
+    h32 ^= h32 >> 16;
+
+    return h32;
+}
+
+
+XXH_PUBLIC_API unsigned int XXH32 (const void* input, size_t len, unsigned int seed)
+{
+#if 0
+    /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
+    XXH32_state_t state;
+    XXH32_reset(&state, seed);
+    XXH32_update(&state, input, len);
+    return XXH32_digest(&state);
+#else
+    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
+
+    if (XXH_FORCE_ALIGN_CHECK) {
+        if ((((size_t)input) & 3) == 0) {   /* Input is 4-bytes aligned, leverage the speed benefit */
+            if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
+                return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned);
+            else
+                return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned);
+    }   }
+
+    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
+        return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned);
+    else
+        return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned);
+#endif
+}
+
+
+
+/*======   Hash streaming   ======*/
+
+XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void)
+{
+    return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t));
+}
+XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr)
+{
+    XXH_free(statePtr);
+    return XXH_OK;
+}
+
+XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dstState, const XXH32_state_t* srcState)
+{
+    memcpy(dstState, srcState, sizeof(*dstState));
+}
+
+XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, unsigned int seed)
+{
+    XXH32_state_t state;   /* using a local state to memcpy() in order to avoid strict-aliasing warnings */
+    memset(&state, 0, sizeof(state)-4);   /* do not write into reserved, for future removal */
+    state.v1 = seed + PRIME32_1 + PRIME32_2;
+    state.v2 = seed + PRIME32_2;
+    state.v3 = seed + 0;
+    state.v4 = seed - PRIME32_1;
+    memcpy(statePtr, &state, sizeof(state));
+    return XXH_OK;
+}
+
+
+FORCE_INLINE XXH_errorcode XXH32_update_endian (XXH32_state_t* state, const void* input, size_t len, XXH_endianess endian)
+{
+    const BYTE* p = (const BYTE*)input;
+    const BYTE* const bEnd = p + len;
+
+#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
+    if (input==NULL) return XXH_ERROR;
+#endif
+
+    state->total_len_32 += (unsigned)len;
+    state->large_len |= (len>=16) | (state->total_len_32>=16);
+
+    if (state->memsize + len < 16)  {   /* fill in tmp buffer */
+        XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, len);
+        state->memsize += (unsigned)len;
+        return XXH_OK;
+    }
+
+    if (state->memsize) {   /* some data left from previous update */
+        XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, 16-state->memsize);
+        {   const U32* p32 = state->mem32;
+            state->v1 = XXH32_round(state->v1, XXH_readLE32(p32, endian)); p32++;
+            state->v2 = XXH32_round(state->v2, XXH_readLE32(p32, endian)); p32++;
+            state->v3 = XXH32_round(state->v3, XXH_readLE32(p32, endian)); p32++;
+            state->v4 = XXH32_round(state->v4, XXH_readLE32(p32, endian)); p32++;
+        }
+        p += 16-state->memsize;
+        state->memsize = 0;
+    }
+
+    if (p <= bEnd-16) {
+        const BYTE* const limit = bEnd - 16;
+        U32 v1 = state->v1;
+        U32 v2 = state->v2;
+        U32 v3 = state->v3;
+        U32 v4 = state->v4;
+
+        do {
+            v1 = XXH32_round(v1, XXH_readLE32(p, endian)); p+=4;
+            v2 = XXH32_round(v2, XXH_readLE32(p, endian)); p+=4;
+            v3 = XXH32_round(v3, XXH_readLE32(p, endian)); p+=4;
+            v4 = XXH32_round(v4, XXH_readLE32(p, endian)); p+=4;
+        } while (p<=limit);
+
+        state->v1 = v1;
+        state->v2 = v2;
+        state->v3 = v3;
+        state->v4 = v4;
+    }
+
+    if (p < bEnd) {
+        XXH_memcpy(state->mem32, p, (size_t)(bEnd-p));
+        state->memsize = (unsigned)(bEnd-p);
+    }
+
+    return XXH_OK;
+}
+
+XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* state_in, const void* input, size_t len)
+{
+    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
+
+    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
+        return XXH32_update_endian(state_in, input, len, XXH_littleEndian);
+    else
+        return XXH32_update_endian(state_in, input, len, XXH_bigEndian);
+}
+
+
+
+FORCE_INLINE U32 XXH32_digest_endian (const XXH32_state_t* state, XXH_endianess endian)
+{
+    const BYTE * p = (const BYTE*)state->mem32;
+    const BYTE* const bEnd = (const BYTE*)(state->mem32) + state->memsize;
+    U32 h32;
+
+    if (state->large_len) {
+        h32 = XXH_rotl32(state->v1, 1) + XXH_rotl32(state->v2, 7) + XXH_rotl32(state->v3, 12) + XXH_rotl32(state->v4, 18);
+    } else {
+        h32 = state->v3 /* == seed */ + PRIME32_5;
+    }
+
+    h32 += state->total_len_32;
+
+    while (p+4<=bEnd) {
+        h32 += XXH_readLE32(p, endian) * PRIME32_3;
+        h32  = XXH_rotl32(h32, 17) * PRIME32_4;
+        p+=4;
+    }
+
+    while (p<bEnd) {
+        h32 += (*p) * PRIME32_5;
+        h32  = XXH_rotl32(h32, 11) * PRIME32_1;
+        p++;
+    }
+
+    h32 ^= h32 >> 15;
+    h32 *= PRIME32_2;
+    h32 ^= h32 >> 13;
+    h32 *= PRIME32_3;
+    h32 ^= h32 >> 16;
+
+    return h32;
+}
+
+
+XXH_PUBLIC_API unsigned int XXH32_digest (const XXH32_state_t* state_in)
+{
+    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
+
+    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
+        return XXH32_digest_endian(state_in, XXH_littleEndian);
+    else
+        return XXH32_digest_endian(state_in, XXH_bigEndian);
+}
+
+
+/*======   Canonical representation   ======*/
+
+/*! Default XXH result types are plain unsigned 32- and 64-bit integers.
+*   The canonical representation follows the human-readable write convention, i.e. big-endian (most significant digits first).
+*   These functions allow transforming a hash result into and from its canonical format.
+*   This way, hash values can be written into a file or buffer, and remain comparable across different systems and programs.
+*/
+
+XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash)
+{
+    XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t));
+    if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash);
+    memcpy(dst, &hash, sizeof(*dst));
+}
+
+XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src)
+{
+    return XXH_readBE32(src);
+}
+
+
+#ifndef XXH_NO_LONG_LONG
+
+/* *******************************************************************
+*  64-bit hash functions
+*********************************************************************/
+
+/*======   Memory access   ======*/
+
+#ifndef MEM_MODULE
+# define MEM_MODULE
+# if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
+#   include <stdint.h>
+    typedef uint64_t U64;
+# else
+    typedef unsigned long long U64;   /* if your compiler doesn't support unsigned long long, replace by another 64-bit type here. Note that xxhash.h will also need to be updated. */
+# endif
+#endif
+
+
+#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))
+
+/* Force direct memory access. Only works on CPUs which support unaligned memory access in hardware */
+static U64 XXH_read64(const void* memPtr) { return *(const U64*) memPtr; }
+
+#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))
+
+/* `__packed` attributes are safer, but compiler-specific, hence potentially problematic for some compilers */
+/* currently only defined for gcc and icc */
+typedef union { U32 u32; U64 u64; } __attribute__((packed)) unalign64;
+static U64 XXH_read64(const void* ptr) { return ((const unalign64*)ptr)->u64; }
+
+#else
+
+/* portable and safe solution. Generally efficient.
+ * see : http://stackoverflow.com/a/32095106/646947
+ */
+
+static U64 XXH_read64(const void* memPtr)
+{
+    U64 val;
+    memcpy(&val, memPtr, sizeof(val));
+    return val;
+}
+
+#endif   /* XXH_FORCE_DIRECT_MEMORY_ACCESS */
+
+#if defined(_MSC_VER)     /* Visual Studio */
+#  define XXH_swap64 _byteswap_uint64
+#elif XXH_GCC_VERSION >= 403
+#  define XXH_swap64 __builtin_bswap64
+#else
+static U64 XXH_swap64 (U64 x)
+{
+    return  ((x << 56) & 0xff00000000000000ULL) |
+            ((x << 40) & 0x00ff000000000000ULL) |
+            ((x << 24) & 0x0000ff0000000000ULL) |
+            ((x << 8)  & 0x000000ff00000000ULL) |
+            ((x >> 8)  & 0x00000000ff000000ULL) |
+            ((x >> 24) & 0x0000000000ff0000ULL) |
+            ((x >> 40) & 0x000000000000ff00ULL) |
+            ((x >> 56) & 0x00000000000000ffULL);
+}
+#endif
+
+FORCE_INLINE U64 XXH_readLE64_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
+{
+    if (align==XXH_unaligned)
+        return endian==XXH_littleEndian ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr));
+    else
+        return endian==XXH_littleEndian ? *(const U64*)ptr : XXH_swap64(*(const U64*)ptr);
+}
+
+FORCE_INLINE U64 XXH_readLE64(const void* ptr, XXH_endianess endian)
+{
+    return XXH_readLE64_align(ptr, endian, XXH_unaligned);
+}
+
+static U64 XXH_readBE64(const void* ptr)
+{
+    return XXH_CPU_LITTLE_ENDIAN ? XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr);
+}
+
+
+/*======   xxh64   ======*/
+
+static const U64 PRIME64_1 = 11400714785074694791ULL;
+static const U64 PRIME64_2 = 14029467366897019727ULL;
+static const U64 PRIME64_3 =  1609587929392839161ULL;
+static const U64 PRIME64_4 =  9650029242287828579ULL;
+static const U64 PRIME64_5 =  2870177450012600261ULL;
+
+static U64 XXH64_round(U64 acc, U64 input)
+{
+    acc += input * PRIME64_2;
+    acc  = XXH_rotl64(acc, 31);
+    acc *= PRIME64_1;
+    return acc;
+}
+
+static U64 XXH64_mergeRound(U64 acc, U64 val)
+{
+    val  = XXH64_round(0, val);
+    acc ^= val;
+    acc  = acc * PRIME64_1 + PRIME64_4;
+    return acc;
+}
+
+FORCE_INLINE U64 XXH64_endian_align(const void* input, size_t len, U64 seed, XXH_endianess endian, XXH_alignment align)
+{
+    const BYTE* p = (const BYTE*)input;
+    const BYTE* bEnd = p + len;
+    U64 h64;
+#define XXH_get64bits(p) XXH_readLE64_align(p, endian, align)
+
+#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
+    if (p==NULL) {
+        len=0;
+        bEnd=p=(const BYTE*)(size_t)32;
+    }
+#endif
+
+    if (len>=32) {
+        const BYTE* const limit = bEnd - 32;
+        U64 v1 = seed + PRIME64_1 + PRIME64_2;
+        U64 v2 = seed + PRIME64_2;
+        U64 v3 = seed + 0;
+        U64 v4 = seed - PRIME64_1;
+
+        do {
+            v1 = XXH64_round(v1, XXH_get64bits(p)); p+=8;
+            v2 = XXH64_round(v2, XXH_get64bits(p)); p+=8;
+            v3 = XXH64_round(v3, XXH_get64bits(p)); p+=8;
+            v4 = XXH64_round(v4, XXH_get64bits(p)); p+=8;
+        } while (p<=limit);
+
+        h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
+        h64 = XXH64_mergeRound(h64, v1);
+        h64 = XXH64_mergeRound(h64, v2);
+        h64 = XXH64_mergeRound(h64, v3);
+        h64 = XXH64_mergeRound(h64, v4);
+
+    } else {
+        h64  = seed + PRIME64_5;
+    }
+
+    h64 += (U64) len;
+
+    while (p+8<=bEnd) {
+        U64 const k1 = XXH64_round(0, XXH_get64bits(p));
+        h64 ^= k1;
+        h64  = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4;
+        p+=8;
+    }
+
+    if (p+4<=bEnd) {
+        h64 ^= (U64)(XXH_get32bits(p)) * PRIME64_1;
+        h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
+        p+=4;
+    }
+
+    while (p<bEnd) {
+        h64 ^= (*p) * PRIME64_5;
+        h64 = XXH_rotl64(h64, 11) * PRIME64_1;
+        p++;
+    }
+
+    h64 ^= h64 >> 33;
+    h64 *= PRIME64_2;
+    h64 ^= h64 >> 29;
+    h64 *= PRIME64_3;
+    h64 ^= h64 >> 32;
+
+    return h64;
+}
+
+
+XXH_PUBLIC_API unsigned long long XXH64 (const void* input, size_t len, unsigned long long seed)
+{
+#if 0
+    /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
+    XXH64_state_t state;
+    XXH64_reset(&state, seed);
+    XXH64_update(&state, input, len);
+    return XXH64_digest(&state);
+#else
+    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
+
+    if (XXH_FORCE_ALIGN_CHECK) {
+        if ((((size_t)input) & 7)==0) {  /* Input is aligned, let's leverage the speed advantage */
+            if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
+                return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned);
+            else
+                return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned);
+    }   }
+
+    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
+        return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned);
+    else
+        return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned);
+#endif
+}
+
+/*======   Hash Streaming   ======*/
+
+XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void)
+{
+    return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t));
+}
+XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr)
+{
+    XXH_free(statePtr);
+    return XXH_OK;
+}
+
+XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* dstState, const XXH64_state_t* srcState)
+{
+    memcpy(dstState, srcState, sizeof(*dstState));
+}
+
+XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, unsigned long long seed)
+{
+    XXH64_state_t state;   /* using a local state to memcpy() in order to avoid strict-aliasing warnings */
+    memset(&state, 0, sizeof(state)-8);   /* do not write into reserved, for future removal */
+    state.v1 = seed + PRIME64_1 + PRIME64_2;
+    state.v2 = seed + PRIME64_2;
+    state.v3 = seed + 0;
+    state.v4 = seed - PRIME64_1;
+    memcpy(statePtr, &state, sizeof(state));
+    return XXH_OK;
+}
+
+FORCE_INLINE XXH_errorcode XXH64_update_endian (XXH64_state_t* state, const void* input, size_t len, XXH_endianess endian)
+{
+    const BYTE* p = (const BYTE*)input;
+    const BYTE* const bEnd = p + len;
+
+#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
+    if (input==NULL) return XXH_ERROR;
+#endif
+
+    state->total_len += len;
+
+    if (state->memsize + len < 32) {  /* fill in tmp buffer */
+        XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, len);
+        state->memsize += (U32)len;
+        return XXH_OK;
+    }
+
+    if (state->memsize) {   /* tmp buffer is full */
+        XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, 32-state->memsize);
+        state->v1 = XXH64_round(state->v1, XXH_readLE64(state->mem64+0, endian));
+        state->v2 = XXH64_round(state->v2, XXH_readLE64(state->mem64+1, endian));
+        state->v3 = XXH64_round(state->v3, XXH_readLE64(state->mem64+2, endian));
+        state->v4 = XXH64_round(state->v4, XXH_readLE64(state->mem64+3, endian));
+        p += 32-state->memsize;
+        state->memsize = 0;
+    }
+
+    if (p+32 <= bEnd) {
+        const BYTE* const limit = bEnd - 32;
+        U64 v1 = state->v1;
+        U64 v2 = state->v2;
+        U64 v3 = state->v3;
+        U64 v4 = state->v4;
+
+        do {
+            v1 = XXH64_round(v1, XXH_readLE64(p, endian)); p+=8;
+            v2 = XXH64_round(v2, XXH_readLE64(p, endian)); p+=8;
+            v3 = XXH64_round(v3, XXH_readLE64(p, endian)); p+=8;
+            v4 = XXH64_round(v4, XXH_readLE64(p, endian)); p+=8;
+        } while (p<=limit);
+
+        state->v1 = v1;
+        state->v2 = v2;
+        state->v3 = v3;
+        state->v4 = v4;
+    }
+
+    if (p < bEnd) {
+        XXH_memcpy(state->mem64, p, (size_t)(bEnd-p));
+        state->memsize = (unsigned)(bEnd-p);
+    }
+
+    return XXH_OK;
+}
+
+XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* state_in, const void* input, size_t len)
+{
+    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
+
+    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
+        return XXH64_update_endian(state_in, input, len, XXH_littleEndian);
+    else
+        return XXH64_update_endian(state_in, input, len, XXH_bigEndian);
+}
+
+FORCE_INLINE U64 XXH64_digest_endian (const XXH64_state_t* state, XXH_endianess endian)
+{
+    const BYTE * p = (const BYTE*)state->mem64;
+    const BYTE* const bEnd = (const BYTE*)state->mem64 + state->memsize;
+    U64 h64;
+
+    if (state->total_len >= 32) {
+        U64 const v1 = state->v1;
+        U64 const v2 = state->v2;
+        U64 const v3 = state->v3;
+        U64 const v4 = state->v4;
+
+        h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
+        h64 = XXH64_mergeRound(h64, v1);
+        h64 = XXH64_mergeRound(h64, v2);
+        h64 = XXH64_mergeRound(h64, v3);
+        h64 = XXH64_mergeRound(h64, v4);
+    } else {
+        h64  = state->v3 + PRIME64_5;
+    }
+
+    h64 += (U64) state->total_len;
+
+    while (p+8<=bEnd) {
+        U64 const k1 = XXH64_round(0, XXH_readLE64(p, endian));
+        h64 ^= k1;
+        h64  = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4;
+        p+=8;
+    }
+
+    if (p+4<=bEnd) {
+        h64 ^= (U64)(XXH_readLE32(p, endian)) * PRIME64_1;
+        h64  = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
+        p+=4;
+    }
+
+    while (p<bEnd) {
+        h64 ^= (*p) * PRIME64_5;
+        h64  = XXH_rotl64(h64, 11) * PRIME64_1;
+        p++;
+    }
+
+    h64 ^= h64 >> 33;
+    h64 *= PRIME64_2;
+    h64 ^= h64 >> 29;
+    h64 *= PRIME64_3;
+    h64 ^= h64 >> 32;
+
+    return h64;
+}
+
+XXH_PUBLIC_API unsigned long long XXH64_digest (const XXH64_state_t* state_in)
+{
+    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
+
+    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
+        return XXH64_digest_endian(state_in, XXH_littleEndian);
+    else
+        return XXH64_digest_endian(state_in, XXH_bigEndian);
+}
+
+
+/*====== Canonical representation   ======*/
+
+XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash)
+{
+    XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t));
+    if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash);
+    memcpy(dst, &hash, sizeof(*dst));
+}
+
+XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src)
+{
+    return XXH_readBE64(src);
+}
+
+#endif  /* XXH_NO_LONG_LONG */
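
The one-shot entry points above compose with the canonical-representation
helpers as follows. This is a minimal illustrative sketch (not part of the
commit), assuming the vendored header is on the include path:

    #include <stdio.h>
    #include <string.h>
    #include "xxhash.h"

    int main(void) {
        const char data[] = "plasma object payload";
        size_t const len = strlen(data);

        /* One-shot hashing; the seed perturbs the result predictably. */
        unsigned int const h32 = XXH32(data, len, 0);
        unsigned long long const h64 = XXH64(data, len, 0);

        /* The canonical (big-endian) form is the one to write to a file or
         * socket; it round-trips back to the native integer. */
        XXH64_canonical_t canonical;
        XXH64_canonicalFromHash(&canonical, h64);
        unsigned long long const back = XXH64_hashFromCanonical(&canonical);

        printf("XXH32=%08x XXH64=%016llx roundtrip=%s\n",
               h32, h64, back == h64 ? "ok" : "mismatch");
        return 0;
    }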

http://git-wip-us.apache.org/repos/asf/arrow/blob/5e343098/cpp/src/plasma/thirdparty/xxhash.h
----------------------------------------------------------------------
diff --git a/cpp/src/plasma/thirdparty/xxhash.h b/cpp/src/plasma/thirdparty/xxhash.h
new file mode 100644
index 0000000..9d831e0
--- /dev/null
+++ b/cpp/src/plasma/thirdparty/xxhash.h
@@ -0,0 +1,293 @@
+/*
+   xxHash - Extremely Fast Hash algorithm
+   Header File
+   Copyright (C) 2012-2016, Yann Collet.
+
+   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are
+   met:
+
+       * Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+       * Redistributions in binary form must reproduce the above
+   copyright notice, this list of conditions and the following disclaimer
+   in the documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+   You can contact the author at :
+   - xxHash source repository : https://github.com/Cyan4973/xxHash
+*/
+
+/* Notice extracted from xxHash homepage :
+
+xxHash is an extremely fast Hash algorithm, running at RAM speed limits.
+It also successfully passes all tests from the SMHasher suite.
+
+Comparison (single thread, Windows 7 32-bit, using SMHasher on a Core 2 Duo @3GHz)
+
+Name            Speed       Q.Score   Author
+xxHash          5.4 GB/s     10
+CrapWow         3.2 GB/s      2       Andrew
+MurmurHash 3a   2.7 GB/s     10       Austin Appleby
+SpookyHash      2.0 GB/s     10       Bob Jenkins
+SBox            1.4 GB/s      9       Bret Mulvey
+Lookup3         1.2 GB/s      9       Bob Jenkins
+SuperFastHash   1.2 GB/s      1       Paul Hsieh
+CityHash64      1.05 GB/s    10       Pike & Alakuijala
+FNV             0.55 GB/s     5       Fowler, Noll, Vo
+CRC32           0.43 GB/s     9
+MD5-32          0.33 GB/s    10       Ronald L. Rivest
+SHA1-32         0.28 GB/s    10
+
+Q.Score is a measure of the quality of the hash function.
+It depends on successfully passing the SMHasher test set.
+10 is a perfect score.
+
+A 64-bit version, named XXH64, has been available since r35.
+It offers much better speed, but for 64-bit applications only.
+Name     Speed on 64 bits    Speed on 32 bits
+XXH64       13.8 GB/s            1.9 GB/s
+XXH32        6.8 GB/s            6.0 GB/s
+*/
+
+#ifndef XXHASH_H_5627135585666179
+#define XXHASH_H_5627135585666179 1
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+/* ****************************
+*  Definitions
+******************************/
+#include <stddef.h>   /* size_t */
+typedef enum { XXH_OK=0, XXH_ERROR } XXH_errorcode;
+
+
+/* ****************************
+*  API modifier
+******************************/
+/** XXH_PRIVATE_API
+*   This is useful for including xxhash functions in `static` mode
+*   in order to inline them and remove their symbols from the public list.
+*   Methodology :
+*     #define XXH_PRIVATE_API
+*     #include "xxhash.h"
+*   `xxhash.c` is automatically included.
+*   It's not useful to compile and link it as a separate module.
+*/
+#ifdef XXH_PRIVATE_API
+#  ifndef XXH_STATIC_LINKING_ONLY
+#    define XXH_STATIC_LINKING_ONLY
+#  endif
+#  if defined(__GNUC__)
+#    define XXH_PUBLIC_API static __inline __attribute__((unused))
+#  elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
+#    define XXH_PUBLIC_API static inline
+#  elif defined(_MSC_VER)
+#    define XXH_PUBLIC_API static __inline
+#  else
+#    define XXH_PUBLIC_API static   /* this version may generate warnings for unused static functions; disable the relevant warning */
+#  endif
+#else
+#  define XXH_PUBLIC_API   /* do nothing */
+#endif /* XXH_PRIVATE_API */
+
+/*!XXH_NAMESPACE, aka Namespace Emulation :
+
+If you want to include _and expose_ xxHash functions from within your own library,
+but also want to avoid symbol collisions with other libraries which may also include xxHash,
+you can use XXH_NAMESPACE to automatically prefix every public symbol from the xxhash
+library with the value of XXH_NAMESPACE (therefore, avoid NULL and numeric values).
+
+Note that no change is required within the calling program as long as it includes `xxhash.h` :
+regular symbol names will be automatically translated by this header.
+*/
+#ifdef XXH_NAMESPACE
+#  define XXH_CAT(A,B) A##B
+#  define XXH_NAME2(A,B) XXH_CAT(A,B)
+#  define XXH_versionNumber XXH_NAME2(XXH_NAMESPACE, XXH_versionNumber)
+#  define XXH32 XXH_NAME2(XXH_NAMESPACE, XXH32)
+#  define XXH32_createState XXH_NAME2(XXH_NAMESPACE, XXH32_createState)
+#  define XXH32_freeState XXH_NAME2(XXH_NAMESPACE, XXH32_freeState)
+#  define XXH32_reset XXH_NAME2(XXH_NAMESPACE, XXH32_reset)
+#  define XXH32_update XXH_NAME2(XXH_NAMESPACE, XXH32_update)
+#  define XXH32_digest XXH_NAME2(XXH_NAMESPACE, XXH32_digest)
+#  define XXH32_copyState XXH_NAME2(XXH_NAMESPACE, XXH32_copyState)
+#  define XXH32_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH32_canonicalFromHash)
+#  define XXH32_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH32_hashFromCanonical)
+#  define XXH64 XXH_NAME2(XXH_NAMESPACE, XXH64)
+#  define XXH64_createState XXH_NAME2(XXH_NAMESPACE, XXH64_createState)
+#  define XXH64_freeState XXH_NAME2(XXH_NAMESPACE, XXH64_freeState)
+#  define XXH64_reset XXH_NAME2(XXH_NAMESPACE, XXH64_reset)
+#  define XXH64_update XXH_NAME2(XXH_NAMESPACE, XXH64_update)
+#  define XXH64_digest XXH_NAME2(XXH_NAMESPACE, XXH64_digest)
+#  define XXH64_copyState XXH_NAME2(XXH_NAMESPACE, XXH64_copyState)
+#  define XXH64_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH64_canonicalFromHash)
+#  define XXH64_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH64_hashFromCanonical)
+#endif
+
+
+/* *************************************
+*  Version
+***************************************/
+#define XXH_VERSION_MAJOR    0
+#define XXH_VERSION_MINOR    6
+#define XXH_VERSION_RELEASE  2
+#define XXH_VERSION_NUMBER  (XXH_VERSION_MAJOR *100*100 + XXH_VERSION_MINOR *100 + XXH_VERSION_RELEASE)
+XXH_PUBLIC_API unsigned XXH_versionNumber (void);
+
+
+/*-**********************************************************************
+*  32-bit hash
+************************************************************************/
+typedef unsigned int XXH32_hash_t;
+
+/*! XXH32() :
+    Calculate the 32-bit hash of a sequence of "length" bytes stored at memory address "input".
+    The memory between input & input+length must be valid (allocated and read-accessible).
+    "seed" can be used to alter the result predictably.
+    Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark) : 5.4 GB/s */
+XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t length, unsigned int seed);
+
+/*======   Streaming   ======*/
+typedef struct XXH32_state_s XXH32_state_t;   /* incomplete type */
+XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void);
+XXH_PUBLIC_API XXH_errorcode  XXH32_freeState(XXH32_state_t* statePtr);
+XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dst_state, const XXH32_state_t* src_state);
+
+XXH_PUBLIC_API XXH_errorcode XXH32_reset  (XXH32_state_t* statePtr, unsigned int seed);
+XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* statePtr, const void* input, size_t length);
+XXH_PUBLIC_API XXH32_hash_t  XXH32_digest (const XXH32_state_t* statePtr);
+
+/*
+These functions generate the xxHash of an input provided in multiple segments.
+Note that, for small input, they are slower than single-call functions, due to state management.
+For small input, prefer `XXH32()` and `XXH64()` .
+
+XXH state must first be allocated, using XXH*_createState() .
+
+Start a new hash by initializing state with a seed, using XXH*_reset().
+
+Then, feed the hash state by calling XXH*_update() as many times as necessary.
+Obviously, the input must be allocated and read-accessible.
+The function returns an error code, with 0 meaning OK, and any other value meaning there is an error.
+
+Finally, a hash value can be produced anytime, by using XXH*_digest().
+This function returns the nn-bit hash as an unsigned int or unsigned long long.
+
+It's still possible to continue inserting input into the hash state after a digest,
+and generate some new hashes later on, by calling XXH*_digest() again.
+
+When done, free XXH state space if it was allocated dynamically.
+*/
+
+/*======   Canonical representation   ======*/
+
+typedef struct { unsigned char digest[4]; } XXH32_canonical_t;
+XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash);
+XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src);
+
+/* The default result types for XXH functions are primitive unsigned 32- and 64-bit integers.
+*  The canonical representation uses the human-readable write convention, i.e. big-endian (most significant digits first).
+*  These functions allow transformation of hash result into and from its canonical format.
+*  This way, hash values can be written into a file / memory, and remain comparable on different systems and programs.
+*/
+
+
+#ifndef XXH_NO_LONG_LONG
+/*-**********************************************************************
+*  64-bit hash
+************************************************************************/
+typedef unsigned long long XXH64_hash_t;
+
+/*! XXH64() :
+    Calculate the 64-bit hash of a sequence of "length" bytes stored at memory address "input".
+    "seed" can be used to alter the result predictably.
+    This function runs faster on 64-bit systems, but slower on 32-bit systems (see benchmark).
+*/
+XXH_PUBLIC_API XXH64_hash_t XXH64 (const void* input, size_t length, unsigned long long seed);
+
+/*======   Streaming   ======*/
+typedef struct XXH64_state_s XXH64_state_t;   /* incomplete type */
+XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void);
+XXH_PUBLIC_API XXH_errorcode  XXH64_freeState(XXH64_state_t* statePtr);
+XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* dst_state, const XXH64_state_t* src_state);
+
+XXH_PUBLIC_API XXH_errorcode XXH64_reset  (XXH64_state_t* statePtr, unsigned long long seed);
+XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* statePtr, const void* input, size_t length);
+XXH_PUBLIC_API XXH64_hash_t  XXH64_digest (const XXH64_state_t* statePtr);
+
+/*======   Canonical representation   ======*/
+typedef struct { unsigned char digest[8]; } XXH64_canonical_t;
+XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash);
+XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src);
+#endif  /* XXH_NO_LONG_LONG */
+
+
+#ifdef XXH_STATIC_LINKING_ONLY
+
+/* ================================================================================================
+   This section contains definitions which are not guaranteed to remain stable.
+   They may change in future versions, becoming incompatible with a different version of the library.
+   They shall only be used with static linking.
+   Never use these definitions in association with dynamic linking!
+=================================================================================================== */
+
+/* These definitions are only meant to make static allocation of XXH state
+   possible, on the stack or in a struct for example.
+   Never use members directly. */
+
+struct XXH32_state_s {
+   unsigned total_len_32;
+   unsigned large_len;
+   unsigned v1;
+   unsigned v2;
+   unsigned v3;
+   unsigned v4;
+   unsigned mem32[4];   /* buffer defined as U32 for alignment */
+   unsigned memsize;
+   unsigned reserved;   /* never read nor write, will be removed in a future version */
+};   /* typedef'd to XXH32_state_t */
+
+#ifndef XXH_NO_LONG_LONG   /* remove 64-bits support */
+struct XXH64_state_s {
+   unsigned long long total_len;
+   unsigned long long v1;
+   unsigned long long v2;
+   unsigned long long v3;
+   unsigned long long v4;
+   unsigned long long mem64[4];   /* buffer defined as U64 for alignment */
+   unsigned memsize;
+   unsigned reserved[2];          /* never read nor write, will be removed in a future version */
+};   /* typedef'd to XXH64_state_t */
+#endif
+
+#ifdef XXH_PRIVATE_API
+#  include "xxhash.c"   /* include xxhash function bodies as `static`, for inlining */
+#endif
+
+#endif /* XXH_STATIC_LINKING_ONLY */
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* XXHASH_H_5627135585666179 */
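
The streaming functions declared above target input that arrives in pieces;
for small single-buffer inputs the header recommends the one-shot XXH32()
and XXH64() instead. A short sketch of the intended call sequence
(illustrative only; the chunked input is made up):

    #include <stdio.h>
    #include <string.h>
    #include "xxhash.h"

    int main(void) {
        const char* const chunks[] = { "hello, ", "streaming ", "world" };
        size_t const nchunks = sizeof(chunks) / sizeof(chunks[0]);
        size_t i;

        XXH64_state_t* const state = XXH64_createState();
        if (state == NULL) return 1;

        /* One reset per hash; the seed plays the same role as in XXH64(). */
        if (XXH64_reset(state, 0) != XXH_OK) return 1;

        for (i = 0; i < nchunks; i++) {
            if (XXH64_update(state, chunks[i], strlen(chunks[i])) != XXH_OK)
                return 1;
        }

        /* digest() does not consume the state: more updates may follow,
         * and a new digest can be taken later. */
        unsigned long long const hash = XXH64_digest(state);
        printf("XXH64=%016llx\n", hash);

        XXH64_freeState(state);
        return 0;
    }

The result equals a single XXH64() call over the concatenated input; the
streaming form only pays off when the data cannot be held in one buffer.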

http://git-wip-us.apache.org/repos/asf/arrow/blob/5e343098/dev/release/run-rat.sh
----------------------------------------------------------------------
diff --git a/dev/release/run-rat.sh b/dev/release/run-rat.sh
index f225c66..757604f 100755
--- a/dev/release/run-rat.sh
+++ b/dev/release/run-rat.sh
@@ -59,6 +59,17 @@ $RAT $1 \
   -e arrow-glib-sections.txt \
   -e arrow-glib-overrides.txt \
   -e gtk-doc.make \
+  -e ae.c \
+  -e ae.h \
+  -e ae_epoll.c \
+  -e ae_evport.c \
+  -e ae_kqueue.c \
+  -e ae_select.c \
+  -e config.h \
+  -e zmalloc.h \
+  -e dlmalloc.c \
+  -e xxhash.cc \
+  -e xxhash.h \
   -e "*.html" \
   -e "*.sgml" \
   -e "*.css" \


[3/6] arrow git commit: ARROW-1104: Integrate in-memory object store into arrow

Posted by we...@apache.org.
http://git-wip-us.apache.org/repos/asf/arrow/blob/5e343098/cpp/src/plasma/thirdparty/ae/ae_evport.c
----------------------------------------------------------------------
diff --git a/cpp/src/plasma/thirdparty/ae/ae_evport.c b/cpp/src/plasma/thirdparty/ae/ae_evport.c
new file mode 100644
index 0000000..5c317be
--- /dev/null
+++ b/cpp/src/plasma/thirdparty/ae/ae_evport.c
@@ -0,0 +1,320 @@
+/* ae.c module for illumos event ports.
+ *
+ * Copyright (c) 2012, Joyent, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *   * Redistributions of source code must retain the above copyright notice,
+ *     this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ *   * Neither the name of Redis nor the names of its contributors may be used
+ *     to endorse or promote products derived from this software without
+ *     specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#include <assert.h>
+#include <errno.h>
+#include <port.h>
+#include <poll.h>
+
+#include <sys/types.h>
+#include <sys/time.h>
+
+#include <stdio.h>
+
+static int evport_debug = 0;
+
+/*
+ * This file implements the ae API using event ports, present on Solaris-based
+ * systems since Solaris 10.  Using the event port interface, we associate file
+ * descriptors with the port.  Each association also includes the set of poll(2)
+ * events that the consumer is interested in (e.g., POLLIN and POLLOUT).
+ *
+ * There's one tricky piece to this implementation: when we return events via
+ * aeApiPoll, the corresponding file descriptors become dissociated from the
+ * port.  This is necessary because poll events are level-triggered, so if the
+ * fd didn't become dissociated, it would immediately fire another event since
+ * the underlying state hasn't changed yet.  We must re-associate the file
+ * descriptor, but only after we know that our caller has actually read from it.
+ * The ae API does not tell us exactly when that happens, but we do know that
+ * it must happen by the time aeApiPoll is called again.  Our solution is to
+ * keep track of the last fds returned by aeApiPoll and re-associate them next
+ * time aeApiPoll is invoked.
+ *
+ * To summarize, in this module, each fd association is EITHER (a) represented
+ * only via the in-kernel association OR (b) represented by pending_fds and
+ * pending_masks.  (b) is only true for the last fds we returned from aeApiPoll,
+ * and only until we enter aeApiPoll again (at which point we restore the
+ * in-kernel association).
+ */
+#define MAX_EVENT_BATCHSZ 512
+
+typedef struct aeApiState {
+    int     portfd;                             /* event port */
+    int     npending;                           /* # of pending fds */
+    int     pending_fds[MAX_EVENT_BATCHSZ];     /* pending fds */
+    int     pending_masks[MAX_EVENT_BATCHSZ];   /* pending fds' masks */
+} aeApiState;
+
+static int aeApiCreate(aeEventLoop *eventLoop) {
+    int i;
+    aeApiState *state = zmalloc(sizeof(aeApiState));
+    if (!state) return -1;
+
+    state->portfd = port_create();
+    if (state->portfd == -1) {
+        zfree(state);
+        return -1;
+    }
+
+    state->npending = 0;
+
+    for (i = 0; i < MAX_EVENT_BATCHSZ; i++) {
+        state->pending_fds[i] = -1;
+        state->pending_masks[i] = AE_NONE;
+    }
+
+    eventLoop->apidata = state;
+    return 0;
+}
+
+static int aeApiResize(aeEventLoop *eventLoop, int setsize) {
+    /* Nothing to resize here. */
+    return 0;
+}
+
+static void aeApiFree(aeEventLoop *eventLoop) {
+    aeApiState *state = eventLoop->apidata;
+
+    close(state->portfd);
+    zfree(state);
+}
+
+static int aeApiLookupPending(aeApiState *state, int fd) {
+    int i;
+
+    for (i = 0; i < state->npending; i++) {
+        if (state->pending_fds[i] == fd)
+            return (i);
+    }
+
+    return (-1);
+}
+
+/*
+ * Helper function to invoke port_associate for the given fd and mask.
+ */
+static int aeApiAssociate(const char *where, int portfd, int fd, int mask) {
+    int events = 0;
+    int rv, err;
+
+    if (mask & AE_READABLE)
+        events |= POLLIN;
+    if (mask & AE_WRITABLE)
+        events |= POLLOUT;
+
+    if (evport_debug)
+        fprintf(stderr, "%s: port_associate(%d, 0x%x) = ", where, fd, events);
+
+    rv = port_associate(portfd, PORT_SOURCE_FD, fd, events,
+        (void *)(uintptr_t)mask);
+    err = errno;
+
+    if (evport_debug)
+        fprintf(stderr, "%d (%s)\n", rv, rv == 0 ? "no error" : strerror(err));
+
+    if (rv == -1) {
+        fprintf(stderr, "%s: port_associate: %s\n", where, strerror(err));
+
+        if (err == EAGAIN)
+            fprintf(stderr, "aeApiAssociate: event port limit exceeded.");
+    }
+
+    return rv;
+}
+
+static int aeApiAddEvent(aeEventLoop *eventLoop, int fd, int mask) {
+    aeApiState *state = eventLoop->apidata;
+    int fullmask, pfd;
+
+    if (evport_debug)
+        fprintf(stderr, "aeApiAddEvent: fd %d mask 0x%x\n", fd, mask);
+
+    /*
+     * Since port_associate's "events" argument replaces any existing events, we
+     * must be sure to include whatever events are already associated when
+     * we call port_associate() again.
+     */
+    fullmask = mask | eventLoop->events[fd].mask;
+    pfd = aeApiLookupPending(state, fd);
+
+    if (pfd != -1) {
+        /*
+         * This fd was recently returned from aeApiPoll.  It should be safe to
+         * assume that the consumer has processed that poll event, but we play
+         * it safer by simply updating pending_mask.  The fd will be
+         * re-associated as usual when aeApiPoll is called again.
+         */
+        if (evport_debug)
+            fprintf(stderr, "aeApiAddEvent: adding to pending fd %d\n", fd);
+        state->pending_masks[pfd] |= fullmask;
+        return 0;
+    }
+
+    return (aeApiAssociate("aeApiAddEvent", state->portfd, fd, fullmask));
+}
+
+static void aeApiDelEvent(aeEventLoop *eventLoop, int fd, int mask) {
+    aeApiState *state = eventLoop->apidata;
+    int fullmask, pfd;
+
+    if (evport_debug)
+        fprintf(stderr, "del fd %d mask 0x%x\n", fd, mask);
+
+    pfd = aeApiLookupPending(state, fd);
+
+    if (pfd != -1) {
+        if (evport_debug)
+            fprintf(stderr, "deleting event from pending fd %d\n", fd);
+
+        /*
+         * This fd was just returned from aeApiPoll, so it's not currently
+         * associated with the port.  All we need to do is update
+         * pending_mask appropriately.
+         */
+        state->pending_masks[pfd] &= ~mask;
+
+        if (state->pending_masks[pfd] == AE_NONE)
+            state->pending_fds[pfd] = -1;
+
+        return;
+    }
+
+    /*
+     * The fd is currently associated with the port.  Like with the add case
+     * above, we must look at the full mask for the file descriptor before
+     * updating that association.  We don't have a good way of knowing what the
+     * events are without looking into the eventLoop state directly.  We rely on
+     * the fact that our caller has already updated the mask in the eventLoop.
+     */
+
+    fullmask = eventLoop->events[fd].mask;
+    if (fullmask == AE_NONE) {
+        /*
+         * We're removing *all* events, so use port_dissociate to remove the
+         * association completely.  Failure here indicates a bug.
+         */
+        if (evport_debug)
+            fprintf(stderr, "aeApiDelEvent: port_dissociate(%d)\n", fd);
+
+        if (port_dissociate(state->portfd, PORT_SOURCE_FD, fd) != 0) {
+            perror("aeApiDelEvent: port_dissociate");
+            abort(); /* will not return */
+        }
+    } else if (aeApiAssociate("aeApiDelEvent", state->portfd, fd,
+        fullmask) != 0) {
+        /*
+         * ENOMEM is a potentially transient condition, but the kernel won't
+         * generally return it unless things are really bad.  EAGAIN indicates
+         * we've reached a resource limit, for which it doesn't make sense to
+         * retry (counter-intuitively).  All other errors indicate a bug.  In any
+         * of these cases, the best we can do is to abort.
+         */
+        abort(); /* will not return */
+    }
+}
+
+static int aeApiPoll(aeEventLoop *eventLoop, struct timeval *tvp) {
+    aeApiState *state = eventLoop->apidata;
+    struct timespec timeout, *tsp;
+    int mask, i;
+    uint_t nevents;
+    port_event_t event[MAX_EVENT_BATCHSZ];
+
+    /*
+     * If we've returned fd events before, we must re-associate them with the
+     * port now, before calling port_get().  See the block comment at the top of
+     * this file for an explanation of why.
+     */
+    for (i = 0; i < state->npending; i++) {
+        if (state->pending_fds[i] == -1)
+            /* This fd has since been deleted. */
+            continue;
+
+        if (aeApiAssociate("aeApiPoll", state->portfd,
+            state->pending_fds[i], state->pending_masks[i]) != 0) {
+            /* See aeApiDelEvent for why this case is fatal. */
+            abort();
+        }
+
+        state->pending_masks[i] = AE_NONE;
+        state->pending_fds[i] = -1;
+    }
+
+    state->npending = 0;
+
+    if (tvp != NULL) {
+        timeout.tv_sec = tvp->tv_sec;
+        timeout.tv_nsec = tvp->tv_usec * 1000;
+        tsp = &timeout;
+    } else {
+        tsp = NULL;
+    }
+
+    /*
+     * port_getn can return with errno == ETIME having returned some events (!).
+     * So if we get ETIME, we check nevents, too.
+     */
+    nevents = 1;
+    if (port_getn(state->portfd, event, MAX_EVENT_BATCHSZ, &nevents,
+        tsp) == -1 && (errno != ETIME || nevents == 0)) {
+        if (errno == ETIME || errno == EINTR)
+            return 0;
+
+        /* Any other error indicates a bug. */
+        perror("aeApiPoll: port_get");
+        abort();
+    }
+
+    state->npending = nevents;
+
+    for (i = 0; i < nevents; i++) {
+            mask = 0;
+            if (event[i].portev_events & POLLIN)
+                mask |= AE_READABLE;
+            if (event[i].portev_events & POLLOUT)
+                mask |= AE_WRITABLE;
+
+            eventLoop->fired[i].fd = event[i].portev_object;
+            eventLoop->fired[i].mask = mask;
+
+            if (evport_debug)
+                fprintf(stderr, "aeApiPoll: fd %d mask 0x%x\n",
+                    (int)event[i].portev_object, mask);
+
+            state->pending_fds[i] = event[i].portev_object;
+            state->pending_masks[i] = (uintptr_t)event[i].portev_user;
+    }
+
+    return nevents;
+}
+
+static char *aeApiName(void) {
+    return "evport";
+}
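
The re-association logic above exists because Solaris event ports are
one-shot: once port_get() reports an event for a file descriptor, the
association is dropped and must be re-established before that descriptor
can fire again. A minimal standalone sketch of that contract (illustrative
only, not part of this patch; watch_forever and its arguments are
hypothetical):

    #include <port.h>
    #include <poll.h>

    /* One-shot semantics of event ports: port_get() consumes the
     * association it reports, so the fd must be re-associated before
     * it can be observed again. */
    static int watch_forever(int port, int fd) {
        if (port_associate(port, PORT_SOURCE_FD, fd, POLLIN, NULL) != 0)
            return -1;
        for (;;) {
            port_event_t ev;
            if (port_get(port, &ev, NULL) != 0)  /* NULL = block forever */
                return -1;
            /* ... handle the readable fd here ... */
            /* Without this, the fd would never be reported again. */
            if (port_associate(port, PORT_SOURCE_FD, fd, POLLIN, NULL) != 0)
                return -1;
        }
    }

ae_evport.c instead defers the re-association to the top of the next
aeApiPoll() call, so that handlers which modify or delete the event in
the meantime are honored.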

http://git-wip-us.apache.org/repos/asf/arrow/blob/5e343098/cpp/src/plasma/thirdparty/ae/ae_kqueue.c
----------------------------------------------------------------------
diff --git a/cpp/src/plasma/thirdparty/ae/ae_kqueue.c b/cpp/src/plasma/thirdparty/ae/ae_kqueue.c
new file mode 100644
index 0000000..6796f4c
--- /dev/null
+++ b/cpp/src/plasma/thirdparty/ae/ae_kqueue.c
@@ -0,0 +1,138 @@
+/* Kqueue(2)-based ae.c module
+ *
+ * Copyright (C) 2009 Harish Mallipeddi - harish.mallipeddi@gmail.com
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *   * Redistributions of source code must retain the above copyright notice,
+ *     this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ *   * Neither the name of Redis nor the names of its contributors may be used
+ *     to endorse or promote products derived from this software without
+ *     specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#include <sys/types.h>
+#include <sys/event.h>
+#include <sys/time.h>
+
+typedef struct aeApiState {
+    int kqfd;
+    struct kevent *events;
+} aeApiState;
+
+static int aeApiCreate(aeEventLoop *eventLoop) {
+    aeApiState *state = zmalloc(sizeof(aeApiState));
+
+    if (!state) return -1;
+    state->events = zmalloc(sizeof(struct kevent)*eventLoop->setsize);
+    if (!state->events) {
+        zfree(state);
+        return -1;
+    }
+    state->kqfd = kqueue();
+    if (state->kqfd == -1) {
+        zfree(state->events);
+        zfree(state);
+        return -1;
+    }
+    eventLoop->apidata = state;
+    return 0;
+}
+
+static int aeApiResize(aeEventLoop *eventLoop, int setsize) {
+    aeApiState *state = eventLoop->apidata;
+
+    state->events = zrealloc(state->events, sizeof(struct kevent)*setsize);
+    return 0;
+}
+
+static void aeApiFree(aeEventLoop *eventLoop) {
+    aeApiState *state = eventLoop->apidata;
+
+    close(state->kqfd);
+    zfree(state->events);
+    zfree(state);
+}
+
+static int aeApiAddEvent(aeEventLoop *eventLoop, int fd, int mask) {
+    aeApiState *state = eventLoop->apidata;
+    struct kevent ke;
+
+    if (mask & AE_READABLE) {
+        EV_SET(&ke, fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
+        if (kevent(state->kqfd, &ke, 1, NULL, 0, NULL) == -1) return -1;
+    }
+    if (mask & AE_WRITABLE) {
+        EV_SET(&ke, fd, EVFILT_WRITE, EV_ADD, 0, 0, NULL);
+        if (kevent(state->kqfd, &ke, 1, NULL, 0, NULL) == -1) return -1;
+    }
+    return 0;
+}
+
+static void aeApiDelEvent(aeEventLoop *eventLoop, int fd, int mask) {
+    aeApiState *state = eventLoop->apidata;
+    struct kevent ke;
+
+    if (mask & AE_READABLE) {
+        EV_SET(&ke, fd, EVFILT_READ, EV_DELETE, 0, 0, NULL);
+        kevent(state->kqfd, &ke, 1, NULL, 0, NULL);
+    }
+    if (mask & AE_WRITABLE) {
+        EV_SET(&ke, fd, EVFILT_WRITE, EV_DELETE, 0, 0, NULL);
+        kevent(state->kqfd, &ke, 1, NULL, 0, NULL);
+    }
+}
+
+static int aeApiPoll(aeEventLoop *eventLoop, struct timeval *tvp) {
+    aeApiState *state = eventLoop->apidata;
+    int retval, numevents = 0;
+
+    if (tvp != NULL) {
+        struct timespec timeout;
+        timeout.tv_sec = tvp->tv_sec;
+        timeout.tv_nsec = tvp->tv_usec * 1000;
+        retval = kevent(state->kqfd, NULL, 0, state->events, eventLoop->setsize,
+                        &timeout);
+    } else {
+        retval = kevent(state->kqfd, NULL, 0, state->events, eventLoop->setsize,
+                        NULL);
+    }
+
+    if (retval > 0) {
+        int j;
+
+        numevents = retval;
+        for(j = 0; j < numevents; j++) {
+            int mask = 0;
+            struct kevent *e = state->events+j;
+
+            if (e->filter == EVFILT_READ) mask |= AE_READABLE;
+            if (e->filter == EVFILT_WRITE) mask |= AE_WRITABLE;
+            eventLoop->fired[j].fd = e->ident;
+            eventLoop->fired[j].mask = mask;
+        }
+    }
+    return numevents;
+}
+
+static char *aeApiName(void) {
+    return "kqueue";
+}
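
Note that kqueue keys registrations on (ident, filter) pairs, which is
why aeApiAddEvent and aeApiDelEvent above issue one kevent() call per
filter: read and write interest on the same fd are independent entries.
A small standalone sketch (wait_readable is a hypothetical helper, not
part of this patch):

    #include <sys/types.h>
    #include <sys/event.h>
    #include <sys/time.h>

    /* Register read interest on fd and block until it fires; a
     * changelist and an eventlist may be passed to a single kevent()
     * call, so registration and waiting can be combined. */
    static int wait_readable(int kq, int fd) {
        struct kevent change, result;
        EV_SET(&change, fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
        if (kevent(kq, &change, 1, &result, 1, NULL) == -1)
            return -1;
        return (int)result.ident;   /* the fd that became readable */
    }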

http://git-wip-us.apache.org/repos/asf/arrow/blob/5e343098/cpp/src/plasma/thirdparty/ae/ae_select.c
----------------------------------------------------------------------
diff --git a/cpp/src/plasma/thirdparty/ae/ae_select.c b/cpp/src/plasma/thirdparty/ae/ae_select.c
new file mode 100644
index 0000000..c039a8e
--- /dev/null
+++ b/cpp/src/plasma/thirdparty/ae/ae_select.c
@@ -0,0 +1,106 @@
+/* Select()-based ae.c module.
+ *
+ * Copyright (c) 2009-2012, Salvatore Sanfilippo <antirez at gmail dot com>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *   * Redistributions of source code must retain the above copyright notice,
+ *     this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ *   * Neither the name of Redis nor the names of its contributors may be used
+ *     to endorse or promote products derived from this software without
+ *     specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#include <sys/select.h>
+#include <string.h>
+
+typedef struct aeApiState {
+    fd_set rfds, wfds;
+    /* We need to have a copy of the fd sets as it's not safe to reuse
+     * FD sets after select(). */
+    fd_set _rfds, _wfds;
+} aeApiState;
+
+static int aeApiCreate(aeEventLoop *eventLoop) {
+    aeApiState *state = zmalloc(sizeof(aeApiState));
+
+    if (!state) return -1;
+    FD_ZERO(&state->rfds);
+    FD_ZERO(&state->wfds);
+    eventLoop->apidata = state;
+    return 0;
+}
+
+static int aeApiResize(aeEventLoop *eventLoop, int setsize) {
+    /* Just ensure we have enough room in the fd_set type. */
+    if (setsize >= FD_SETSIZE) return -1;
+    return 0;
+}
+
+static void aeApiFree(aeEventLoop *eventLoop) {
+    zfree(eventLoop->apidata);
+}
+
+static int aeApiAddEvent(aeEventLoop *eventLoop, int fd, int mask) {
+    aeApiState *state = eventLoop->apidata;
+
+    if (mask & AE_READABLE) FD_SET(fd,&state->rfds);
+    if (mask & AE_WRITABLE) FD_SET(fd,&state->wfds);
+    return 0;
+}
+
+static void aeApiDelEvent(aeEventLoop *eventLoop, int fd, int mask) {
+    aeApiState *state = eventLoop->apidata;
+
+    if (mask & AE_READABLE) FD_CLR(fd,&state->rfds);
+    if (mask & AE_WRITABLE) FD_CLR(fd,&state->wfds);
+}
+
+static int aeApiPoll(aeEventLoop *eventLoop, struct timeval *tvp) {
+    aeApiState *state = eventLoop->apidata;
+    int retval, j, numevents = 0;
+
+    memcpy(&state->_rfds,&state->rfds,sizeof(fd_set));
+    memcpy(&state->_wfds,&state->wfds,sizeof(fd_set));
+
+    retval = select(eventLoop->maxfd+1,
+                &state->_rfds,&state->_wfds,NULL,tvp);
+    if (retval > 0) {
+        for (j = 0; j <= eventLoop->maxfd; j++) {
+            int mask = 0;
+            aeFileEvent *fe = &eventLoop->events[j];
+
+            if (fe->mask == AE_NONE) continue;
+            if (fe->mask & AE_READABLE && FD_ISSET(j,&state->_rfds))
+                mask |= AE_READABLE;
+            if (fe->mask & AE_WRITABLE && FD_ISSET(j,&state->_wfds))
+                mask |= AE_WRITABLE;
+            eventLoop->fired[numevents].fd = j;
+            eventLoop->fired[numevents].mask = mask;
+            numevents++;
+        }
+    }
+    return numevents;
+}
+
+static char *aeApiName(void) {
+    return "select";
+}
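
The _rfds/_wfds copies are essential: select() overwrites its fd_set
arguments in place, leaving only the ready descriptors set. A sketch of
the pattern in isolation (poll_once is a hypothetical helper):

    #include <sys/select.h>
    #include <string.h>

    /* 'master' survives across calls; 'work' is clobbered by select(),
     * which is why a fresh copy is made before every call. */
    static int poll_once(fd_set *master, int maxfd, struct timeval *tv) {
        fd_set work;
        memcpy(&work, master, sizeof(fd_set));
        return select(maxfd + 1, &work, NULL, NULL, tv);
    }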

http://git-wip-us.apache.org/repos/asf/arrow/blob/5e343098/cpp/src/plasma/thirdparty/ae/config.h
----------------------------------------------------------------------
diff --git a/cpp/src/plasma/thirdparty/ae/config.h b/cpp/src/plasma/thirdparty/ae/config.h
new file mode 100644
index 0000000..4f8e1ea
--- /dev/null
+++ b/cpp/src/plasma/thirdparty/ae/config.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2009-2012, Salvatore Sanfilippo <antirez at gmail dot com>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *   * Redistributions of source code must retain the above copyright notice,
+ *     this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ *   * Neither the name of Redis nor the names of its contributors may be used
+ *     to endorse or promote products derived from this software without
+ *     specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __CONFIG_H
+#define __CONFIG_H
+
+#ifdef __APPLE__
+#include <AvailabilityMacros.h>
+#endif
+
+/* Test for polling API */
+#ifdef __linux__
+#define HAVE_EPOLL 1
+#endif
+
+#if (defined(__APPLE__) && defined(MAC_OS_X_VERSION_10_6)) || defined(__FreeBSD__) || defined(__OpenBSD__) || defined (__NetBSD__)
+#define HAVE_KQUEUE 1
+#endif
+
+#ifdef __sun
+#include <sys/feature_tests.h>
+#ifdef _DTRACE_VERSION
+#define HAVE_EVPORT 1
+#endif
+#endif
+
+
+#endif
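
These HAVE_* macros drive backend selection in ae.c (added elsewhere in
this patch). The chain below mirrors the upstream Redis logic and is
shown for orientation only; the exact form in this patch may differ:

    /* Pick the best multiplexing backend available at compile time. */
    #include "config.h"
    #ifdef HAVE_EVPORT
    #include "ae_evport.c"
    #else
        #ifdef HAVE_EPOLL
        #include "ae_epoll.c"
        #else
            #ifdef HAVE_KQUEUE
            #include "ae_kqueue.c"
            #else
            #include "ae_select.c"   /* portable fallback */
            #endif
        #endif
    #endif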

http://git-wip-us.apache.org/repos/asf/arrow/blob/5e343098/cpp/src/plasma/thirdparty/ae/zmalloc.h
----------------------------------------------------------------------
diff --git a/cpp/src/plasma/thirdparty/ae/zmalloc.h b/cpp/src/plasma/thirdparty/ae/zmalloc.h
new file mode 100644
index 0000000..6c27dd4
--- /dev/null
+++ b/cpp/src/plasma/thirdparty/ae/zmalloc.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2009-2012, Salvatore Sanfilippo <antirez at gmail dot com>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *   * Redistributions of source code must retain the above copyright notice,
+ *     this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ *   * Neither the name of Redis nor the names of its contributors may be used
+ *     to endorse or promote products derived from this software without
+ *     specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ZMALLOC_H
+#define _ZMALLOC_H
+
+#ifndef zmalloc
+#define zmalloc malloc
+#endif
+
+#ifndef zfree
+#define zfree free
+#endif
+
+#ifndef zrealloc
+#define zrealloc realloc
+#endif
+
+#endif /* _ZMALLOC_H */
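
Unlike Redis's accounting zmalloc, this header simply aliases the
allocator to libc. Because each macro is wrapped in #ifndef, a build can
pre-define the names to redirect the event loop's allocations; for
example (the my_traced_* functions are hypothetical):

    /* Hypothetical override: define the macros before this header is
     * seen and the #ifndef guards will leave them in place. */
    #define zmalloc  my_traced_malloc
    #define zrealloc my_traced_realloc
    #define zfree    my_traced_free
    #include "zmalloc.h"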


[2/6] arrow git commit: ARROW-1104: Integrate in-memory object store into arrow

Posted by we...@apache.org.
http://git-wip-us.apache.org/repos/asf/arrow/blob/5e343098/cpp/src/plasma/thirdparty/dlmalloc.c
----------------------------------------------------------------------
diff --git a/cpp/src/plasma/thirdparty/dlmalloc.c b/cpp/src/plasma/thirdparty/dlmalloc.c
new file mode 100644
index 0000000..84ccbd2
--- /dev/null
+++ b/cpp/src/plasma/thirdparty/dlmalloc.c
@@ -0,0 +1,6281 @@
+/*
+  This is a version (aka dlmalloc) of malloc/free/realloc written by
+  Doug Lea and released to the public domain, as explained at
+  http://creativecommons.org/publicdomain/zero/1.0/ Send questions,
+  comments, complaints, performance data, etc to dl@cs.oswego.edu
+
+* Version 2.8.6 Wed Aug 29 06:57:58 2012  Doug Lea
+   Note: There may be an updated version of this malloc obtainable at
+           ftp://gee.cs.oswego.edu/pub/misc/malloc.c
+         Check before installing!
+
+* Quickstart
+
+  This library is all in one file to simplify the most common usage:
+  ftp it, compile it (-O3), and link it into another program. All of
+  the compile-time options default to reasonable values for use on
+  most platforms.  You might later want to step through various
+  compile-time and dynamic tuning options.
+
+  For convenience, an include file for code using this malloc is at:
+     ftp://gee.cs.oswego.edu/pub/misc/malloc-2.8.6.h
+  You don't really need this .h file unless you call functions not
+  defined in your system include files.  The .h file contains only the
+  excerpts from this file needed for using this malloc on ANSI C/C++
+  systems, so long as you haven't changed compile-time options about
+  naming and tuning parameters.  If you do, then you can create your
+  own malloc.h that does include all settings by cutting at the point
+  indicated below. Note that you may already by default be using a C
+  library containing a malloc that is based on some version of this
+  malloc (for example in linux). You might still want to use the one
+  in this file to customize settings or to avoid overheads associated
+  with library versions.
+
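Concretely, the quickstart boils down to something like this hypothetical
standalone build (Plasma's actual integration differs; demo.c and the cc
invocation are illustrative):

    /* demo.c -- build: cc -O3 -DUSE_DL_PREFIX -o demo demo.c dlmalloc.c */
    #include <stdio.h>
    #include <stddef.h>

    /* With USE_DL_PREFIX the public entry points are dl-prefixed and
     * coexist with the system allocator. */
    extern void *dlmalloc(size_t);
    extern void  dlfree(void *);

    int main(void) {
        char *p = dlmalloc(64);
        if (p == NULL) return 1;
        snprintf(p, 64, "hello from dlmalloc");
        puts(p);
        dlfree(p);
        return 0;
    }
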
+* Vital statistics:
+
+  Supported pointer/size_t representation:       4 or 8 bytes
+       size_t MUST be an unsigned type of the same width as
+       pointers. (If you are using an ancient system that declares
+       size_t as a signed type, or need it to be a different width
+       than pointers, you can use a previous release of this malloc
+       (e.g. 2.7.2) supporting these.)
+
+  Alignment:                                     8 bytes (minimum)
+       This suffices for nearly all current machines and C compilers.
+       However, you can define MALLOC_ALIGNMENT to be wider than this
+       if necessary (up to 128 bytes), at the expense of using more space.
+
+  Minimum overhead per allocated chunk:   4 or  8 bytes (if 4byte sizes)
+                                          8 or 16 bytes (if 8byte sizes)
+       Each malloced chunk has a hidden word of overhead holding size
+       and status information, and additional cross-check word
+       if FOOTERS is defined.
+
+  Minimum allocated size: 4-byte ptrs:  16 bytes    (including overhead)
+                          8-byte ptrs:  32 bytes    (including overhead)
+
+       Even a request for zero bytes (i.e., malloc(0)) returns a
+       pointer to something of the minimum allocatable size.
+       The maximum overhead wastage (i.e., the number of extra bytes
+       allocated beyond what was requested in malloc) is less than or equal
+       to the minimum size, except for requests >= mmap_threshold that
+       are serviced via mmap(), where the worst case wastage is about
+       32 bytes plus the remainder from a system page (the minimal
+       mmap unit); typically 4096 or 8192 bytes.
+
+  Security: static-safe; optionally more or less
+       The "security" of malloc refers to the ability of malicious
+       code to accentuate the effects of errors (for example, freeing
+       space that is not currently malloc'ed or overwriting past the
+       ends of chunks) in code that calls malloc.  This malloc
+       guarantees not to modify any memory locations below the base of
+       heap, i.e., static variables, even in the presence of usage
+       errors.  The routines additionally detect most improper frees
+       and reallocs.  All this holds as long as the static bookkeeping
+       for malloc itself is not corrupted by some other means.  This
+       is only one aspect of security -- these checks do not, and
+       cannot, detect all possible programming errors.
+
+       If FOOTERS is defined nonzero, then each allocated chunk
+       carries an additional check word to verify that it was malloced
+       from its space.  These check words are the same within each
+       execution of a program using malloc, but differ across
+       executions, so externally crafted fake chunks cannot be
+       freed. This improves security by rejecting frees/reallocs that
+       could corrupt heap memory, in addition to the checks preventing
+       writes to statics that are always on.  This may further improve
+       security at the expense of time and space overhead.  (Note that
+       FOOTERS may also be worth using with MSPACES.)
+
+       By default detected errors cause the program to abort (calling
+       "abort()"). You can override this to instead proceed past
+       errors by defining PROCEED_ON_ERROR.  In this case, a bad free
+       has no effect, and a malloc that encounters a bad address
+       caused by user overwrites will ignore the bad address by
+       dropping pointers and indices to all known memory. This may
+       be appropriate for programs that should continue if at all
+       possible in the face of programming errors, although they may
+       run out of memory because dropped memory is never reclaimed.
+
+       If you don't like either of these options, you can define
+       CORRUPTION_ERROR_ACTION and USAGE_ERROR_ACTION to do anything
+       else. And if you are sure that your program using malloc has
+       no errors or vulnerabilities, you can define INSECURE to 1,
+       which might (or might not) provide a small performance improvement.
+
+       It is also possible to limit the maximum total allocatable
+       space, using malloc_set_footprint_limit. This is not
+       designed as a security feature in itself (calls to set limits
+       are not screened or privileged), but may be useful as one
+       aspect of a secure implementation.
+
+  Thread-safety: NOT thread-safe unless USE_LOCKS defined non-zero
+       When USE_LOCKS is defined, each public call to malloc, free,
+       etc is surrounded with a lock. By default, this uses a plain
+       pthread mutex, win32 critical section, or a spin-lock if
+       available for the platform and not disabled by setting
+       USE_SPIN_LOCKS=0.  However, if USE_RECURSIVE_LOCKS is defined,
+       recursive versions are used instead (which are not required for
+       base functionality but may be needed in layered extensions).
+       Using a global lock is not especially fast, and can be a major
+       bottleneck.  It is designed only to provide minimal protection
+       in concurrent environments, and to provide a basis for
+       extensions.  If you are using malloc in a concurrent program,
+       consider instead using nedmalloc
+       (http://www.nedprod.com/programs/portable/nedmalloc/) or
+       ptmalloc (See http://www.malloc.de), which are derived from
+       versions of this malloc.
+
+  System requirements: Any combination of MORECORE and/or MMAP/MUNMAP
+       This malloc can use unix sbrk or any emulation (invoked using
+       the CALL_MORECORE macro) and/or mmap/munmap or any emulation
+       (invoked using CALL_MMAP/CALL_MUNMAP) to get and release system
+       memory.  On most unix systems, it tends to work best if both
+       MORECORE and MMAP are enabled.  On Win32, it uses emulations
+       based on VirtualAlloc. It also uses common C library functions
+       like memset.
+
+  Compliance: I believe it is compliant with the Single Unix Specification
+       (See http://www.unix.org). Also SVID/XPG, ANSI C, and probably
+       others as well.
+
+* Overview of algorithms
+
+  This is not the fastest, most space-conserving, most portable, or
+  most tunable malloc ever written. However it is among the fastest
+  while also being among the most space-conserving, portable and
+  tunable.  Consistent balance across these factors results in a good
+  general-purpose allocator for malloc-intensive programs.
+
+  In most ways, this malloc is a best-fit allocator. Generally, it
+  chooses the best-fitting existing chunk for a request, with ties
+  broken in approximately least-recently-used order. (This strategy
+  normally maintains low fragmentation.) However, for requests less
+  than 256 bytes, it deviates from best-fit when there is not an
+  exactly fitting available chunk by preferring to use space adjacent
+  to that used for the previous small request, as well as by breaking
+  ties in approximately most-recently-used order. (These enhance
+  locality of series of small allocations.)  And for very large requests
+  (>= 256Kb by default), it relies on system memory mapping
+  facilities, if supported.  (This helps avoid carrying around and
+  possibly fragmenting memory used only for large chunks.)
+
+  All operations (except malloc_stats and mallinfo) have execution
+  times that are bounded by a constant factor of the number of bits in
+  a size_t, not counting any clearing in calloc or copying in realloc,
+  or actions surrounding MORECORE and MMAP that have times
+  proportional to the number of non-contiguous regions returned by
+  system allocation routines, which is often just 1. In real-time
+  applications, you can optionally suppress segment traversals using
+  NO_SEGMENT_TRAVERSAL, which assures bounded execution even when
+  system allocators return non-contiguous spaces, at the typical
+  expense of carrying around more memory and increased fragmentation.
+
+  The implementation is not very modular and seriously overuses
+  macros. Perhaps someday all C compilers will do as good a job
+  inlining modular code as can now be done by brute-force expansion,
+  but now, enough of them seem not to.
+
+  Some compilers issue a lot of warnings about code that is
+  dead/unreachable only on some platforms, and also about intentional
+  uses of negation on unsigned types. All known cases of each can be
+  ignored.
+
+  For a longer but out of date high-level description, see
+     http://gee.cs.oswego.edu/dl/html/malloc.html
+
+* MSPACES
+  If MSPACES is defined, then in addition to malloc, free, etc.,
+  this file also defines mspace_malloc, mspace_free, etc. These
+  are versions of malloc routines that take an "mspace" argument
+  obtained using create_mspace, to control all internal bookkeeping.
+  If ONLY_MSPACES is defined, only these versions are compiled.
+  So if you would like to use this allocator for only some allocations,
+  and your system malloc for others, you can compile with
+  ONLY_MSPACES and then do something like...
+    static mspace mymspace = create_mspace(0,0); // for example
+    #define mymalloc(bytes)  mspace_malloc(mymspace, bytes)
+
+  (Note: If you only need one instance of an mspace, you can instead
+  use "USE_DL_PREFIX" to relabel the global malloc.)
+
+  You can similarly create thread-local allocators by storing
+  mspaces as thread-locals. For example:
+    static __thread mspace tlms = 0;
+    void*  tlmalloc(size_t bytes) {
+      if (tlms == 0) tlms = create_mspace(0, 0);
+      return mspace_malloc(tlms, bytes);
+    }
+    void  tlfree(void* mem) { mspace_free(tlms, mem); }
+
+  Unless FOOTERS is defined, each mspace is completely independent.
+  You cannot allocate from one and free to another (although
+  conformance is only weakly checked, so usage errors are not always
+  caught). If FOOTERS is defined, then each chunk carries around a tag
+  indicating its originating mspace, and frees are directed to their
+  originating spaces. Normally, this requires use of locks.
+
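One property worth making concrete: destroying an mspace releases
everything allocated from it in a single call, so arena-style users need
not free chunk by chunk. A sketch, assuming this file is compiled with
-DMSPACES=1 and using the declarations that appear later in it:

    /* Arena-style lifecycle of an mspace. */
    void arena_demo(void) {
        mspace arena = create_mspace(0, 0);  /* default capacity, no locking */
        void *a = mspace_malloc(arena, 128);
        void *b = mspace_malloc(arena, 256);
        (void)a; (void)b;                    /* ... use the memory ... */
        destroy_mspace(arena);               /* reclaims a, b, and bookkeeping */
    }
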
+ -------------------------  Compile-time options ---------------------------
+
+Be careful in setting #define values for numerical constants of type
+size_t. On some systems, literal values are not automatically extended
+to size_t precision unless they are explicitly casted. You can also
+use the symbolic values MAX_SIZE_T, SIZE_T_ONE, etc below.
+
+WIN32                    default: defined if _WIN32 defined
+  Defining WIN32 sets up defaults for MS environment and compilers.
+  Otherwise defaults are for unix. Beware that there seem to be some
+  cases where this malloc might not be a pure drop-in replacement for
+  Win32 malloc: Random-looking failures from Win32 GDI APIs (e.g.,
+  SetDIBits()) may be due to bugs in some video driver implementations
+  when pixel buffers are malloc()ed, and the region spans more than
+  one VirtualAlloc()ed region. Because dlmalloc uses a small (64Kb)
+  default granularity, pixel buffers may straddle virtual allocation
+  regions more often than when using the Microsoft allocator.  You can
+  avoid this by using VirtualAlloc() and VirtualFree() for all pixel
+  buffers rather than using malloc().  If this is not possible,
+  recompile this malloc with a larger DEFAULT_GRANULARITY. Note:
+  in cases where MSC and gcc (cygwin) are known to differ on WIN32,
+  conditions use _MSC_VER to distinguish them.
+
+DLMALLOC_EXPORT       default: extern
+  Defines how public APIs are declared. If you want to export via a
+  Windows DLL, you might define this as
+    #define DLMALLOC_EXPORT extern  __declspec(dllexport)
+  If you want a POSIX ELF shared object, you might use
+    #define DLMALLOC_EXPORT extern __attribute__((visibility("default")))
+
+MALLOC_ALIGNMENT         default: (size_t)(2 * sizeof(void *))
+  Controls the minimum alignment for malloc'ed chunks.  It must be a
+  power of two and at least 8, even on machines for which smaller
+  alignments would suffice. It may be defined as larger than this
+  though. Note however that code and data structures are optimized for
+  the case of 8-byte alignment.
+
+MSPACES                  default: 0 (false)
+  If true, compile in support for independent allocation spaces.
+  This is only supported if HAVE_MMAP is true.
+
+ONLY_MSPACES             default: 0 (false)
+  If true, only compile in mspace versions, not regular versions.
+
+USE_LOCKS                default: 0 (false)
+  Causes each call to each public routine to be surrounded with
+  pthread or WIN32 mutex lock/unlock. (If set true, this can be
+  overridden on a per-mspace basis for mspace versions.) If set to a
+  non-zero value other than 1, locks are used, but their
+  implementation is left out, so lock functions must be supplied manually,
+  as described below.
+
+USE_SPIN_LOCKS           default: 1 iff USE_LOCKS and spin locks available
+  If true, uses custom spin locks for locking. This is currently
+  supported only for gcc >= 4.1, older gccs on x86 platforms, and recent
+  MS compilers.  Otherwise, posix locks or win32 critical sections are
+  used.
+
+USE_RECURSIVE_LOCKS      default: not defined
+  If defined nonzero, uses recursive (aka reentrant) locks, otherwise
+  uses plain mutexes. This is not required for malloc proper, but may
+  be needed for layered allocators such as nedmalloc.
+
+LOCK_AT_FORK            default: not defined
+  If defined nonzero, performs pthread_atfork upon initialization
+  to initialize child lock while holding parent lock. The implementation
+  assumes that pthread locks (not custom locks) are being used. In other
+  cases, you may need to customize the implementation.
+
+FOOTERS                  default: 0
+  If true, provide extra checking and dispatching by placing
+  information in the footers of allocated chunks. This adds
+  space and time overhead.
+
+INSECURE                 default: 0
+  If true, omit checks for usage errors and heap space overwrites.
+
+USE_DL_PREFIX            default: NOT defined
+  Causes compiler to prefix all public routines with the string 'dl'.
+  This can be useful when you only want to use this malloc in one part
+  of a program, using your regular system malloc elsewhere.
+
+MALLOC_INSPECT_ALL       default: NOT defined
+  If defined, compiles malloc_inspect_all and mspace_inspect_all, that
+  perform traversal of all heap space.  Unless access to these
+  functions is otherwise restricted, you probably do not want to
+  include them in secure implementations.
+
+ABORT                    default: defined as abort()
+  Defines how to abort on failed checks.  On most systems, a failed
+  check cannot die with an "assert" or even print an informative
+  message, because the underlying print routines in turn call malloc,
+  which will fail again.  Generally, the best policy is to simply call
+  abort(). It's not very useful to do more than this because many
+  errors due to overwriting will show up as address faults (null, odd
+  addresses etc) rather than malloc-triggered checks, so will also
+  abort.  Also, most compilers know that abort() does not return, so
+  can better optimize code conditionally calling it.
+
+PROCEED_ON_ERROR           default: defined as 0 (false)
+  Controls whether detected bad addresses cause them to be bypassed
+  rather than aborting. If set, detected bad arguments to free and
+  realloc are ignored. And all bookkeeping information is zeroed out
+  upon a detected overwrite of freed heap space, thus losing the
+  ability to ever return it from malloc again, but enabling the
+  application to proceed. If PROCEED_ON_ERROR is defined, the
+  static variable malloc_corruption_error_count is compiled in
+  and can be examined to see if errors have occurred. This option
+  generates slower code than the default abort policy.
+
+DEBUG                    default: NOT defined
+  The DEBUG setting is mainly intended for people trying to modify
+  this code or diagnose problems when porting to new platforms.
+  However, it may also be able to better isolate user errors than just
+  using runtime checks.  The assertions in the check routines spell
+  out in more detail the assumptions and invariants underlying the
+  algorithms.  The checking is fairly extensive, and will slow down
+  execution noticeably. Calling malloc_stats or mallinfo with DEBUG
+  set will attempt to check every non-mmapped allocated and free chunk
+  in the course of computing the summaries.
+
+ABORT_ON_ASSERT_FAILURE   default: defined as 1 (true)
+  Debugging assertion failures can be nearly impossible if your
+  version of the assert macro causes malloc to be called, which will
+  lead to a cascade of further failures, blowing the runtime stack.
+  ABORT_ON_ASSERT_FAILURE causes assertion failures to call abort(),
+  which will usually make debugging easier.
+
+MALLOC_FAILURE_ACTION     default: sets errno to ENOMEM, or no-op on win32
+  The action to take before "return 0" when malloc is unable to
+  return memory because there is none available.
+
+HAVE_MORECORE             default: 1 (true) unless win32 or ONLY_MSPACES
+  True if this system supports sbrk or an emulation of it.
+
+MORECORE                  default: sbrk
+  The name of the sbrk-style system routine to call to obtain more
+  memory.  See below for guidance on writing custom MORECORE
+  functions. The type of the argument to sbrk/MORECORE varies across
+  systems.  It cannot be size_t, because it supports negative
+  arguments, so it is normally the signed type of the same width as
+  size_t (sometimes declared as "intptr_t").  It doesn't much matter
+  though. Internally, we only call it with arguments less than half
+  the max value of a size_t, which should work across all reasonable
+  possibilities, although sometimes generating compiler warnings.
+
+MORECORE_CONTIGUOUS       default: 1 (true) if HAVE_MORECORE
+  If true, take advantage of fact that consecutive calls to MORECORE
+  with positive arguments always return contiguous increasing
+  addresses.  This is true of unix sbrk. It does not hurt too much to
+  set it true anyway, since malloc copes with non-contiguities.
+  Setting it false when definitely non-contiguous saves time
+  and possibly wasted space it would take to discover this though.
+
+MORECORE_CANNOT_TRIM      default: NOT defined
+  True if MORECORE cannot release space back to the system when given
+  negative arguments. This is generally necessary only if you are
+  using a hand-crafted MORECORE function that cannot handle negative
+  arguments.
+
+NO_SEGMENT_TRAVERSAL       default: 0
+  If non-zero, suppresses traversals of memory segments
+  returned by either MORECORE or CALL_MMAP. This disables
+  merging of segments that are contiguous, and selectively
+  releasing them to the OS if unused, but bounds execution times.
+
+HAVE_MMAP                 default: 1 (true)
+  True if this system supports mmap or an emulation of it.  If so, and
+  HAVE_MORECORE is not true, MMAP is used for all system
+  allocation. If set and HAVE_MORECORE is true as well, MMAP is
+  primarily used to directly allocate very large blocks. It is also
+  used as a backup strategy in cases where MORECORE fails to provide
+  space from system. Note: A single call to MUNMAP is assumed to be
+  able to unmap memory that may have been allocated using multiple calls
+  to MMAP, so long as they are adjacent.
+
+HAVE_MREMAP               default: 1 on linux, else 0
+  If true realloc() uses mremap() to re-allocate large blocks and
+  extend or shrink allocation spaces.
+
+MMAP_CLEARS               default: 1 except on WINCE.
+  True if mmap clears memory so calloc doesn't need to. This is true
+  for standard unix mmap using /dev/zero and on WIN32 except for WINCE.
+
+USE_BUILTIN_FFS            default: 0 (i.e., not used)
+  Causes malloc to use the builtin ffs() function to compute indices.
+  Some compilers may recognize and intrinsify ffs to be faster than the
+  supplied C version. Also, the case of x86 using gcc is special-cased
+  to an asm instruction, so is already as fast as it can be, and so
+  this setting has no effect. Similarly for Win32 under recent MS compilers.
+  (On most x86s, the asm version is only slightly faster than the C version.)
+
+malloc_getpagesize         default: derive from system includes, or 4096.
+  The system page size. To the extent possible, this malloc manages
+  memory from the system in page-size units.  This may be (and
+  usually is) a function rather than a constant. This is ignored
+  if WIN32, where page size is determined using getSystemInfo during
+  initialization.
+
+USE_DEV_RANDOM             default: 0 (i.e., not used)
+  Causes malloc to use /dev/random to initialize secure magic seed for
+  stamping footers. Otherwise, the current time is used.
+
+NO_MALLINFO                default: 0
+  If defined, don't compile "mallinfo". This can be a simple way
+  of dealing with mismatches between system declarations and
+  those in this file.
+
+MALLINFO_FIELD_TYPE        default: size_t
+  The type of the fields in the mallinfo struct. This was originally
+  defined as "int" in SVID etc, but is more usefully defined as
+  size_t. The value is used only if HAVE_USR_INCLUDE_MALLOC_H is not set.
+
+NO_MALLOC_STATS            default: 0
+  If defined, don't compile "malloc_stats". This avoids calls to
+  fprintf and bringing in stdio dependencies you might not want.
+
+REALLOC_ZERO_BYTES_FREES    default: not defined
+  This should be set if a call to realloc with zero bytes should
+  be the same as a call to free. Some people think it should. Otherwise,
+  since this malloc returns a unique pointer for malloc(0), so does
+  realloc(p, 0).
+
+LACKS_UNISTD_H, LACKS_FCNTL_H, LACKS_SYS_PARAM_H, LACKS_SYS_MMAN_H
+LACKS_STRINGS_H, LACKS_STRING_H, LACKS_SYS_TYPES_H,  LACKS_ERRNO_H
+LACKS_STDLIB_H LACKS_SCHED_H LACKS_TIME_H  default: NOT defined unless on WIN32
+  Define these if your system does not have these header files.
+  You might need to manually insert some of the declarations they provide.
+
+DEFAULT_GRANULARITY        default: page size if MORECORE_CONTIGUOUS,
+                                system_info.dwAllocationGranularity in WIN32,
+                                otherwise 64K.
+      Also settable using mallopt(M_GRANULARITY, x)
+  The unit for allocating and deallocating memory from the system.  On
+  most systems with contiguous MORECORE, there is no reason to
+  make this more than a page. However, systems with MMAP tend to
+  either require or encourage larger granularities.  You can increase
+  this value to prevent system allocation functions from being called so
+  often, especially if they are slow.  The value must be at least one
+  page and must be a power of two.  Setting to 0 causes initialization
+  to either page size or win32 region size.  (Note: In previous
+  versions of malloc, the equivalent of this option was called
+  "TOP_PAD")
+
+DEFAULT_TRIM_THRESHOLD    default: 2MB
+      Also settable using mallopt(M_TRIM_THRESHOLD, x)
+  The maximum amount of unused top-most memory to keep before
+  releasing via malloc_trim in free().  Automatic trimming is mainly
+  useful in long-lived programs using contiguous MORECORE.  Because
+  trimming via sbrk can be slow on some systems, and can sometimes be
+  wasteful (in cases where programs immediately afterward allocate
+  more large chunks) the value should be high enough so that your
+  overall system performance would improve by releasing this much
+  memory.  As a rough guide, you might set to a value close to the
+  average size of a process (program) running on your system.
+  Releasing this much memory would allow such a process to run in
+  memory.  Generally, it is worth tuning trim thresholds when a
+  program undergoes phases where several large chunks are allocated
+  and released in ways that can reuse each other's storage, perhaps
+  mixed with phases where there are no such chunks at all. The trim
+  value must be greater than page size to have any useful effect.  To
+  disable trimming completely, you can set to MAX_SIZE_T. Note that the trick
+  some people use of mallocing a huge space and then freeing it at
+  program startup, in an attempt to reserve system memory, doesn't
+  have the intended effect under automatic trimming, since that memory
+  will immediately be returned to the system.
+
+DEFAULT_MMAP_THRESHOLD       default: 256K
+      Also settable using mallopt(M_MMAP_THRESHOLD, x)
+  The request size threshold for using MMAP to directly service a
+  request. Requests of at least this size that cannot be allocated
+  using already-existing space will be serviced via mmap.  (If enough
+  normal freed space already exists it is used instead.)  Using mmap
+  segregates relatively large chunks of memory so that they can be
+  individually obtained and released from the host system. A request
+  serviced through mmap is never reused by any other request (at least
+  not directly; the system may just so happen to remap successive
+  requests to the same locations).  Segregating space in this way has
+  the benefits that: Mmapped space can always be individually released
+  back to the system, which helps keep the system level memory demands
+  of a long-lived program low.  Also, mapped memory doesn't become
+  `locked' between other chunks, as can happen with normally allocated
+  chunks, which means that even trimming via malloc_trim would not
+  release them.  However, it has the disadvantage that the space
+  cannot be reclaimed, consolidated, and then used to service later
+  requests, as happens with normal chunks.  The advantages of mmap
+  nearly always outweigh disadvantages for "large" chunks, but the
+  value of "large" may vary across systems.  The default is an
+  empirically derived value that works well in most systems. You can
+  disable mmap by setting to MAX_SIZE_T.
+
+MAX_RELEASE_CHECK_RATE   default: 4095 unless not HAVE_MMAP
+  The number of consolidated frees between checks to release
+  unused segments when freeing. When using non-contiguous segments,
+  especially with multiple mspaces, checking only for topmost space
+  doesn't always suffice to trigger trimming. To compensate for this,
+  free() will, with a period of MAX_RELEASE_CHECK_RATE (or the
+  current number of segments, if greater) try to release unused
+  segments to the OS when freeing chunks that result in
+  consolidation. The best value for this parameter is a compromise
+  between slowing down frees with relatively costly checks that
+  rarely trigger versus holding on to unused memory. To effectively
+  disable, set to MAX_SIZE_T. This may lead to a very slight speed
+  improvement at the expense of carrying around more memory.
+*/
+
+/* Version identifier to allow people to support multiple versions */
+#ifndef DLMALLOC_VERSION
+#define DLMALLOC_VERSION 20806
+#endif /* DLMALLOC_VERSION */
+
+#ifndef DLMALLOC_EXPORT
+#define DLMALLOC_EXPORT extern
+#endif
+
+#ifndef WIN32
+#ifdef _WIN32
+#define WIN32 1
+#endif  /* _WIN32 */
+#ifdef _WIN32_WCE
+#define LACKS_FCNTL_H
+#define WIN32 1
+#endif /* _WIN32_WCE */
+#endif  /* WIN32 */
+#ifdef WIN32
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+#include <tchar.h>
+#define HAVE_MMAP 1
+#define HAVE_MORECORE 0
+#define LACKS_UNISTD_H
+#define LACKS_SYS_PARAM_H
+#define LACKS_SYS_MMAN_H
+#define LACKS_STRING_H
+#define LACKS_STRINGS_H
+#define LACKS_SYS_TYPES_H
+#define LACKS_ERRNO_H
+#define LACKS_SCHED_H
+#ifndef MALLOC_FAILURE_ACTION
+#define MALLOC_FAILURE_ACTION
+#endif /* MALLOC_FAILURE_ACTION */
+#ifndef MMAP_CLEARS
+#ifdef _WIN32_WCE /* WINCE reportedly does not clear */
+#define MMAP_CLEARS 0
+#else
+#define MMAP_CLEARS 1
+#endif /* _WIN32_WCE */
+#endif /*MMAP_CLEARS */
+#endif  /* WIN32 */
+
+#if defined(DARWIN) || defined(_DARWIN)
+/* Mac OSX docs advise not to use sbrk; it seems better to use mmap */
+#ifndef HAVE_MORECORE
+#define HAVE_MORECORE 0
+#define HAVE_MMAP 1
+/* OSX allocators provide 16 byte alignment */
+#ifndef MALLOC_ALIGNMENT
+#define MALLOC_ALIGNMENT ((size_t)16U)
+#endif
+#endif  /* HAVE_MORECORE */
+#endif  /* DARWIN */
+
+#ifndef LACKS_SYS_TYPES_H
+#include <sys/types.h>  /* For size_t */
+#endif  /* LACKS_SYS_TYPES_H */
+
+/* The maximum possible size_t value has all bits set */
+#define MAX_SIZE_T           (~(size_t)0)
+
+#ifndef USE_LOCKS /* ensure true if spin or recursive locks set */
+#define USE_LOCKS  ((defined(USE_SPIN_LOCKS) && USE_SPIN_LOCKS != 0) || \
+                    (defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0))
+#endif /* USE_LOCKS */
+
+#if USE_LOCKS /* Spin locks for gcc >= 4.1, older gcc on x86, MSC >= 1310 */
+#if ((defined(__GNUC__) &&                                              \
+      ((__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)) ||      \
+       defined(__i386__) || defined(__x86_64__))) ||                    \
+     (defined(_MSC_VER) && _MSC_VER>=1310))
+#ifndef USE_SPIN_LOCKS
+#define USE_SPIN_LOCKS 1
+#endif /* USE_SPIN_LOCKS */
+#elif USE_SPIN_LOCKS
+#error "USE_SPIN_LOCKS defined without implementation"
+#endif /* ... locks available... */
+#elif !defined(USE_SPIN_LOCKS)
+#define USE_SPIN_LOCKS 0
+#endif /* USE_LOCKS */
+
+#ifndef ONLY_MSPACES
+#define ONLY_MSPACES 0
+#endif  /* ONLY_MSPACES */
+#ifndef MSPACES
+#if ONLY_MSPACES
+#define MSPACES 1
+#else   /* ONLY_MSPACES */
+#define MSPACES 0
+#endif  /* ONLY_MSPACES */
+#endif  /* MSPACES */
+#ifndef MALLOC_ALIGNMENT
+#define MALLOC_ALIGNMENT ((size_t)(2 * sizeof(void *)))
+#endif  /* MALLOC_ALIGNMENT */
+#ifndef FOOTERS
+#define FOOTERS 0
+#endif  /* FOOTERS */
+#ifndef ABORT
+#define ABORT  abort()
+#endif  /* ABORT */
+#ifndef ABORT_ON_ASSERT_FAILURE
+#define ABORT_ON_ASSERT_FAILURE 1
+#endif  /* ABORT_ON_ASSERT_FAILURE */
+#ifndef PROCEED_ON_ERROR
+#define PROCEED_ON_ERROR 0
+#endif  /* PROCEED_ON_ERROR */
+
+#ifndef INSECURE
+#define INSECURE 0
+#endif  /* INSECURE */
+#ifndef MALLOC_INSPECT_ALL
+#define MALLOC_INSPECT_ALL 0
+#endif  /* MALLOC_INSPECT_ALL */
+#ifndef HAVE_MMAP
+#define HAVE_MMAP 1
+#endif  /* HAVE_MMAP */
+#ifndef MMAP_CLEARS
+#define MMAP_CLEARS 1
+#endif  /* MMAP_CLEARS */
+#ifndef HAVE_MREMAP
+#ifdef linux
+#define HAVE_MREMAP 1
+#define _GNU_SOURCE /* Turns on mremap() definition */
+#else   /* linux */
+#define HAVE_MREMAP 0
+#endif  /* linux */
+#endif  /* HAVE_MREMAP */
+#ifndef MALLOC_FAILURE_ACTION
+#define MALLOC_FAILURE_ACTION  errno = ENOMEM;
+#endif  /* MALLOC_FAILURE_ACTION */
+#ifndef HAVE_MORECORE
+#if ONLY_MSPACES
+#define HAVE_MORECORE 0
+#else   /* ONLY_MSPACES */
+#define HAVE_MORECORE 1
+#endif  /* ONLY_MSPACES */
+#endif  /* HAVE_MORECORE */
+#if !HAVE_MORECORE
+#define MORECORE_CONTIGUOUS 0
+#else   /* !HAVE_MORECORE */
+#define MORECORE_DEFAULT sbrk
+#ifndef MORECORE_CONTIGUOUS
+#define MORECORE_CONTIGUOUS 1
+#endif  /* MORECORE_CONTIGUOUS */
+#endif  /* HAVE_MORECORE */
+#ifndef DEFAULT_GRANULARITY
+#if (MORECORE_CONTIGUOUS || defined(WIN32))
+#define DEFAULT_GRANULARITY (0)  /* 0 means to compute in init_mparams */
+#else   /* MORECORE_CONTIGUOUS */
+#define DEFAULT_GRANULARITY ((size_t)64U * (size_t)1024U)
+#endif  /* MORECORE_CONTIGUOUS */
+#endif  /* DEFAULT_GRANULARITY */
+#ifndef DEFAULT_TRIM_THRESHOLD
+#ifndef MORECORE_CANNOT_TRIM
+#define DEFAULT_TRIM_THRESHOLD ((size_t)2U * (size_t)1024U * (size_t)1024U)
+#else   /* MORECORE_CANNOT_TRIM */
+#define DEFAULT_TRIM_THRESHOLD MAX_SIZE_T
+#endif  /* MORECORE_CANNOT_TRIM */
+#endif  /* DEFAULT_TRIM_THRESHOLD */
+#ifndef DEFAULT_MMAP_THRESHOLD
+#if HAVE_MMAP
+#define DEFAULT_MMAP_THRESHOLD ((size_t)256U * (size_t)1024U)
+#else   /* HAVE_MMAP */
+#define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T
+#endif  /* HAVE_MMAP */
+#endif  /* DEFAULT_MMAP_THRESHOLD */
+#ifndef MAX_RELEASE_CHECK_RATE
+#if HAVE_MMAP
+#define MAX_RELEASE_CHECK_RATE 4095
+#else
+#define MAX_RELEASE_CHECK_RATE MAX_SIZE_T
+#endif /* HAVE_MMAP */
+#endif /* MAX_RELEASE_CHECK_RATE */
+#ifndef USE_BUILTIN_FFS
+#define USE_BUILTIN_FFS 0
+#endif  /* USE_BUILTIN_FFS */
+#ifndef USE_DEV_RANDOM
+#define USE_DEV_RANDOM 0
+#endif  /* USE_DEV_RANDOM */
+#ifndef NO_MALLINFO
+#define NO_MALLINFO 0
+#endif  /* NO_MALLINFO */
+#ifndef MALLINFO_FIELD_TYPE
+#define MALLINFO_FIELD_TYPE size_t
+#endif  /* MALLINFO_FIELD_TYPE */
+#ifndef NO_MALLOC_STATS
+#define NO_MALLOC_STATS 0
+#endif  /* NO_MALLOC_STATS */
+#ifndef NO_SEGMENT_TRAVERSAL
+#define NO_SEGMENT_TRAVERSAL 0
+#endif /* NO_SEGMENT_TRAVERSAL */
+
+/*
+  mallopt tuning options.  SVID/XPG defines four standard parameter
+  numbers for mallopt, normally defined in malloc.h.  None of these
+  are used in this malloc, so setting them has no effect. But this
+  malloc does support the following options.
+*/
+
+#define M_TRIM_THRESHOLD     (-1)
+#define M_GRANULARITY        (-2)
+#define M_MMAP_THRESHOLD     (-3)
+
+/* ------------------------ Mallinfo declarations ------------------------ */
+
+#if !NO_MALLINFO
+/*
+  This version of malloc supports the standard SVID/XPG mallinfo
+  routine that returns a struct containing usage properties and
+  statistics. It should work on any system that has a
+  /usr/include/malloc.h defining struct mallinfo.  The main
+  declaration needed is the mallinfo struct that is returned (by-copy)
+  by mallinfo().  The mallinfo struct contains a bunch of fields that
+  are not even meaningful in this version of malloc.  These fields
+  are instead filled by mallinfo() with other numbers that might be of
+  interest.
+
+  HAVE_USR_INCLUDE_MALLOC_H should be set if you have a
+  /usr/include/malloc.h file that includes a declaration of struct
+  mallinfo.  If so, it is included; else a compliant version is
+  declared below.  These must be precisely the same for mallinfo() to
+  work.  The original SVID version of this struct, defined on most
+  systems with mallinfo, declares all fields as ints. But some others
+  define as unsigned long. If your system defines the fields using a
+  type of different width than listed here, you MUST #include your
+  system version and #define HAVE_USR_INCLUDE_MALLOC_H.
+*/
+
+/* #define HAVE_USR_INCLUDE_MALLOC_H */
+
+#ifdef HAVE_USR_INCLUDE_MALLOC_H
+#include "/usr/include/malloc.h"
+#else /* HAVE_USR_INCLUDE_MALLOC_H */
+#ifndef STRUCT_MALLINFO_DECLARED
+/* HP-UX (and others?) redefines mallinfo unless _STRUCT_MALLINFO is defined */
+#define _STRUCT_MALLINFO
+#define STRUCT_MALLINFO_DECLARED 1
+struct mallinfo {
+  MALLINFO_FIELD_TYPE arena;    /* non-mmapped space allocated from system */
+  MALLINFO_FIELD_TYPE ordblks;  /* number of free chunks */
+  MALLINFO_FIELD_TYPE smblks;   /* always 0 */
+  MALLINFO_FIELD_TYPE hblks;    /* always 0 */
+  MALLINFO_FIELD_TYPE hblkhd;   /* space in mmapped regions */
+  MALLINFO_FIELD_TYPE usmblks;  /* maximum total allocated space */
+  MALLINFO_FIELD_TYPE fsmblks;  /* always 0 */
+  MALLINFO_FIELD_TYPE uordblks; /* total allocated space */
+  MALLINFO_FIELD_TYPE fordblks; /* total free space */
+  MALLINFO_FIELD_TYPE keepcost; /* releasable (via malloc_trim) space */
+};
+#endif /* STRUCT_MALLINFO_DECLARED */
+#endif /* HAVE_USR_INCLUDE_MALLOC_H */
+#endif /* NO_MALLINFO */
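
A hedged usage sketch (using the dl-prefixed name from the declarations
in this file; the fields are as annotated above):

    #include <stdio.h>
    #include <stddef.h>

    /* Print the two most commonly watched fields: total allocated and
     * total free space within the heap. */
    void print_heap_usage(void) {
        struct mallinfo mi = dlmallinfo();
        printf("allocated: %zu bytes, free: %zu bytes\n",
               (size_t)mi.uordblks, (size_t)mi.fordblks);
    }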
+
+/*
+  Try to persuade compilers to inline. The most critical functions for
+  inlining are defined as macros, so these aren't used for them.
+*/
+
+#ifndef FORCEINLINE
+  #if defined(__GNUC__)
+#define FORCEINLINE __inline __attribute__ ((always_inline))
+  #elif defined(_MSC_VER)
+    #define FORCEINLINE __forceinline
+  #endif
+#endif
+#ifndef NOINLINE
+  #if defined(__GNUC__)
+    #define NOINLINE __attribute__ ((noinline))
+  #elif defined(_MSC_VER)
+    #define NOINLINE __declspec(noinline)
+  #else
+    #define NOINLINE
+  #endif
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#ifndef FORCEINLINE
+ #define FORCEINLINE inline
+#endif
+#endif /* __cplusplus */
+#ifndef FORCEINLINE
+ #define FORCEINLINE
+#endif
+
+#if !ONLY_MSPACES
+
+/* ------------------- Declarations of public routines ------------------- */
+
+#ifndef USE_DL_PREFIX
+#define dlcalloc               calloc
+#define dlfree                 free
+#define dlmalloc               malloc
+#define dlmemalign             memalign
+#define dlposix_memalign       posix_memalign
+#define dlrealloc              realloc
+#define dlrealloc_in_place     realloc_in_place
+#define dlvalloc               valloc
+#define dlpvalloc              pvalloc
+#define dlmallinfo             mallinfo
+#define dlmallopt              mallopt
+#define dlmalloc_trim          malloc_trim
+#define dlmalloc_stats         malloc_stats
+#define dlmalloc_usable_size   malloc_usable_size
+#define dlmalloc_footprint     malloc_footprint
+#define dlmalloc_max_footprint malloc_max_footprint
+#define dlmalloc_footprint_limit malloc_footprint_limit
+#define dlmalloc_set_footprint_limit malloc_set_footprint_limit
+#define dlmalloc_inspect_all   malloc_inspect_all
+#define dlindependent_calloc   independent_calloc
+#define dlindependent_comalloc independent_comalloc
+#define dlbulk_free            bulk_free
+#endif /* USE_DL_PREFIX */
+
+/*
+  malloc(size_t n)
+  Returns a pointer to a newly allocated chunk of at least n bytes, or
+  null if no space is available, in which case errno is set to ENOMEM
+  on ANSI C systems.
+
+  If n is zero, malloc returns a minimum-sized chunk. (The minimum
+  size is 16 bytes on most 32bit systems, and 32 bytes on 64bit
+  systems.)  Note that size_t is an unsigned type, so calls with
+  arguments that would be negative if signed are interpreted as
+  requests for huge amounts of space, which will often fail. The
+  maximum supported value of n differs across systems, but is in all
+  cases less than the maximum representable value of a size_t.
+*/
+DLMALLOC_EXPORT void* dlmalloc(size_t);
+
+/*
+  free(void* p)
+  Releases the chunk of memory pointed to by p, that had been previously
+  allocated using malloc or a related routine such as realloc.
+  It has no effect if p is null. If p was not malloced or already
+  freed, free(p) will by default cause the current program to abort.
+*/
+DLMALLOC_EXPORT void  dlfree(void*);
+
+/*
+  calloc(size_t n_elements, size_t element_size);
+  Returns a pointer to n_elements * element_size bytes, with all locations
+  set to zero.
+*/
+DLMALLOC_EXPORT void* dlcalloc(size_t, size_t);
+
+/*
+  realloc(void* p, size_t n)
+  Returns a pointer to a chunk of size n that contains the same data
+  as does chunk p up to the minimum of (n, p's size) bytes, or null
+  if no space is available.
+
+  The returned pointer may or may not be the same as p. The algorithm
+  prefers extending p in most cases when possible, otherwise it
+  employs the equivalent of a malloc-copy-free sequence.
+
+  If p is null, realloc is equivalent to malloc.
+
+  If space is not available, realloc returns null, errno is set (if on
+  ANSI) and p is NOT freed.
+
+  If n is for fewer bytes than already held by p, the newly unused
+  space is lopped off and freed if possible.  realloc with a size
+  argument of zero (re)allocates a minimum-sized chunk.
+
+  The old unix realloc convention of allowing the last-free'd chunk
+  to be used as an argument to realloc is not supported.
+*/
+DLMALLOC_EXPORT void* dlrealloc(void*, size_t);
+
+/*
+  realloc_in_place(void* p, size_t n)
+  Resizes the space allocated for p to size n, only if this can be
+  done without moving p (i.e., only if there is adjacent space
+  available if n is greater than p's current allocated size, or n is
+  less than or equal to p's size). This may be used instead of plain
+  realloc if an alternative allocation strategy is needed upon failure
+  to expand space; for example, reallocation of a buffer that must be
+  memory-aligned or cleared. You can use realloc_in_place to trigger
+  these alternatives only when needed.
+
+  Returns p if successful; otherwise null.
+*/
+DLMALLOC_EXPORT void* dlrealloc_in_place(void*, size_t);
+
+/*
+  memalign(size_t alignment, size_t n);
+  Returns a pointer to a newly allocated chunk of n bytes, aligned
+  in accord with the alignment argument.
+
+  The alignment argument should be a power of two. If the argument is
+  not a power of two, the nearest greater power is used.
+  8-byte alignment is guaranteed by normal malloc calls, so don't
+  bother calling memalign with an argument of 8 or less.
+
+  Overreliance on memalign is a sure way to fragment space.
+*/
+DLMALLOC_EXPORT void* dlmemalign(size_t, size_t);
+
+/*
+  int posix_memalign(void** pp, size_t alignment, size_t n);
+  Allocates a chunk of n bytes, aligned in accord with the alignment
+  argument. Differs from memalign only in that it (1) assigns the
+  allocated memory to *pp rather than returning it, (2) fails and
+  returns EINVAL if the alignment is not a power of two, and (3) fails
+  and returns ENOMEM if memory cannot be allocated.
+*/
+DLMALLOC_EXPORT int dlposix_memalign(void**, size_t, size_t);
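+
+/*
+  For example (the 64-byte alignment and 4096-byte size are
+  illustrative):
+
+    void* p;
+    int rc = dlposix_memalign(&p, 64, 4096);
+    if (rc == EINVAL) {
+      // alignment was not a power of two
+    } else if (rc == ENOMEM) {
+      // no space could be allocated
+    }
+*/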
+
+/*
+  valloc(size_t n);
+  Equivalent to memalign(pagesize, n), where pagesize is the page
+  size of the system. If the pagesize is unknown, 4096 is used.
+*/
+DLMALLOC_EXPORT void* dlvalloc(size_t);
+
+/*
+  mallopt(int parameter_number, int parameter_value)
+  Sets tunable parameters. The format is to provide a
+  (parameter-number, parameter-value) pair.  mallopt then sets the
+  corresponding parameter to the argument value if it can (i.e., so
+  long as the value is meaningful), and returns 1 if successful else
+  0.  To work around the fact that mallopt is specified to use int,
+  not size_t parameters, the value -1 is specially treated as the
+  maximum unsigned size_t value.
+
+  SVID/XPG/ANSI defines four standard param numbers for mallopt,
+  normally defined in malloc.h.  None of these are used in this malloc,
+  so setting them has no effect. But this malloc also supports other
+  options in mallopt. See below for details.  Briefly, supported
+  parameters are as follows (listed defaults are for "typical"
+  configurations).
+
+  Symbol            param #  default    allowed param values
+  M_TRIM_THRESHOLD     -1   2*1024*1024   any   (-1 disables)
+  M_GRANULARITY        -2     page size   any power of 2 >= page size
+  M_MMAP_THRESHOLD     -3      256*1024   any   (or 0 if no MMAP support)
+*/
+DLMALLOC_EXPORT int dlmallopt(int, int);
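+
+/*
+  For example, using the parameter numbers from the table above:
+
+    dlmallopt(M_TRIM_THRESHOLD, 1024 * 1024);  // trim when 1MB is free
+    dlmallopt(M_TRIM_THRESHOLD, -1);           // -1 disables trimming
+
+  Each call returns 1 if the new value was accepted, else 0.
+*/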
+
+/*
+  malloc_footprint();
+  Returns the number of bytes obtained from the system.  The total
+  number of bytes allocated by malloc, realloc etc., is less than this
+  value. Unlike mallinfo, this function returns only a precomputed
+  result, so can be called frequently to monitor memory consumption.
+  Even if locks are otherwise defined, this function does not use them,
+  so results might not be up to date.
+*/
+DLMALLOC_EXPORT size_t dlmalloc_footprint(void);
+
+/*
+  malloc_max_footprint();
+  Returns the maximum number of bytes obtained from the system. This
+  value will be greater than current footprint if deallocated space
+  has been reclaimed by the system. The peak number of bytes allocated
+  by malloc, realloc etc., is less than this value. Unlike mallinfo,
+  this function returns only a precomputed result, so can be called
+  frequently to monitor memory consumption.  Even if locks are
+  otherwise defined, this function does not use them, so results might
+  not be up to date.
+*/
+DLMALLOC_EXPORT size_t dlmalloc_max_footprint(void);
+
+/*
+  malloc_footprint_limit();
+  Returns the number of bytes that the heap is allowed to obtain from
+  the system: the last value returned by
+  malloc_set_footprint_limit, or the maximum size_t value if it was
+  never set. The returned value reflects a permission. There is no
+  guarantee that this number of bytes can actually be obtained from
+  the system.
+*/
+DLMALLOC_EXPORT size_t dlmalloc_footprint_limit();
+
+/*
+  malloc_set_footprint_limit();
+  Sets the maximum number of bytes to obtain from the system, causing
+  failure returns from malloc and related functions upon attempts to
+  exceed this value. The argument value may be subject to page
+  rounding to an enforceable limit; this actual value is returned.
+  Using an argument of the maximum possible size_t effectively
+  disables checks. If the argument is less than or equal to the
+  current malloc_footprint, then all future allocations that require
+  additional system memory will fail. However, invocation cannot
+  retroactively deallocate existing used memory.
+*/
+DLMALLOC_EXPORT size_t dlmalloc_set_footprint_limit(size_t bytes);
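+
+/*
+  For example, to cap the heap at roughly 1GB (the limit may be
+  page-rounded, so the returned value is the one to trust):
+
+    size_t limit = dlmalloc_set_footprint_limit((size_t)1 << 30);
+    // dlmalloc_footprint_limit() now returns this same limit
+*/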
+
+#if MALLOC_INSPECT_ALL
+/*
+  malloc_inspect_all(void(*handler)(void *start,
+                                    void *end,
+                                    size_t used_bytes,
+                                    void* callback_arg),
+                      void* arg);
+  Traverses the heap and calls the given handler for each managed
+  region, skipping all bytes that are (or may be) used for bookkeeping
+  purposes.  Traversal does not include chunks that have been
+  directly memory mapped. Each reported region begins at the start
+  address, and continues up to but not including the end address.  The
+  first used_bytes of the region contain allocated data. If
+  used_bytes is zero, the region is unallocated. The handler is
+  invoked with the given callback argument. If locks are defined, they
+  are held during the entire traversal. It is a bad idea to invoke
+  other malloc functions from within the handler.
+
+  For example, to count the number of in-use chunks with size greater
+  than 1000, you could write:
+  static int count = 0;
+  void count_chunks(void* start, void* end, size_t used, void* arg) {
+    if (used >= 1000) ++count;
+  }
+  then:
+    malloc_inspect_all(count_chunks, NULL);
+
+  malloc_inspect_all is compiled only if MALLOC_INSPECT_ALL is defined.
+*/
+DLMALLOC_EXPORT void dlmalloc_inspect_all(void(*handler)(void*, void *, size_t, void*),
+                           void* arg);
+
+#endif /* MALLOC_INSPECT_ALL */
+
+#if !NO_MALLINFO
+/*
+  mallinfo()
+  Returns (by copy) a struct containing various summary statistics:
+
+  arena:     current total non-mmapped bytes allocated from system
+  ordblks:   the number of free chunks
+  smblks:    always zero.
+  hblks:     current number of mmapped regions
+  hblkhd:    total bytes held in mmapped regions
+  usmblks:   the maximum total allocated space. This will be greater
+                than current total if trimming has occurred.
+  fsmblks:   always zero
+  uordblks:  current total allocated space (normal or mmapped)
+  fordblks:  total free space
+  keepcost:  the maximum number of bytes that could ideally be released
+               back to system via malloc_trim. ("ideally" means that
+               it ignores page restrictions etc.)
+
+  Because these fields are ints, but internal bookkeeping may
+  be kept as longs, the reported values may wrap around zero and
+  thus be inaccurate.
+*/
+DLMALLOC_EXPORT struct mallinfo dlmallinfo(void);
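+
+/*
+  A sketch of reading the statistics (requires <stdio.h>; the casts
+  sidestep any uncertainty about the field type):
+
+    struct mallinfo mi = dlmallinfo();
+    printf("allocated: %lu free: %lu mmapped regions: %lu\n",
+           (unsigned long)mi.uordblks, (unsigned long)mi.fordblks,
+           (unsigned long)mi.hblks);
+*/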
+#endif /* NO_MALLINFO */
+
+/*
+  independent_calloc(size_t n_elements, size_t element_size, void* chunks[]);
+
+  independent_calloc is similar to calloc, but instead of returning a
+  single cleared space, it returns an array of pointers to n_elements
+  independent elements that can hold contents of size elem_size, each
+  of which starts out cleared, and can be independently freed,
+  realloc'ed etc. The elements are guaranteed to be adjacently
+  allocated (this is not guaranteed to occur with multiple callocs or
+  mallocs), which may also improve cache locality in some
+  applications.
+
+  The "chunks" argument is optional (i.e., may be null, which is
+  probably the most typical usage). If it is null, the returned array
+  is itself dynamically allocated and should also be freed when it is
+  no longer needed. Otherwise, the chunks array must be of at least
+  n_elements in length. It is filled in with the pointers to the
+  chunks.
+
+  In either case, independent_calloc returns this pointer array, or
+  null if the allocation failed.  If n_elements is zero and "chunks"
+  is null, it returns a chunk representing an array with zero elements
+  (which should be freed if not wanted).
+
+  Each element must be freed when it is no longer needed. This can be
+  done all at once using bulk_free.
+
+  independent_calloc simplifies and speeds up implementations of many
+  kinds of pools.  It may also be useful when constructing large data
+  structures that initially have a fixed number of fixed-sized nodes,
+  but the number is not known at compile time, and some of the nodes
+  may later need to be freed. For example:
+
+  struct Node { int item; struct Node* next; };
+
+  struct Node* build_list() {
+    struct Node** pool;
+    int i, n = read_number_of_nodes_needed();
+    if (n <= 0) return 0;
+    pool = (struct Node**)(independent_calloc(n, sizeof(struct Node), 0));
+    if (pool == 0) die();
+    // organize into a linked list...
+    struct Node* first = pool[0];
+    for (i = 0; i < n-1; ++i)
+      pool[i]->next = pool[i+1];
+    free(pool);     // Can now free the array (or not, if it is needed later)
+    return first;
+  }
+*/
+DLMALLOC_EXPORT void** dlindependent_calloc(size_t, size_t, void**);
+
+/*
+  independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]);
+
+  independent_comalloc allocates, all at once, a set of n_elements
+  chunks with sizes indicated in the "sizes" array.    It returns
+  an array of pointers to these elements, each of which can be
+  independently freed, realloc'ed etc. The elements are guaranteed to
+  be adjacently allocated (this is not guaranteed to occur with
+  multiple callocs or mallocs), which may also improve cache locality
+  in some applications.
+
+  The "chunks" argument is optional (i.e., may be null). If it is null
+  the returned array is itself dynamically allocated and should also
+  be freed when it is no longer needed. Otherwise, the chunks array
+  must be of at least n_elements in length. It is filled in with the
+  pointers to the chunks.
+
+  In either case, independent_comalloc returns this pointer array, or
+  null if the allocation failed.  If n_elements is zero and chunks is
+  null, it returns a chunk representing an array with zero elements
+  (which should be freed if not wanted).
+
+  Each element must be freed when it is no longer needed. This can be
+  done all at once using bulk_free.
+
+  independent_comalloc differs from independent_calloc in that each
+  element may have a different size, and also that it does not
+  automatically clear elements.
+
+  independent_comalloc can be used to speed up allocation in cases
+  where several structs or objects must always be allocated at the
+  same time.  For example:
+
+  struct Head { ... }
+  struct Foot { ... }
+
+  void send_message(char* msg) {
+    int msglen = strlen(msg);
+    size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) };
+    void* chunks[3];
+    if (independent_comalloc(3, sizes, chunks) == 0)
+      die();
+    struct Head* head = (struct Head*)(chunks[0]);
+    char*        body = (char*)(chunks[1]);
+    struct Foot* foot = (struct Foot*)(chunks[2]);
+    // ...
+  }
+
+  In general though, independent_comalloc is worth using only for
+  larger values of n_elements. For small values, you probably won't
+  detect enough difference from series of malloc calls to bother.
+
+  Overuse of independent_comalloc can increase overall memory usage,
+  since it cannot reuse existing noncontiguous small chunks that
+  might be available for some of the elements.
+*/
+DLMALLOC_EXPORT void** dlindependent_comalloc(size_t, size_t*, void**);
+
+/*
+  bulk_free(void* array[], size_t n_elements)
+  Frees and clears (sets to null) each non-null pointer in the given
+  array.  This is likely to be faster than freeing them one-by-one.
+  If footers are used, pointers that have been allocated in different
+  mspaces are not freed or cleared, and the count of all such pointers
+  is returned.  For large arrays of pointers with poor locality, it
+  may be worthwhile to sort this array before calling bulk_free.
+*/
+DLMALLOC_EXPORT size_t  dlbulk_free(void**, size_t n_elements);
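+
+/*
+  For example, pairing bulk_free with independent_calloc (reusing
+  struct Node from the build_list example above; the count of 8 is
+  illustrative):
+
+    int n = 8;
+    void** pool = independent_calloc(n, sizeof(struct Node), 0);
+    if (pool != 0) {
+      // ... use the nodes ...
+      size_t unfreed = bulk_free(pool, n);  // frees and nulls each node
+      assert(unfreed == 0);  // nonzero only for foreign-mspace pointers
+      free(pool);            // the pointer array itself
+    }
+*/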
+
+/*
+  pvalloc(size_t n);
+  Equivalent to valloc(minimum-page-that-holds(n)), that is,
+  rounding n up to the nearest pagesize.
+ */
+DLMALLOC_EXPORT void*  dlpvalloc(size_t);
+
+/*
+  malloc_trim(size_t pad);
+
+  If possible, gives memory back to the system (via negative arguments
+  to sbrk) if there is unused memory at the `high' end of the malloc
+  pool or in unused MMAP segments. You can call this after freeing
+  large blocks of memory to potentially reduce the system-level memory
+  requirements of a program. However, it cannot guarantee to reduce
+  memory. Under some allocation patterns, some large free blocks of
+  memory will be locked between two used chunks, so they cannot be
+  given back to the system.
+
+  The `pad' argument to malloc_trim represents the amount of free
+  trailing space to leave untrimmed. If this argument is zero, only
+  the minimum amount of memory to maintain internal data structures
+  will be left. Non-zero arguments can be supplied to maintain enough
+  trailing space to service future expected allocations without having
+  to re-obtain memory from the system.
+
+  Malloc_trim returns 1 if it actually released any memory, else 0.
+*/
+DLMALLOC_EXPORT int  dlmalloc_trim(size_t);
+
+/*
+  malloc_stats();
+  Prints on stderr the amount of space obtained from the system (both
+  via sbrk and mmap), the maximum amount (which may be more than
+  current if malloc_trim and/or munmap got called), and the current
+  number of bytes allocated via malloc (or realloc, etc) but not yet
+  freed. Note that this is the number of bytes allocated, not the
+  number requested. It will be larger than the number requested
+  because of alignment and bookkeeping overhead. Because it includes
+  alignment wastage as being in use, this figure may be greater than
+  zero even when no user-level chunks are allocated.
+
+  The reported current and maximum system memory can be inaccurate if
+  a program makes other calls to system memory allocation functions
+  (normally sbrk) outside of malloc.
+
+  malloc_stats prints only the most commonly interesting statistics.
+  More information can be obtained by calling mallinfo.
+*/
+DLMALLOC_EXPORT void  dlmalloc_stats(void);
+
+/*
+  malloc_usable_size(void* p);
+
+  Returns the number of bytes you can actually use in
+  an allocated chunk, which may be more than you requested (although
+  often not) due to alignment and minimum size constraints.
+  You can use this many bytes without worrying about
+  overwriting other allocated objects. This is not a particularly great
+  programming practice. malloc_usable_size can be more useful in
+  debugging and assertions, for example:
+
+  p = malloc(n);
+  assert(malloc_usable_size(p) >= 256);
+*/
+size_t dlmalloc_usable_size(void*);
+
+#endif /* ONLY_MSPACES */
+
+#if MSPACES
+
+/*
+  mspace is an opaque type representing an independent
+  region of space that supports mspace_malloc, etc.
+*/
+typedef void* mspace;
+
+/*
+  create_mspace creates and returns a new independent space with the
+  given initial capacity, or, if 0, the default granularity size.  It
+  returns null if there is no system memory available to create the
+  space.  If argument locked is non-zero, the space uses a separate
+  lock to control access. The capacity of the space will grow
+  dynamically as needed to service mspace_malloc requests.  You can
+  control the sizes of incremental increases of this space by
+  compiling with a different DEFAULT_GRANULARITY or dynamically
+  setting with mallopt(M_GRANULARITY, value).
+*/
+DLMALLOC_EXPORT mspace create_mspace(size_t capacity, int locked);
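+
+/*
+  For example (capacity 0 selects the default granularity; a nonzero
+  locked argument gives the space its own lock):
+
+    mspace ms = create_mspace(0, 1);
+    if (ms != 0) {
+      void* p = mspace_malloc(ms, 128);
+      mspace_free(ms, p);
+      destroy_mspace(ms);  // returns the number of bytes released
+    }
+*/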
+
+/*
+  destroy_mspace destroys the given space, and attempts to return all
+  of its memory back to the system, returning the total number of
+  bytes freed. After destruction, the results of access to all memory
+  used by the space become undefined.
+*/
+DLMALLOC_EXPORT size_t destroy_mspace(mspace msp);
+
+/*
+  create_mspace_with_base uses the memory supplied as the initial base
+  of a new mspace. Part (less than 128*sizeof(size_t) bytes) of this
+  space is used for bookkeeping, so the capacity must be at least this
+  large. (Otherwise 0 is returned.) When this initial space is
+  exhausted, additional memory will be obtained from the system.
+  Destroying this space will deallocate all additionally allocated
+  space (if possible) but not the initial base.
+*/
+DLMALLOC_EXPORT mspace create_mspace_with_base(void* base, size_t capacity, int locked);
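+
+/*
+  For example, carving a space out of a static arena (the 1MB size is
+  illustrative; fewer than 128*sizeof(size_t) of those bytes go to
+  bookkeeping):
+
+    static char arena[1 << 20];
+    mspace ms = create_mspace_with_base(arena, sizeof(arena), 0);
+    // destroy_mspace(ms) would release any additionally obtained
+    // system memory, but never the arena itself
+*/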
+
+/*
+  mspace_track_large_chunks controls whether requests for large chunks
+  are allocated in their own untracked mmapped regions, separate from
+  others in this mspace. By default large chunks are not tracked,
+  which reduces fragmentation. However, such chunks are not
+  necessarily released to the system upon destroy_mspace.  Enabling
+  tracking by setting to true may increase fragmentation, but avoids
+  leakage when relying on destroy_mspace to release all memory
+  allocated using this space.  The function returns the previous
+  setting.
+*/
+DLMALLOC_EXPORT int mspace_track_large_chunks(mspace msp, int enable);
+
+
+/*
+  mspace_malloc behaves as malloc, but operates within
+  the given space.
+*/
+DLMALLOC_EXPORT void* mspace_malloc(mspace msp, size_t bytes);
+
+/*
+  mspace_free behaves as free, but operates within
+  the given space.
+
+  If compiled with FOOTERS==1, mspace_free is not actually needed.
+  free may be called instead of mspace_free because freed chunks from
+  any space are handled by their originating spaces.
+*/
+DLMALLOC_EXPORT void mspace_free(mspace msp, void* mem);
+
+/*
+  mspace_realloc behaves as realloc, but operates within
+  the given space.
+
+  If compiled with FOOTERS==1, mspace_realloc is not actually
+  needed.  realloc may be called instead of mspace_realloc because
+  realloced chunks from any space are handled by their originating
+  spaces.
+*/
+DLMALLOC_EXPORT void* mspace_realloc(mspace msp, void* mem, size_t newsize);
+
+/*
+  mspace_calloc behaves as calloc, but operates within
+  the given space.
+*/
+DLMALLOC_EXPORT void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size);
+
+/*
+  mspace_memalign behaves as memalign, but operates within
+  the given space.
+*/
+DLMALLOC_EXPORT void* mspace_memalign(mspace msp, size_t alignment, size_t bytes);
+
+/*
+  mspace_independent_calloc behaves as independent_calloc, but
+  operates within the given space.
+*/
+DLMALLOC_EXPORT void** mspace_independent_calloc(mspace msp, size_t n_elements,
+                                 size_t elem_size, void* chunks[]);
+
+/*
+  mspace_independent_comalloc behaves as independent_comalloc, but
+  operates within the given space.
+*/
+DLMALLOC_EXPORT void** mspace_independent_comalloc(mspace msp, size_t n_elements,
+                                   size_t sizes[], void* chunks[]);
+
+/*
+  mspace_footprint() returns the number of bytes obtained from the
+  system for this space.
+*/
+DLMALLOC_EXPORT size_t mspace_footprint(mspace msp);
+
+/*
+  mspace_max_footprint() returns the peak number of bytes obtained from the
+  system for this space.
+*/
+DLMALLOC_EXPORT size_t mspace_max_footprint(mspace msp);
+
+
+#if !NO_MALLINFO
+/*
+  mspace_mallinfo behaves as mallinfo, but reports properties of
+  the given space.
+*/
+DLMALLOC_EXPORT struct mallinfo mspace_mallinfo(mspace msp);
+#endif /* NO_MALLINFO */
+
+/*
+  mspace_usable_size(const void* mem) behaves the same as
+  malloc_usable_size, but for chunks allocated from the given space.
+*/
+DLMALLOC_EXPORT size_t mspace_usable_size(const void* mem);
+
+/*
+  mspace_malloc_stats behaves as malloc_stats, but reports
+  properties of the given space.
+*/
+DLMALLOC_EXPORT void mspace_malloc_stats(mspace msp);
+
+/*
+  mspace_trim behaves as malloc_trim, but
+  operates within the given space.
+*/
+DLMALLOC_EXPORT int mspace_trim(mspace msp, size_t pad);
+
+/*
+  An alias for mallopt.
+*/
+DLMALLOC_EXPORT int mspace_mallopt(int, int);
+
+#endif /* MSPACES */
+
+#ifdef __cplusplus
+}  /* end of extern "C" */
+#endif /* __cplusplus */
+
+/*
+  ========================================================================
+  To make a fully customizable malloc.h header file, cut everything
+  above this line, put into file malloc.h, edit to suit, and #include it
+  on the next line, as well as in programs that use this malloc.
+  ========================================================================
+*/
+
+/* #include "malloc.h" */
+
+/*------------------------------ internal #includes ---------------------- */
+
+#ifdef _MSC_VER
+#pragma warning( disable : 4146 ) /* no "unsigned" warnings */
+#endif /* _MSC_VER */
+#if !NO_MALLOC_STATS
+#include <stdio.h>       /* for printing in malloc_stats */
+#endif /* NO_MALLOC_STATS */
+#ifndef LACKS_ERRNO_H
+#include <errno.h>       /* for MALLOC_FAILURE_ACTION */
+#endif /* LACKS_ERRNO_H */
+#ifdef DEBUG
+#if ABORT_ON_ASSERT_FAILURE
+#undef assert
+#define assert(x) if(!(x)) ABORT
+#else /* ABORT_ON_ASSERT_FAILURE */
+#include <assert.h>
+#endif /* ABORT_ON_ASSERT_FAILURE */
+#else  /* DEBUG */
+#ifndef assert
+#define assert(x)
+#endif
+#define DEBUG 0
+#endif /* DEBUG */
+#if !defined(WIN32) && !defined(LACKS_TIME_H)
+#include <time.h>        /* for magic initialization */
+#endif /* WIN32 */
+#ifndef LACKS_STDLIB_H
+#include <stdlib.h>      /* for abort() */
+#endif /* LACKS_STDLIB_H */
+#ifndef LACKS_STRING_H
+#include <string.h>      /* for memset etc */
+#endif  /* LACKS_STRING_H */
+#if USE_BUILTIN_FFS
+#ifndef LACKS_STRINGS_H
+#include <strings.h>     /* for ffs */
+#endif /* LACKS_STRINGS_H */
+#endif /* USE_BUILTIN_FFS */
+#if HAVE_MMAP
+#ifndef LACKS_SYS_MMAN_H
+/* On some versions of linux, mremap decl in mman.h needs __USE_GNU set */
+#if (defined(linux) && !defined(__USE_GNU))
+#define __USE_GNU 1
+#include <sys/mman.h>    /* for mmap */
+#undef __USE_GNU
+#else
+#include <sys/mman.h>    /* for mmap */
+#endif /* linux */
+#endif /* LACKS_SYS_MMAN_H */
+#ifndef LACKS_FCNTL_H
+#include <fcntl.h>
+#endif /* LACKS_FCNTL_H */
+#endif /* HAVE_MMAP */
+#ifndef LACKS_UNISTD_H
+#include <unistd.h>     /* for sbrk, sysconf */
+#else /* LACKS_UNISTD_H */
+#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
+extern void*     sbrk(ptrdiff_t);
+#endif /* FreeBSD etc */
+#endif /* LACKS_UNISTD_H */
+
+/* Declarations for locking */
+#if USE_LOCKS
+#ifndef WIN32
+#if defined (__SVR4) && defined (__sun)  /* solaris */
+#include <thread.h>
+#elif !defined(LACKS_SCHED_H)
+#include <sched.h>
+#endif /* solaris or LACKS_SCHED_H */
+#if (defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0) || !USE_SPIN_LOCKS
+#include <pthread.h>
+#endif /* USE_RECURSIVE_LOCKS ... */
+#elif defined(_MSC_VER)
+#ifndef _M_AMD64
+/* These are already defined on AMD64 builds */
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+LONG __cdecl _InterlockedCompareExchange(LONG volatile *Dest, LONG Exchange, LONG Comp);
+LONG __cdecl _InterlockedExchange(LONG volatile *Target, LONG Value);
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+#endif /* _M_AMD64 */
+#pragma intrinsic (_InterlockedCompareExchange)
+#pragma intrinsic (_InterlockedExchange)
+#define interlockedcompareexchange _InterlockedCompareExchange
+#define interlockedexchange _InterlockedExchange
+#elif defined(WIN32) && defined(__GNUC__)
+#define interlockedcompareexchange(a, b, c) __sync_val_compare_and_swap(a, c, b)
+#define interlockedexchange __sync_lock_test_and_set
+#endif /* Win32 */
+#else /* USE_LOCKS */
+#endif /* USE_LOCKS */
+
+#ifndef LOCK_AT_FORK
+#define LOCK_AT_FORK 0
+#endif
+
+/* Declarations for bit scanning on win32 */
+#if defined(_MSC_VER) && _MSC_VER>=1300
+#ifndef BitScanForward /* Try to avoid pulling in WinNT.h */
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+unsigned char _BitScanForward(unsigned long *index, unsigned long mask);
+unsigned char _BitScanReverse(unsigned long *index, unsigned long mask);
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#define BitScanForward _BitScanForward
+#define BitScanReverse _BitScanReverse
+#pragma intrinsic(_BitScanForward)
+#pragma intrinsic(_BitScanReverse)
+#endif /* BitScanForward */
+#endif /* defined(_MSC_VER) && _MSC_VER>=1300 */
+
+#ifndef WIN32
+#ifndef malloc_getpagesize
+#  ifdef _SC_PAGESIZE         /* some SVR4 systems omit an underscore */
+#    ifndef _SC_PAGE_SIZE
+#      define _SC_PAGE_SIZE _SC_PAGESIZE
+#    endif
+#  endif
+#  ifdef _SC_PAGE_SIZE
+#    define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
+#  else
+#    if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
+       extern size_t getpagesize();
+#      define malloc_getpagesize getpagesize()
+#    else
+#      ifdef WIN32 /* use supplied emulation of getpagesize */
+#        define malloc_getpagesize getpagesize()
+#      else
+#        ifndef LACKS_SYS_PARAM_H
+#          include <sys/param.h>
+#        endif
+#        ifdef EXEC_PAGESIZE
+#          define malloc_getpagesize EXEC_PAGESIZE
+#        else
+#          ifdef NBPG
+#            ifndef CLSIZE
+#              define malloc_getpagesize NBPG
+#            else
+#              define malloc_getpagesize (NBPG * CLSIZE)
+#            endif
+#          else
+#            ifdef NBPC
+#              define malloc_getpagesize NBPC
+#            else
+#              ifdef PAGESIZE
+#                define malloc_getpagesize PAGESIZE
+#              else /* just guess */
+#                define malloc_getpagesize ((size_t)4096U)
+#              endif
+#            endif
+#          endif
+#        endif
+#      endif
+#    endif
+#  endif
+#endif
+#endif
+
+/* ------------------- size_t and alignment properties -------------------- */
+
+/* The byte and bit size of a size_t */
+#define SIZE_T_SIZE         (sizeof(size_t))
+#define SIZE_T_BITSIZE      (sizeof(size_t) << 3)
+
+/* Some constants coerced to size_t */
+/* Annoying but necessary to avoid errors on some platforms */
+#define SIZE_T_ZERO         ((size_t)0)
+#define SIZE_T_ONE          ((size_t)1)
+#define SIZE_T_TWO          ((size_t)2)
+#define SIZE_T_FOUR         ((size_t)4)
+#define TWO_SIZE_T_SIZES    (SIZE_T_SIZE<<1)
+#define FOUR_SIZE_T_SIZES   (SIZE_T_SIZE<<2)
+#define SIX_SIZE_T_SIZES    (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES)
+#define HALF_MAX_SIZE_T     (MAX_SIZE_T / 2U)
+
+/* The bit mask value corresponding to MALLOC_ALIGNMENT */
+#define CHUNK_ALIGN_MASK    (MALLOC_ALIGNMENT - SIZE_T_ONE)
+
+/* True if address a has acceptable alignment */
+#define is_aligned(A)       (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0)
+
+/* the number of bytes to offset an address to align it */
+#define align_offset(A)\
+ ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\
+  ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))
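+
+/*
+  Worked example, assuming MALLOC_ALIGNMENT == 16 (so CHUNK_ALIGN_MASK
+  == 15): for A == 0x1009, (size_t)A & 15 == 9, so align_offset(A) ==
+  (16 - 9) & 15 == 7, and A + 7 == 0x1010 is 16-byte aligned. For an
+  already-aligned address the offset is 0.
+*/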
+
+/* -------------------------- MMAP preliminaries ------------------------- */
+
+/*
+   If HAVE_MORECORE or HAVE_MMAP are false, we just define calls and
+   checks to fail so compiler optimizer can delete code rather than
+   using so many "#if"s.
+*/
+
+
+/* MORECORE and MMAP must return MFAIL on failure */
+#define MFAIL                ((void*)(MAX_SIZE_T))
+#define CMFAIL               ((char*)(MFAIL)) /* defined for convenience */
+
+#if HAVE_MMAP
+
+#ifndef WIN32
+#define MUNMAP_DEFAULT(a, s)  munmap((a), (s))
+#define MMAP_PROT            (PROT_READ|PROT_WRITE)
+#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
+#define MAP_ANONYMOUS        MAP_ANON
+#endif /* MAP_ANON */
+#ifdef MAP_ANONYMOUS
+#define MMAP_FLAGS           (MAP_PRIVATE|MAP_ANONYMOUS)
+#define MMAP_DEFAULT(s)       mmap(0, (s), MMAP_PROT, MMAP_FLAGS, -1, 0)
+#else /* MAP_ANONYMOUS */
+/*
+   Nearly all versions of mmap support MAP_ANONYMOUS, so the following
+   is unlikely to be needed, but is supplied just in case.
+*/
+#define MMAP_FLAGS           (MAP_PRIVATE)
+static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */
+#define MMAP_DEFAULT(s) ((dev_zero_fd < 0) ? \
+           (dev_zero_fd = open("/dev/zero", O_RDWR), \
+            mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) : \
+            mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0))
+#endif /* MAP_ANONYMOUS */
+
+#define DIRECT_MMAP_DEFAULT(s) MMAP_DEFAULT(s)
+
+#else /* WIN32 */
+
+/* Win32 MMAP via VirtualAlloc */
+static FORCEINLINE void* win32mmap(size_t size) {
+  void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);
+  return (ptr != 0)? ptr: MFAIL;
+}
+
+/* For direct MMAP, use MEM_TOP_DOWN to minimize interference */
+static FORCEINLINE void* win32direct_mmap(size_t size) {
+  void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN,
+                           PAGE_READWRITE);
+  return (ptr != 0)? ptr: MFAIL;
+}
+
+/* This function supports releasing coalesced segments */
+static FORCEINLINE int win32munmap(void* ptr, size_t size) {
+  MEMORY_BASIC_INFORMATION minfo;
+  char* cptr = (char*)ptr;
+  while (size) {
+    if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0)
+      return -1;
+    if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr ||
+        minfo.State != MEM_COMMIT || minfo.RegionSize > size)
+      return -1;
+    if (VirtualFree(cptr, 0, MEM_RELEASE) == 0)
+      return -1;
+    cptr += minfo.RegionSize;
+    size -= minfo.RegionSize;
+  }
+  return 0;
+}
+
+#define MMAP_DEFAULT(s)             win32mmap(s)
+#define MUNMAP_DEFAULT(a, s)        win32munmap((a), (s))
+#define DIRECT_MMAP_DEFAULT(s)      win32direct_mmap(s)
+#endif /* WIN32 */
+#endif /* HAVE_MMAP */
+
+#if HAVE_MREMAP
+#ifndef WIN32
+#define MREMAP_DEFAULT(addr, osz, nsz, mv) mremap((addr), (osz), (nsz), (mv))
+#endif /* WIN32 */
+#endif /* HAVE_MREMAP */
+
+/**
+ * Define CALL_MORECORE
+ */
+#if HAVE_MORECORE
+    #ifdef MORECORE
+        #define CALL_MORECORE(S)    MORECORE(S)
+    #else  /* MORECORE */
+        #define CALL_MORECORE(S)    MORECORE_DEFAULT(S)
+    #endif /* MORECORE */
+#else  /* HAVE_MORECORE */
+    #define CALL_MORECORE(S)        MFAIL
+#endif /* HAVE_MORECORE */
+
+/**
+ * Define CALL_MMAP/CALL_MUNMAP/CALL_DIRECT_MMAP
+ */
+#if HAVE_MMAP
+    #define USE_MMAP_BIT            (SIZE_T_ONE)
+
+    #ifdef MMAP
+        #define CALL_MMAP(s)        MMAP(s)
+    #else /* MMAP */
+        #define CALL_MMAP(s)        MMAP_DEFAULT(s)
+    #endif /* MMAP */
+    #ifdef MUNMAP
+        #define CALL_MUNMAP(a, s)   MUNMAP((a), (s))
+    #else /* MUNMAP */
+        #define CALL_MUNMAP(a, s)   MUNMAP_DEFAULT((a), (s))
+    #endif /* MUNMAP */
+    #ifdef DIRECT_MMAP
+        #define CALL_DIRECT_MMAP(s) DIRECT_MMAP(s)
+    #else /* DIRECT_MMAP */
+        #define CALL_DIRECT_MMAP(s) DIRECT_MMAP_DEFAULT(s)
+    #endif /* DIRECT_MMAP */
+#else  /* HAVE_MMAP */
+    #define USE_MMAP_BIT            (SIZE_T_ZERO)
+
+    #define MMAP(s)                 MFAIL
+    #define MUNMAP(a, s)            (-1)
+    #define DIRECT_MMAP(s)          MFAIL
+    #define CALL_DIRECT_MMAP(s)     DIRECT_MMAP(s)
+    #define CALL_MMAP(s)            MMAP(s)
+    #define CALL_MUNMAP(a, s)       MUNMAP((a), (s))
+#endif /* HAVE_MMAP */
+
+/**
+ * Define CALL_MREMAP
+ */
+#if HAVE_MMAP && HAVE_MREMAP
+    #ifdef MREMAP
+        #define CALL_MREMAP(addr, osz, nsz, mv) MREMAP((addr), (osz), (nsz), (mv))
+    #else /* MREMAP */
+        #define CALL_MREMAP(addr, osz, nsz, mv) MREMAP_DEFAULT((addr), (osz), (nsz), (mv))
+    #endif /* MREMAP */
+#else  /* HAVE_MMAP && HAVE_MREMAP */
+    #define CALL_MREMAP(addr, osz, nsz, mv)     MFAIL
+#endif /* HAVE_MMAP && HAVE_MREMAP */
+
+/* mstate bit set if contiguous morecore disabled or failed */
+#define USE_NONCONTIGUOUS_BIT (4U)
+
+/* segment bit set in create_mspace_with_base */
+#define EXTERN_BIT            (8U)
+
+
+/* --------------------------- Lock preliminaries ------------------------ */
+
+/*
+  When locks are defined, there is one global lock, plus
+  one per-mspace lock.
+
+  The global lock ensures that mparams.magic and other unique
+  mparams values are initialized only once. It also protects
+  sequences of calls to MORECORE.  In many cases sys_alloc requires
+  two calls that should not be interleaved with calls by other
+  threads.  This does not protect against direct calls to MORECORE
+  by other threads not using this lock, so there is still code to
+  cope as best we can with interference.
+
+  Per-mspace locks surround calls to malloc, free, etc.
+  By default, locks are simple non-reentrant mutexes.
+
+  Because lock-protected regions generally have bounded times, it is
+  OK to use the supplied simple spinlocks. Spinlocks are likely to
+  improve performance for lightly contended applications, but worsen
+  performance under heavy contention.
+
+  If USE_LOCKS is > 1, the definitions of lock routines here are
+  bypassed, in which case you will need to define the type MLOCK_T,
+  and at least INITIAL_LOCK, DESTROY_LOCK, ACQUIRE_LOCK, RELEASE_LOCK
+  and TRY_LOCK.  You must also declare a
+    static MLOCK_T malloc_global_mutex = { initialization values };.
+
+*/
+
+#if !USE_LOCKS
+#define USE_LOCK_BIT               (0U)
+#define INITIAL_LOCK(l)            (0)
+#define DESTROY_LOCK(l)            (0)
+#define ACQUIRE_MALLOC_GLOBAL_LOCK()
+#define RELEASE_MALLOC_GLOBAL_LOCK()
+
+#else
+#if USE_LOCKS > 1
+/* -----------------------  User-defined locks ------------------------ */
+/* Define your own lock implementation here */
+/* #define INITIAL_LOCK(lk)  ... */
+/* #define DESTROY_LOCK(lk)  ... */
+/* #define ACQUIRE_LOCK(lk)  ... */
+/* #define RELEASE_LOCK(lk)  ... */
+/* #define TRY_LOCK(lk) ... */
+/* static MLOCK_T malloc_global_mutex = ... */
+
+#elif USE_SPIN_LOCKS
+
+/* First, define CAS_LOCK and CLEAR_LOCK on ints */
+/* Note CAS_LOCK defined to return 0 on success */
+
+#if defined(__GNUC__)&& (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1))
+#define CAS_LOCK(sl)     __sync_lock_test_and_set(sl, 1)
+#define CLEAR_LOCK(sl)   __sync_lock_release(sl)
+
+#elif (defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)))
+/* Custom spin locks for older gcc on x86 */
+static FORCEINLINE int x86_cas_lock(int *sl) {
+  int ret;
+  int val = 1;
+  int cmp = 0;
+  __asm__ __volatile__  ("lock; cmpxchgl %1, %2"
+                         : "=a" (ret)
+                         : "r" (val), "m" (*(sl)), "0"(cmp)
+                         : "memory", "cc");
+  return ret;
+}
+
+static FORCEINLINE void x86_clear_lock(int* sl) {
+  assert(*sl != 0);
+  int prev = 0;
+  int ret;
+  __asm__ __volatile__ ("lock; xchgl %0, %1"
+                        : "=r" (ret)
+                        : "m" (*(sl)), "0"(prev)
+                        : "memory");
+}
+
+#define CAS_LOCK(sl)     x86_cas_lock(sl)
+#define CLEAR_LOCK(sl)   x86_clear_lock(sl)
+
+#else /* Win32 MSC */
+#define CAS_LOCK(sl)     interlockedexchange(sl, (LONG)1)
+#define CLEAR_LOCK(sl)   interlockedexchange (sl, (LONG)0)
+
+#endif /* ... gcc spin locks ... */
+
+/* How to yield for a spin lock */
+#define SPINS_PER_YIELD       63
+#if defined(_MSC_VER)
+#define SLEEP_EX_DURATION     50 /* delay for yield/sleep */
+#define SPIN_LOCK_YIELD  SleepEx(SLEEP_EX_DURATION, FALSE)
+#elif defined (__SVR4) && defined (__sun) /* solaris */
+#define SPIN_LOCK_YIELD   thr_yield();
+#elif !defined(LACKS_SCHED_H)
+#define SPIN_LOCK_YIELD   sched_yield();
+#else
+#define SPIN_LOCK_YIELD
+#endif /* ... yield ... */
+
+#if !defined(USE_RECURSIVE_LOCKS) || USE_RECURSIVE_LOCKS == 0
+/* Plain spin locks use single word (embedded in malloc_states) */
+static int spin_acquire_lock(int *sl) {
+  int spins = 0;
+  while (*(volatile int *)sl != 0 || CAS_LOCK(sl)) {
+    if ((++spins & SPINS_PER_YIELD) == 0) {
+      SPIN_LOCK_YIELD;
+    }
+  }
+  return 0;
+}
+
+#define MLOCK_T               int
+#define TRY_LOCK(sl)          !CAS_LOCK(sl)
+#define RELEASE_LOCK(sl)      CLEAR_LOCK(sl)
+#define ACQUIRE_LOCK(sl)      (CAS_LOCK(sl)? spin_acquire_lock(sl) : 0)
+#define INITIAL_LOCK(sl)      (*sl = 0)
+#define DESTROY_LOCK(sl)      (0)
+static MLOCK_T malloc_global_mutex = 0;
+
+#else /* USE_RECURSIVE_LOCKS */
+/* types for lock owners */
+#ifdef WIN32
+#define THREAD_ID_T           DWORD
+#define CURRENT_THREAD        GetCurrentThreadId()
+#define EQ_OWNER(X,Y)         ((X) == (Y))
+#else
+/*
+  Note: the following assume that pthread_t is a type that can be
+  initialized to (casted) zero. If this is not the case, you will need to
+  somehow redefine these or not use spin locks.
+*/
+#define THREAD_ID_T           pthread_t
+#define CURRENT_THREAD        pthread_self()
+#define EQ_OWNER(X,Y)         pthread_equal(X, Y)
+#endif
+
+struct malloc_recursive_lock {
+  int sl;
+  unsigned int c;
+  THREAD_ID_T threadid;
+};
+
+#define MLOCK_T  struct malloc_recursive_lock
+static MLOCK_T malloc_global_mutex = { 0, 0, (THREAD_ID_T)0};
+
+static FORCEINLINE void recursive_release_lock(MLOCK_T *lk) {
+  assert(lk->sl != 0);
+  if (--lk->c == 0) {
+    CLEAR_LOCK(&lk->sl);
+  }
+}
+
+static FORCEINLINE int recursive_acquire_lock(MLOCK_T *lk) {
+  THREAD_ID_T mythreadid = CURRENT_THREAD;
+  int spins = 0;
+  for (;;) {
+    if (*((volatile int *)(&lk->sl)) == 0) {
+      if (!CAS_LOCK(&lk->sl)) {
+        lk->threadid = mythreadid;
+        lk->c = 1;
+        return 0;
+      }
+    }
+    else if (EQ_OWNER(lk->threadid, mythreadid)) {
+      ++lk->c;
+      return 0;
+    }
+    if ((++spins & SPINS_PER_YIELD) == 0) {
+      SPIN_LOCK_YIELD;
+    }
+  }
+}
+
+static FORCEINLINE int recursive_try_lock(MLOCK_T *lk) {
+  THREAD_ID_T mythreadid = CURRENT_THREAD;
+  if (*((volatile int *)(&lk->sl)) == 0) {
+    if (!CAS_LOCK(&lk->sl)) {
+      lk->threadid = mythreadid;
+      lk->c = 1;
+      return 1;
+    }
+  }
+  else if (EQ_OWNER(lk->threadid, mythreadid)) {
+    ++lk->c;
+    return 1;
+  }
+  return 0;
+}
+
+#define RELEASE_LOCK(lk)      recursive_release_lock(lk)
+#define TRY_LOCK(lk)          recursive_try_lock(lk)
+#define ACQUIRE_LOCK(lk)      recursive_acquire_lock(lk)
+#define INITIAL_LOCK(lk)      ((lk)->threadid = (THREAD_ID_T)0, (lk)->sl = 0, (lk)->c = 0)
+#define DESTROY_LOCK(lk)      (0)
+#endif /* USE_RECURSIVE_LOCKS */
+
+#elif defined(WIN32) /* Win32 critical sections */
+#define MLOCK_T               CRITICAL_SECTION
+#define ACQUIRE_LOCK(lk)      (EnterCriticalSection(lk), 0)
+#define RELEASE_LOCK(lk)      LeaveCriticalSection(lk)
+#define TRY_LOCK(lk)          TryEnterCriticalSection(lk)
+#define INITIAL_LOCK(lk)      (!InitializeCriticalSectionAndSpinCount((lk), 0x80000000|4000))
+#define DESTROY_LOCK(lk)      (DeleteCriticalSection(lk), 0)
+#define NEED_GLOBAL_LOCK_INIT
+
+static MLOCK_T malloc_global_mutex;
+static volatile LONG malloc_global_mutex_status;
+
+/* Use spin loop to initialize global lock */
+static void init_malloc_global_mutex() {
+  for (;;) {
+    long stat = malloc_global_mutex_status;
+    if (stat > 0)
+      return;
+    /* transition to < 0 while initializing, then to > 0 */
+    if (stat == 0 &&
+        interlockedcompareexchange(&malloc_global_mutex_status, (LONG)-1, (LONG)0) == 0) {
+      InitializeCriticalSection(&malloc_global_mutex);
+      interlockedexchange(&malloc_global_mutex_status, (LONG)1);
+      return;
+    }
+    SleepEx(0, FALSE);
+  }
+}
+
+#else /* pthreads-based locks */
+#define MLOCK_T               pthread_mutex_t
+#define ACQUIRE_LOCK(lk)      pthread_mutex_lock(lk)
+#define RELEASE_LOCK(lk)      pthread_mutex_unlock(lk)
+#define TRY_LOCK(lk)          (!pthread_mutex_trylock(lk))
+#define INITIAL_LOCK(lk)      pthread_init_lock(lk)
+#define DESTROY_LOCK(lk)      pthread_mutex_destroy(lk)
+
+#if defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0 && defined(linux) && !defined(PTHREAD_MUTEX_RECURSIVE)
+/* Cope with old-style linux recursive lock initialization by adding */
+/* skipped internal declaration from pthread.h */
+extern int pthread_mutexattr_setkind_np __P ((pthread_mutexattr_t *__attr,
+                                              int __kind));
+#define PTHREAD_MUTEX_RECURSIVE PTHREAD_MUTEX_RECURSIVE_NP
+#define pthread_mutexattr_settype(x,y) pthread_mutexattr_setkind_np(x,y)
+#endif /* USE_RECURSIVE_LOCKS ... */
+
+static MLOCK_T malloc_global_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+static int pthread_init_lock (MLOCK_T *lk) {
+  pthread_mutexattr_t attr;
+  if (pthread_mutexattr_init(&attr)) return 1;
+#if defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0
+  if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE)) return 1;
+#endif
+  if (pthread_mutex_init(lk, &attr)) return 1;
+  if (pthread_mutexattr_destroy(&attr)) return 1;
+  return 0;
+}
+
+#endif /* ... lock types ... */
+
+/* Common code for all lock types */
+#define USE_LOCK_BIT               (2U)
+
+#ifndef ACQUIRE_MALLOC_GLOBAL_LOCK
+#define ACQUIRE_MALLOC_GLOBAL_LOCK()  ACQUIRE_LOCK(&malloc_global_mutex);
+#endif
+
+#ifndef RELEASE_MALLOC_GLOBAL_LOCK
+#define RELEASE_MALLOC_GLOBAL_LOCK()  RELEASE_LOCK(&malloc_global_mutex);
+#endif
+
+#endif /* USE_LOCKS */
+
+/* -----------------------  Chunk representations ------------------------ */
+
+/*
+  (The following includes lightly edited explanations by Colin Plumb.)
+
+  The malloc_chunk declaration below is misleading (but accurate and
+  necessary).  It declares a "view" into memory allowing access to
+  necessary fields at known offsets from a given base.
+
+  Chunks of memory are maintained using a `boundary tag' method as
+  originally described by Knuth.  (See the paper by Paul Wilson
+  ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a survey of such
+  techniques.)  Sizes of free chunks are stored both in the front of
+  each chunk and at the end.  This makes consolidating fragmented
+  chunks into bigger chunks fast.  The head fields also hold bits
+  representing whether chunks are free or in use.
+
+  Here are some pictures to make it clearer.  They are "exploded" to
+  show that the state of a chunk can be thought of as extending from
+  the high 31 bits of the head field of its header through the
+  prev_foot and PINUSE_BIT bit of the following chunk header.
+
+  A chunk that's in use looks like:
+
+   chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+           | Size of previous chunk (if P = 0)                             |
+           +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P|
+         | Size of this chunk                                         1| +-+
+   mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+         |                                                               |
+         +-                                                             -+
+         |                                                               |
+         +-                                                             -+
+         |                                                               :
+         +-      size - sizeof(size_t) available payload bytes          -+
+         :                                                               |
+ chunk-> +-                                                             -+
+         |                                                               |
+         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |1|
+       | Size of next chunk (may or may not be in use)               | +-+
+ mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+    And if it's free, it looks like this:
+
+   chunk-> +-                                                             -+
+           | User payload (must be in use, or we would have merged!)       |
+           +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P|
+         | Size of this chunk                                         0| +-+
+   mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+         | Next pointer                                                  |
+         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+         | Prev pointer                                                  |
+         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+         |                                                               :
+         +-      size - sizeof(struct chunk) unused bytes               -+
+         :                                                               |
+ chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+         | Size of this chunk                                            |
+         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |0|
+       | Size of next chunk (must be in use, or we would have merged)| +-+
+ mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+       |                                                               :
+       +- User payload                                                -+
+       :                                                               |
+       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+                                                                     |0|
+                                                                     +-+
+  Note that since we always merge adjacent free chunks, the chunks
+  adjacent to a free chunk must be in use.
+
+  Given a pointer to a chunk (which can be derived trivially from the
+  payload pointer) we can, in O(1) time, find out whether the adjacent
+  chunks are free, and if so, unlink them from the lists that they
+  are on and merge them with the current chunk.
+
+  Chunks always begin on even word boundaries, so the mem portion
+  (which is returned to the user) is also on an even word boundary, and
+  thus at least double-word aligned.
+
+  The P (PINUSE_BIT) bit, stored in the unused low-order bit of the
+  chunk size (which is always a multiple of two words), is an in-use
+  bit for the *previous* chunk.  If that bit is *clear*, then the
+  word before the current chunk size contains the previous chunk
+  size, and can be used to find the front of the previous chunk.
+  The very first chunk allocated always has this bit set, preventing
+  access to non-existent (or non-owned) memory. If pinuse is set for
+  any given chunk, then you CANNOT determine the size of the
+  previous chunk, and might even get a memory addressing fault when
+  trying to do so.
+
+  The C (CINUSE_BIT) bit, stored in the unused second-lowest bit of
+  the chunk size redundantly records whether the current chunk is
+  inuse (unless the chunk is mmapped). This redundancy enables usage
+  checks within free and realloc, and reduces indirection when freeing
+  and consolidating chunks.
+
+  Each freshly allocated chunk must have both cinuse and pinuse set.
+  That is, each allocated chunk borders either a previously allocated
+  and still in-use chunk, or the base of its memory arena. This is
+  ensured by making all allocatio

<TRUNCATED>

[4/6] arrow git commit: ARROW-1104: Integrate in-memory object store into arrow

Posted by we...@apache.org.
http://git-wip-us.apache.org/repos/asf/arrow/blob/5e343098/cpp/src/plasma/store.cc
----------------------------------------------------------------------
diff --git a/cpp/src/plasma/store.cc b/cpp/src/plasma/store.cc
new file mode 100644
index 0000000..5151a44
--- /dev/null
+++ b/cpp/src/plasma/store.cc
@@ -0,0 +1,681 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// PLASMA STORE: This is a simple object store server process
+//
+// It accepts incoming client connections on a unix domain socket
+// (name passed in via the -s option of the executable) and uses a
+// single thread to serve the clients. Each client establishes a
+// connection and can create objects, wait for objects and seal
+// objects through that connection.
+//
+// It keeps a hash table that maps object_ids (which are 20 bytes long,
+// just enough to store a SHA1 hash) to memory mapped files.
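+//
+// A typical launch, as a sketch (only the -s flag is described above;
+// the -m flag for the memory limit in bytes is an assumption about
+// the option parsing, which is not shown in this excerpt):
+//
+//   plasma_store -s /tmp/plasma -m 1000000000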
+
+#include "plasma/store.h"
+
+#include <assert.h>
+#include <fcntl.h>
+#include <getopt.h>
+#include <limits.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/ioctl.h>
+#include <sys/socket.h>
+#include <sys/statvfs.h>
+#include <sys/types.h>
+#include <sys/un.h>
+#include <unistd.h>
+
+#include <deque>
+#include <string>
+#include <unordered_map>
+#include <unordered_set>
+#include <vector>
+
+#include "format/common_generated.h"
+#include "plasma/common.h"
+#include "plasma/fling.h"
+#include "plasma/io.h"
+#include "plasma/malloc.h"
+
+extern "C" {
+void* dlmalloc(size_t bytes);
+void* dlmemalign(size_t alignment, size_t bytes);
+void dlfree(void* mem);
+size_t dlmalloc_set_footprint_limit(size_t bytes);
+}
+
+struct GetRequest {
+  GetRequest(Client* client, const std::vector<ObjectID>& object_ids);
+  /// The client that called get.
+  Client* client;
+  /// The ID of the timer that will time out and cause this wait to return to
+  ///  the client if it hasn't already returned.
+  int64_t timer;
+  /// The object IDs involved in this request. This is used in the reply.
+  std::vector<ObjectID> object_ids;
+  /// The object information for the objects in this request. This is used in
+  /// the reply.
+  std::unordered_map<ObjectID, PlasmaObject, UniqueIDHasher> objects;
+  /// The minimum number of objects to wait for in this request.
+  int64_t num_objects_to_wait_for;
+  /// The number of object requests in this wait request that are already
+  /// satisfied.
+  int64_t num_satisfied;
+};
+
+GetRequest::GetRequest(Client* client, const std::vector<ObjectID>& object_ids)
+    : client(client),
+      timer(-1),
+      object_ids(object_ids.begin(), object_ids.end()),
+      objects(object_ids.size()),
+      num_satisfied(0) {
+  std::unordered_set<ObjectID, UniqueIDHasher> unique_ids(
+      object_ids.begin(), object_ids.end());
+  num_objects_to_wait_for = unique_ids.size();
+}
+
+Client::Client(int fd) : fd(fd) {}
+
+PlasmaStore::PlasmaStore(EventLoop* loop, int64_t system_memory)
+    : loop_(loop), eviction_policy_(&store_info_) {
+  store_info_.memory_capacity = system_memory;
+}
+
+// TODO(pcm): Get rid of this destructor by using RAII to clean up data.
+PlasmaStore::~PlasmaStore() {
+  for (const auto& element : pending_notifications_) {
+    auto object_notifications = element.second.object_notifications;
+    for (size_t i = 0; i < object_notifications.size(); ++i) {
+      uint8_t* notification = reinterpret_cast<uint8_t*>(object_notifications.at(i));
+      uint8_t* data = notification;
+      // TODO(pcm): Get rid of this delete.
+      delete[] data;
+    }
+  }
+}
+
+// If this client is not already using the object, add the client to the
+// object's list of clients, otherwise do nothing.
+void PlasmaStore::add_client_to_object_clients(ObjectTableEntry* entry, Client* client) {
+  // Check if this client is already using the object.
+  if (entry->clients.find(client) != entry->clients.end()) { return; }
+  // If there are no other clients using this object, notify the eviction policy
+  // that the object is being used.
+  if (entry->clients.size() == 0) {
+    // Tell the eviction policy that this object is being used.
+    std::vector<ObjectID> objects_to_evict;
+    eviction_policy_.begin_object_access(entry->object_id, &objects_to_evict);
+    delete_objects(objects_to_evict);
+  }
+  // Add the client pointer to the list of clients using this object.
+  entry->clients.insert(client);
+}
+
+// Create a new object buffer in the hash table.
+int PlasmaStore::create_object(const ObjectID& object_id, int64_t data_size,
+    int64_t metadata_size, Client* client, PlasmaObject* result) {
+  ARROW_LOG(DEBUG) << "creating object " << object_id.hex();
+  if (store_info_.objects.count(object_id) != 0) {
+    // There is already an object with the same ID in the Plasma Store, so
+    // ignore this request.
+    return PlasmaError_ObjectExists;
+  }
+  // Try to evict objects until there is enough space.
+  uint8_t* pointer;
+  do {
+    // Allocate space for the new object. We use dlmemalign instead of dlmalloc
+    // in order to align the allocated region to a 64-byte boundary. This is not
+    // strictly necessary, but it is an optimization that could speed up the
+    // computation of a hash of the data (see compute_object_hash_parallel in
+    // plasma_client.cc). Note that even though this pointer is 64-byte aligned,
+    // it is not guaranteed that the corresponding pointer in the client will be
+    // 64-byte aligned, but in practice it often will be.
+    pointer =
+        reinterpret_cast<uint8_t*>(dlmemalign(BLOCK_SIZE, data_size + metadata_size));
+    if (pointer == NULL) {
+      // Tell the eviction policy how much space we need to create this object.
+      std::vector<ObjectID> objects_to_evict;
+      bool success =
+          eviction_policy_.require_space(data_size + metadata_size, &objects_to_evict);
+      delete_objects(objects_to_evict);
+      // Return an error to the client if not enough space could be freed to
+      // create the object.
+      if (!success) { return PlasmaError_OutOfMemory; }
+    }
+  } while (pointer == NULL);
+  int fd;
+  int64_t map_size;
+  ptrdiff_t offset;
+  get_malloc_mapinfo(pointer, &fd, &map_size, &offset);
+  assert(fd != -1);
+
+  auto entry = std::unique_ptr<ObjectTableEntry>(new ObjectTableEntry());
+  entry->object_id = object_id;
+  entry->info.object_id = object_id.binary();
+  entry->info.data_size = data_size;
+  entry->info.metadata_size = metadata_size;
+  entry->pointer = pointer;
+  // TODO(pcm): Set the other fields.
+  entry->fd = fd;
+  entry->map_size = map_size;
+  entry->offset = offset;
+  entry->state = PLASMA_CREATED;
+
+  store_info_.objects[object_id] = std::move(entry);
+  result->handle.store_fd = fd;
+  result->handle.mmap_size = map_size;
+  result->data_offset = offset;
+  result->metadata_offset = offset + data_size;
+  result->data_size = data_size;
+  result->metadata_size = metadata_size;
+  // Notify the eviction policy that this object was created. This must be done
+  // immediately before the call to add_client_to_object_clients so that the
+  // eviction policy does not have an opportunity to evict the object.
+  eviction_policy_.object_created(object_id);
+  // Record that this client is using this object.
+  add_client_to_object_clients(store_info_.objects[object_id].get(), client);
+  return PlasmaError_OK;
+}
+
+void PlasmaObject_init(PlasmaObject* object, ObjectTableEntry* entry) {
+  DCHECK(object != NULL);
+  DCHECK(entry != NULL);
+  DCHECK(entry->state == PLASMA_SEALED);
+  object->handle.store_fd = entry->fd;
+  object->handle.mmap_size = entry->map_size;
+  object->data_offset = entry->offset;
+  object->metadata_offset = entry->offset + entry->info.data_size;
+  object->data_size = entry->info.data_size;
+  object->metadata_size = entry->info.metadata_size;
+}
+
+void PlasmaStore::return_from_get(GetRequest* get_req) {
+  // Send the get reply to the client.
+  Status s = SendGetReply(get_req->client->fd, &get_req->object_ids[0], get_req->objects,
+      get_req->object_ids.size());
+  warn_if_sigpipe(s.ok() ? 0 : -1, get_req->client->fd);
+  // If we successfully sent the get reply message to the client, then also send
+  // the file descriptors.
+  if (s.ok()) {
+    // Send all of the file descriptors for the present objects.
+    for (const auto& object_id : get_req->object_ids) {
+      PlasmaObject& object = get_req->objects[object_id];
+      // We use the data size to indicate whether the object is present or not.
+      if (object.data_size != -1) {
+        int error_code = send_fd(get_req->client->fd, object.handle.store_fd);
+        // If we failed to send the file descriptor, loop until we have sent it
+        // successfully. TODO(rkn): This is problematic for two reasons. First
+        // of all, sending the file descriptor should just succeed without any
+        // errors, but sometimes I see a "Message too long" error number.
+        // Second, looping like this allows a client to potentially block the
+        // plasma store event loop which should never happen.
+        while (error_code < 0) {
+          if (errno == EMSGSIZE) {
+            ARROW_LOG(WARNING) << "Failed to send file descriptor, retrying.";
+            error_code = send_fd(get_req->client->fd, object.handle.store_fd);
+            continue;
+          }
+          warn_if_sigpipe(error_code, get_req->client->fd);
+          break;
+        }
+      }
+    }
+  }
+
+  // Remove the get request from each of the relevant object_get_requests hash
+  // tables if it is present there. It should only be present there if the get
+  // request timed out.
+  for (ObjectID& object_id : get_req->object_ids) {
+    auto& get_requests = object_get_requests_[object_id];
+    // Erase get_req from the vector.
+    auto it = std::find(get_requests.begin(), get_requests.end(), get_req);
+    if (it != get_requests.end()) { get_requests.erase(it); }
+  }
+  // Remove the get request.
+  if (get_req->timer != -1) { ARROW_CHECK(loop_->remove_timer(get_req->timer) == AE_OK); }
+  delete get_req;
+}
+
+void PlasmaStore::update_object_get_requests(const ObjectID& object_id) {
+  std::vector<GetRequest*>& get_requests = object_get_requests_[object_id];
+  size_t index = 0;
+  size_t num_requests = get_requests.size();
+  for (size_t i = 0; i < num_requests; ++i) {
+    GetRequest* get_req = get_requests[index];
+    auto entry = get_object_table_entry(&store_info_, object_id);
+    ARROW_CHECK(entry != NULL);
+
+    PlasmaObject_init(&get_req->objects[object_id], entry);
+    get_req->num_satisfied += 1;
+    // Record the fact that this client will be using this object and will
+    // be responsible for releasing this object.
+    add_client_to_object_clients(entry, get_req->client);
+
+    // If this get request is done, reply to the client.
+    if (get_req->num_satisfied == get_req->num_objects_to_wait_for) {
+      return_from_get(get_req);
+    } else {
+      // The call to return_from_get will remove the current element in the
+      // array, so we only increment the counter in the else branch.
+      index += 1;
+    }
+  }
+
+  DCHECK(index == get_requests.size());
+  // Remove the array of get requests for this object, since no one should be
+  // waiting for this object anymore.
+  object_get_requests_.erase(object_id);
+}
+
+void PlasmaStore::process_get_request(
+    Client* client, const std::vector<ObjectID>& object_ids, int64_t timeout_ms) {
+  // Create a get request for this object.
+  GetRequest* get_req = new GetRequest(client, object_ids);
+
+  for (auto object_id : object_ids) {
+    // Check if this object is already present locally. If so, record that the
+    // object is being used and mark it as accounted for.
+    auto entry = get_object_table_entry(&store_info_, object_id);
+    if (entry && entry->state == PLASMA_SEALED) {
+      // Update the get request to take into account the present object.
+      PlasmaObject_init(&get_req->objects[object_id], entry);
+      get_req->num_satisfied += 1;
+      // Record that this client is using this object. For objects that are
+      // not yet present, this is instead done in update_object_get_requests,
+      // which is called from seal_object.
+      add_client_to_object_clients(entry, client);
+    } else {
+      // Add a placeholder plasma object to the get request to indicate that the
+      // object is not present. This will be parsed by the client. We set the
+      // data size to -1 to indicate that the object is not present.
+      get_req->objects[object_id].data_size = -1;
+      // Add the get request to the relevant data structures.
+      object_get_requests_[object_id].push_back(get_req);
+    }
+  }
+
+  // If all of the objects are present already or if the timeout is 0, return to
+  // the client.
+  if (get_req->num_satisfied == get_req->num_objects_to_wait_for || timeout_ms == 0) {
+    return_from_get(get_req);
+  } else if (timeout_ms != -1) {
+    // Set a timer that will cause the get request to return to the client. Note
+    // that a timeout of -1 is used to indicate that no timer should be set.
+    get_req->timer = loop_->add_timer(timeout_ms, [this, get_req](int64_t timer_id) {
+      return_from_get(get_req);
+      return kEventLoopTimerDone;
+    });
+  }
+}
+
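A note on the timeout semantics above: timeout_ms == 0 replies immediately
(absent objects are reported with data_size == -1), timeout_ms == -1 means
wait indefinitely, and any other value arms a timer that forces the reply. A
client-side sketch mirroring the tests in cpp/src/plasma/test/client_tests.cc
later in this patch (the client is assumed to be connected as in those tests):

    #include "plasma/client.h"

    // Sketch: probe for an object without blocking, then block until it
    // is sealed. Mirrors client_tests.cc; not part of the store itself.
    void get_example(PlasmaClient& client, ObjectID object_id) {
      ObjectBuffer buffer;
      // timeout_ms = 0: return immediately; a missing object comes back
      // with the data_size == -1 placeholder set in process_get_request.
      ARROW_CHECK_OK(client.Get(&object_id, 1, 0, &buffer));
      if (buffer.data_size == -1) {
        // timeout_ms = -1: block until the object is sealed and
        // update_object_get_requests sends the reply.
        ARROW_CHECK_OK(client.Get(&object_id, 1, -1, &buffer));
      }
    }
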
+int PlasmaStore::remove_client_from_object_clients(
+    ObjectTableEntry* entry, Client* client) {
+  auto it = entry->clients.find(client);
+  if (it != entry->clients.end()) {
+    entry->clients.erase(it);
+    // If no more clients are using this object, notify the eviction policy
+    // that the object is no longer being used.
+    if (entry->clients.size() == 0) {
+      // Tell the eviction policy that this object is no longer being used.
+      std::vector<ObjectID> objects_to_evict;
+      eviction_policy_.end_object_access(entry->object_id, &objects_to_evict);
+      delete_objects(objects_to_evict);
+    }
+    // Return 1 to indicate that the client was removed.
+    return 1;
+  } else {
+    // Return 0 to indicate that the client was not removed.
+    return 0;
+  }
+}
+
+void PlasmaStore::release_object(const ObjectID& object_id, Client* client) {
+  auto entry = get_object_table_entry(&store_info_, object_id);
+  ARROW_CHECK(entry != NULL);
+  // Remove the client from the object's array of clients.
+  ARROW_CHECK(remove_client_from_object_clients(entry, client) == 1);
+}
+
+// Check if an object is present.
+int PlasmaStore::contains_object(const ObjectID& object_id) {
+  auto entry = get_object_table_entry(&store_info_, object_id);
+  return entry && (entry->state == PLASMA_SEALED) ? OBJECT_FOUND : OBJECT_NOT_FOUND;
+}
+
+// Seal an object that has been created in the hash table.
+void PlasmaStore::seal_object(const ObjectID& object_id, unsigned char digest[]) {
+  ARROW_LOG(DEBUG) << "sealing object " << object_id.hex();
+  auto entry = get_object_table_entry(&store_info_, object_id);
+  ARROW_CHECK(entry != NULL);
+  ARROW_CHECK(entry->state == PLASMA_CREATED);
+  // Set the state of object to SEALED.
+  entry->state = PLASMA_SEALED;
+  // Set the object digest.
+  entry->info.digest = std::string(reinterpret_cast<char*>(&digest[0]), kDigestSize);
+  // Inform all subscribers that a new object has been sealed.
+  push_notification(&entry->info);
+
+  // Update all get requests that involve this object.
+  update_object_get_requests(object_id);
+}
+
+void PlasmaStore::delete_objects(const std::vector<ObjectID>& object_ids) {
+  for (const auto& object_id : object_ids) {
+    ARROW_LOG(DEBUG) << "deleting object " << object_id.hex();
+    auto entry = get_object_table_entry(&store_info_, object_id);
+    // TODO(rkn): This should probably not fail, but should instead throw an
+    // error. Maybe we should also support deleting objects that have been
+    // created but not sealed.
+    ARROW_CHECK(entry != NULL) << "To delete an object it must be in the object table.";
+    ARROW_CHECK(entry->state == PLASMA_SEALED)
+        << "To delete an object it must have been sealed.";
+    ARROW_CHECK(entry->clients.size() == 0)
+        << "To delete an object, there must be no clients currently using it.";
+    dlfree(entry->pointer);
+    store_info_.objects.erase(object_id);
+    // Inform all subscribers that the object has been deleted.
+    ObjectInfoT notification;
+    notification.object_id = object_id.binary();
+    notification.is_deletion = true;
+    push_notification(&notification);
+  }
+}
+
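For orientation, these are the points in this file where the store drives the
eviction policy; the sketch below lists the calls as they appear here (a
summary of code above and below, not additional behavior):

    // create_object: free enough room for a new object, then delete the
    // victims the policy picked.
    //   eviction_policy_.require_space(data_size + metadata_size, &victims);
    //   delete_objects(victims);
    //
    // create_object: register the new object right before its first
    // client is added, so it cannot be evicted in between.
    //   eviction_policy_.object_created(object_id);
    //
    // remove_client_from_object_clients: the last client released the
    // object, so it becomes an eviction candidate.
    //   eviction_policy_.end_object_access(entry->object_id, &victims);
    //   delete_objects(victims);
    //
    // process_message (PlasmaEvictRequest, testing only): evict a given
    // number of bytes on demand.
    //   eviction_policy_.choose_objects_to_evict(num_bytes, &victims);
    //   delete_objects(victims);
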
+void PlasmaStore::connect_client(int listener_sock) {
+  int client_fd = AcceptClient(listener_sock);
+  // This is freed in disconnect_client.
+  Client* client = new Client(client_fd);
+  // Add a callback to handle events on this socket.
+  // TODO(pcm): Check return value.
+  loop_->add_file_event(
+      client_fd, kEventLoopRead, [this, client](int events) { process_message(client); });
+  ARROW_LOG(DEBUG) << "New connection with fd " << client_fd;
+}
+
+void PlasmaStore::disconnect_client(Client* client) {
+  ARROW_CHECK(client != NULL);
+  ARROW_CHECK(client->fd > 0);
+  loop_->remove_file_event(client->fd);
+  // Close the socket.
+  close(client->fd);
+  ARROW_LOG(INFO) << "Disconnecting client on fd " << client->fd;
+  // If this client was using any objects, remove it from the appropriate
+  // lists.
+  for (const auto& entry : store_info_.objects) {
+    remove_client_from_object_clients(entry.second.get(), client);
+  }
+  // Note, the store may still attempt to send a message to the disconnected
+  // client (for example, when an object ID that the client was waiting for
+  // is ready). In these cases, the attempt to send the message will fail, but
+  // the store should just ignore the failure.
+  delete client;
+}
+
+/// Send notifications about sealed objects to the subscribers. This is called
+/// in seal_object. If the socket's send buffer is full, the notification will
+/// be buffered, and this function will be called again when the send buffer
+/// has room.
+///
+/// @param client_fd The file descriptor of the client to send the
+///        notifications to.
+/// @return Void.
+void PlasmaStore::send_notifications(int client_fd) {
+  auto it = pending_notifications_.find(client_fd);
+
+  int num_processed = 0;
+  bool closed = false;
+  // Loop over the array of pending notifications and send as many of them as
+  // possible.
+  for (size_t i = 0; i < it->second.object_notifications.size(); ++i) {
+    uint8_t* notification =
+        reinterpret_cast<uint8_t*>(it->second.object_notifications.at(i));
+    // Decode the length, which is the first bytes of the message.
+    int64_t size = *(reinterpret_cast<int64_t*>(notification));
+
+    // Attempt to send a notification about this object ID.
+    ssize_t nbytes = send(client_fd, notification, sizeof(int64_t) + size, 0);
+    if (nbytes >= 0) {
+      ARROW_CHECK(nbytes == static_cast<ssize_t>(sizeof(int64_t)) + size);
+    } else if (nbytes == -1 &&
+               (errno == EAGAIN || errno == EWOULDBLOCK || errno == EINTR)) {
+      ARROW_LOG(DEBUG) << "The socket's send buffer is full, so we are caching this "
+                          "notification and will send it later.";
+      // Add a callback to the event loop to send queued notifications whenever
+      // there is room in the socket's send buffer. Callbacks can be added
+      // more than once here and will be overwritten. The callback is removed
+      // at the end of the method.
+      // TODO(pcm): Introduce status codes and check in case the file descriptor
+      // is added twice.
+      loop_->add_file_event(client_fd, kEventLoopWrite,
+          [this, client_fd](int events) { send_notifications(client_fd); });
+      break;
+    } else {
+      ARROW_LOG(WARNING) << "Failed to send notification to client on fd " << client_fd;
+      if (errno == EPIPE) {
+        closed = true;
+        break;
+      }
+    }
+    num_processed += 1;
+    // The corresponding malloc happened in create_object_info_buffer
+    // within push_notification.
+    delete[] notification;
+  }
+  // Remove the sent notifications from the array.
+  it->second.object_notifications.erase(it->second.object_notifications.begin(),
+      it->second.object_notifications.begin() + num_processed);
+
+  // Stop sending notifications if the pipe was broken. Note that we must
+  // return here, since erasing the entry from pending_notifications_
+  // invalidates the iterator `it`.
+  if (closed) {
+    close(client_fd);
+    pending_notifications_.erase(client_fd);
+    return;
+  }
+
+  // If we have sent all notifications, remove the fd from the event loop.
+  if (it->second.object_notifications.empty()) { loop_->remove_file_event(client_fd); }
+}
+
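The framing used for notifications is visible above: each buffered message is
an int64_t size prefix followed by `size` bytes of serialized ObjectInfo,
written with a single send(). A subscriber-side sketch that reads one
notification back off the socket (read_exact is a hypothetical helper, not
part of this patch):

    #include <unistd.h>
    #include <cstdint>
    #include <vector>

    // Loop over read() until exactly n bytes arrive (hypothetical helper).
    static bool read_exact(int fd, uint8_t* buf, size_t n) {
      size_t done = 0;
      while (done < n) {
        ssize_t r = read(fd, buf + done, n - done);
        if (r <= 0) return false;  // EOF or error
        done += static_cast<size_t>(r);
      }
      return true;
    }

    // Read one length-prefixed notification into `payload`.
    bool read_notification(int fd, std::vector<uint8_t>* payload) {
      int64_t size;
      if (!read_exact(fd, reinterpret_cast<uint8_t*>(&size), sizeof(size))) {
        return false;
      }
      payload->resize(static_cast<size_t>(size));
      return read_exact(fd, payload->data(), payload->size());
    }
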
+void PlasmaStore::push_notification(ObjectInfoT* object_info) {
+  for (auto& element : pending_notifications_) {
+    uint8_t* notification = create_object_info_buffer(object_info);
+    element.second.object_notifications.push_back(notification);
+    send_notifications(element.first);
+    // The notification gets freed in send_notifications when the notification
+    // is sent over the socket.
+  }
+}
+
+// Subscribe to notifications about sealed objects.
+void PlasmaStore::subscribe_to_updates(Client* client) {
+  ARROW_LOG(DEBUG) << "subscribing to updates on fd " << client->fd;
+  // TODO(rkn): The store could block here if the client doesn't send a file
+  // descriptor.
+  int fd = recv_fd(client->fd);
+  if (fd < 0) {
+    // This may mean that the client died before sending the file descriptor.
+    ARROW_LOG(WARNING) << "Failed to receive file descriptor from client on fd "
+                       << client->fd << ".";
+    return;
+  }
+
+  // Create a new array to buffer notifications that can't be sent to the
+  // subscriber yet because the socket send buffer is full. TODO(rkn): the queue
+  // never gets freed.
+  // TODO(pcm): Is the following necessary?
+  pending_notifications_[fd];
+
+  // Push notifications to the new subscriber about existing objects.
+  for (const auto& entry : store_info_.objects) {
+    push_notification(&entry.second->info);
+  }
+  send_notifications(fd);
+}
+
+Status PlasmaStore::process_message(Client* client) {
+  int64_t type;
+  Status s = ReadMessage(client->fd, &type, &input_buffer_);
+  ARROW_CHECK(s.ok() || s.IsIOError());
+
+  uint8_t* input = input_buffer_.data();
+  ObjectID object_id;
+  PlasmaObject object;
+  // TODO(pcm): Get rid of the following.
+  memset(&object, 0, sizeof(object));
+
+  // Process the different types of requests.
+  switch (type) {
+    case MessageType_PlasmaCreateRequest: {
+      int64_t data_size;
+      int64_t metadata_size;
+      RETURN_NOT_OK(ReadCreateRequest(input, &object_id, &data_size, &metadata_size));
+      int error_code =
+          create_object(object_id, data_size, metadata_size, client, &object);
+      HANDLE_SIGPIPE(
+          SendCreateReply(client->fd, object_id, &object, error_code), client->fd);
+      if (error_code == PlasmaError_OK) {
+        warn_if_sigpipe(send_fd(client->fd, object.handle.store_fd), client->fd);
+      }
+    } break;
+    case MessageType_PlasmaGetRequest: {
+      std::vector<ObjectID> object_ids_to_get;
+      int64_t timeout_ms;
+      RETURN_NOT_OK(ReadGetRequest(input, object_ids_to_get, &timeout_ms));
+      process_get_request(client, object_ids_to_get, timeout_ms);
+    } break;
+    case MessageType_PlasmaReleaseRequest:
+      RETURN_NOT_OK(ReadReleaseRequest(input, &object_id));
+      release_object(object_id, client);
+      break;
+    case MessageType_PlasmaContainsRequest:
+      RETURN_NOT_OK(ReadContainsRequest(input, &object_id));
+      if (contains_object(object_id) == OBJECT_FOUND) {
+        HANDLE_SIGPIPE(SendContainsReply(client->fd, object_id, 1), client->fd);
+      } else {
+        HANDLE_SIGPIPE(SendContainsReply(client->fd, object_id, 0), client->fd);
+      }
+      break;
+    case MessageType_PlasmaSealRequest: {
+      unsigned char digest[kDigestSize];
+      RETURN_NOT_OK(ReadSealRequest(input, &object_id, &digest[0]));
+      seal_object(object_id, &digest[0]);
+    } break;
+    case MessageType_PlasmaEvictRequest: {
+      // This code path should only be used for testing.
+      int64_t num_bytes;
+      RETURN_NOT_OK(ReadEvictRequest(input, &num_bytes));
+      std::vector<ObjectID> objects_to_evict;
+      int64_t num_bytes_evicted =
+          eviction_policy_.choose_objects_to_evict(num_bytes, &objects_to_evict);
+      delete_objects(objects_to_evict);
+      HANDLE_SIGPIPE(SendEvictReply(client->fd, num_bytes_evicted), client->fd);
+    } break;
+    case MessageType_PlasmaSubscribeRequest:
+      subscribe_to_updates(client);
+      break;
+    case MessageType_PlasmaConnectRequest: {
+      HANDLE_SIGPIPE(
+          SendConnectReply(client->fd, store_info_.memory_capacity), client->fd);
+    } break;
+    case DISCONNECT_CLIENT:
+      ARROW_LOG(DEBUG) << "Disconnecting client on fd " << client->fd;
+      disconnect_client(client);
+      break;
+    default:
+      // This code should be unreachable.
+      ARROW_CHECK(0);
+  }
+  return Status::OK();
+}
+
+// Exit cleanly on SIGTERM so that valgrind reports "success".
+void signal_handler(int signal) {
+  if (signal == SIGTERM) { exit(0); }
+}
+
+void start_server(char* socket_name, int64_t system_memory) {
+  // Ignore SIGPIPE signals. If we don't do this, then when we attempt to write
+  // to a client that has already died, the store could die.
+  signal(SIGPIPE, SIG_IGN);
+  // Create the event loop.
+  EventLoop loop;
+  PlasmaStore store(&loop, system_memory);
+  int socket = bind_ipc_sock(socket_name, true);
+  ARROW_CHECK(socket >= 0);
+  // TODO(pcm): Check return value.
+  loop.add_file_event(socket, kEventLoopRead,
+      [&store, socket](int events) { store.connect_client(socket); });
+  loop.run();
+}
+
+int main(int argc, char* argv[]) {
+  signal(SIGTERM, signal_handler);
+  char* socket_name = NULL;
+  int64_t system_memory = -1;
+  int c;
+  while ((c = getopt(argc, argv, "s:m:")) != -1) {
+    switch (c) {
+      case 's':
+        socket_name = optarg;
+        break;
+      case 'm': {
+        char extra;
+        int scanned = sscanf(optarg, "%" SCNd64 "%c", &system_memory, &extra);
+        ARROW_CHECK(scanned == 1);
+        ARROW_LOG(INFO) << "Allowing the Plasma store to use up to "
+                        << static_cast<double>(system_memory) / 1000000000
+                        << "GB of memory.";
+        break;
+      }
+      default:
+        exit(-1);
+    }
+  }
+  if (!socket_name) {
+    ARROW_LOG(FATAL) << "please specify socket for incoming connections with -s switch";
+  }
+  if (system_memory == -1) {
+    ARROW_LOG(FATAL) << "please specify the amount of system memory with -m switch";
+  }
+#ifdef __linux__
+  // On Linux, check that the amount of memory available in /dev/shm is large
+  // enough to accommodate the request. If it isn't, then fail.
+  int shm_fd = open("/dev/shm", O_RDONLY);
+  struct statvfs shm_vfs_stats;
+  fstatvfs(shm_fd, &shm_vfs_stats);
+  // The value shm_vfs_stats.f_bsize is the block size, and the value
+  // shm_vfs_stats.f_bavail is the number of available blocks.
+  int64_t shm_mem_avail = shm_vfs_stats.f_bsize * shm_vfs_stats.f_bavail;
+  close(shm_fd);
+  if (system_memory > shm_mem_avail) {
+    ARROW_LOG(FATAL) << "System memory request exceeds memory available in /dev/shm. The "
+                        "request is for "
+                     << system_memory << " bytes, and the amount available is "
+                     << shm_mem_avail
+                     << " bytes. You may be able to free up space by deleting files in "
+                        "/dev/shm. If you are inside a Docker container, you may need to "
+                        "pass "
+                        "an argument with the flag '--shm-size' to 'docker run'.";
+  }
+#endif
+  // Make it so dlmalloc fails if we try to request more memory than is
+  // available.
+  dlmalloc_set_footprint_limit((size_t)system_memory);
+  ARROW_LOG(DEBUG) << "starting server listening on " << socket_name;
+  start_server(socket_name, system_memory);
+}

http://git-wip-us.apache.org/repos/asf/arrow/blob/5e343098/cpp/src/plasma/store.h
----------------------------------------------------------------------
diff --git a/cpp/src/plasma/store.h b/cpp/src/plasma/store.h
new file mode 100644
index 0000000..8bd9426
--- /dev/null
+++ b/cpp/src/plasma/store.h
@@ -0,0 +1,169 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#ifndef PLASMA_STORE_H
+#define PLASMA_STORE_H
+
+#include <deque>
+#include <vector>
+
+#include "plasma/common.h"
+#include "plasma/events.h"
+#include "plasma/eviction_policy.h"
+#include "plasma/plasma.h"
+#include "plasma/protocol.h"
+
+struct GetRequest;
+
+struct NotificationQueue {
+  /// The object notifications for clients. We notify the client about the
+  /// objects in the order that the objects were sealed or deleted.
+  std::deque<uint8_t*> object_notifications;
+};
+
+/// Contains all information that is associated with a Plasma store client.
+struct Client {
+  explicit Client(int fd);
+
+  /// The file descriptor used to communicate with the client.
+  int fd;
+};
+
+class PlasmaStore {
+ public:
+  PlasmaStore(EventLoop* loop, int64_t system_memory);
+
+  ~PlasmaStore();
+
+  /// Create a new object. The client must do a call to release_object to tell
+  /// the store when it is done with the object.
+  ///
+  /// @param object_id Object ID of the object to be created.
+  /// @param data_size Size in bytes of the object to be created.
+  /// @param metadata_size Size in bytes of the object metadata.
+  /// @return One of the following error codes:
+  ///  - PlasmaError_OK, if the object was created successfully.
+  ///  - PlasmaError_ObjectExists, if an object with this ID is already
+  ///    present in the store. In this case, the client should not call
+  ///    plasma_release.
+  ///  - PlasmaError_OutOfMemory, if the store is out of memory and
+  ///    cannot create the object. In this case, the client should not call
+  ///    plasma_release.
+  int create_object(const ObjectID& object_id, int64_t data_size, int64_t metadata_size,
+      Client* client, PlasmaObject* result);
+
+  /// Delete objects that have been created in the hash table. This should only
+  /// be called on objects that are returned by the eviction policy to evict.
+  ///
+  /// @param object_ids Object IDs of the objects to be deleted.
+  /// @return Void.
+  void delete_objects(const std::vector<ObjectID>& object_ids);
+
+  /// Process a get request from a client. This method assumes that we will
+  /// eventually have these objects sealed. If one of the objects has not yet
+  /// been sealed, the client that requested the object will be notified when it
+  /// is sealed.
+  ///
+  /// For each object, the client must do a call to release_object to tell the
+  /// store when it is done with the object.
+  ///
+  /// @param client The client making this request.
+  /// @param object_ids Object IDs of the objects to be gotten.
+  /// @param timeout_ms The timeout for the get request in milliseconds.
+  /// @return Void.
+  void process_get_request(
+      Client* client, const std::vector<ObjectID>& object_ids, int64_t timeout_ms);
+
+  /// Seal an object. The object is now immutable and can be accessed with get.
+  ///
+  /// @param object_id Object ID of the object to be sealed.
+  /// @param digest The digest of the object. This is used to tell if two
+  ///        objects with the same object ID are the same.
+  /// @return Void.
+  void seal_object(const ObjectID& object_id, unsigned char digest[]);
+
+  /// Check if the plasma store contains an object:
+  ///
+  /// @param object_id Object ID that will be checked.
+  /// @return OBJECT_FOUND if the object is in the store, OBJECT_NOT_FOUND
+  ///         if not.
+  int contains_object(const ObjectID& object_id);
+
+  /// Record the fact that a particular client is no longer using an object.
+  ///
+  /// @param object_id The object ID of the object that is being released.
+  /// @param client The client making this request.
+  /// @return Void.
+  void release_object(const ObjectID& object_id, Client* client);
+
+  /// Subscribe a file descriptor to updates about new sealed objects.
+  ///
+  /// @param client The client making this request.
+  /// @return Void.
+  void subscribe_to_updates(Client* client);
+
+  /// Connect a new client to the PlasmaStore.
+  ///
+  /// @param listener_sock The socket that is listening to incoming connections.
+  /// @return Void.
+  void connect_client(int listener_sock);
+
+  /// Disconnect a client from the PlasmaStore.
+  ///
+  /// @param client The client that is disconnected.
+  /// @return Void.
+  void disconnect_client(Client* client);
+
+  void send_notifications(int client_fd);
+
+  Status process_message(Client* client);
+
+ private:
+  void push_notification(ObjectInfoT* object_notification);
+
+  void add_client_to_object_clients(ObjectTableEntry* entry, Client* client);
+
+  void return_from_get(GetRequest* get_req);
+
+  void update_object_get_requests(const ObjectID& object_id);
+
+  int remove_client_from_object_clients(ObjectTableEntry* entry, Client* client);
+
+  /// Event loop of the plasma store.
+  EventLoop* loop_;
+  /// The plasma store information, including the object tables, that is exposed
+  /// to the eviction policy.
+  PlasmaStoreInfo store_info_;
+  /// The state that is managed by the eviction policy.
+  EvictionPolicy eviction_policy_;
+  /// Input buffer. This is allocated only once to avoid mallocs for every
+  /// call to process_message.
+  std::vector<uint8_t> input_buffer_;
+  /// A hash table mapping object IDs to a vector of the get requests that are
+  /// waiting for the object to arrive.
+  std::unordered_map<ObjectID, std::vector<GetRequest*>, UniqueIDHasher>
+      object_get_requests_;
+  /// The pending notifications that have not been sent to subscribers because
+  /// the socket send buffers were full. This is a hash table from client file
+  /// descriptor to an array of object_ids to send to that client.
+  /// TODO(pcm): Consider putting this into the Client data structure and
+  /// reorganize the code slightly.
+  std::unordered_map<int, NotificationQueue> pending_notifications_;
+};
+
+#endif  // PLASMA_STORE_H
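
For embedding, start_server() in store.cc above shows the complete wiring
against this interface. A condensed sketch (the socket path and memory size
are example values; bind_ipc_sock comes from this patch's I/O utilities,
assumed here to live in plasma/io.h):

    #include "plasma/io.h"     // bind_ipc_sock (assumed location)
    #include "plasma/store.h"

    int run_store_example() {
      EventLoop loop;
      PlasmaStore store(&loop, /*system_memory=*/1000000000);
      int socket = bind_ipc_sock("/tmp/example_store", true);
      if (socket < 0) return -1;
      // Each readable event on the listening socket accepts one client;
      // per-client events are registered inside connect_client.
      loop.add_file_event(socket, kEventLoopRead,
          [&store, socket](int events) { store.connect_client(socket); });
      loop.run();  // blocks; signal handling is up to the caller
      return 0;
    }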

http://git-wip-us.apache.org/repos/asf/arrow/blob/5e343098/cpp/src/plasma/test/client_tests.cc
----------------------------------------------------------------------
diff --git a/cpp/src/plasma/test/client_tests.cc b/cpp/src/plasma/test/client_tests.cc
new file mode 100644
index 0000000..dc45773
--- /dev/null
+++ b/cpp/src/plasma/test/client_tests.cc
@@ -0,0 +1,132 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include "gtest/gtest.h"
+
+#include <assert.h>
+#include <signal.h>
+#include <stdlib.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include "plasma/client.h"
+#include "plasma/common.h"
+#include "plasma/plasma.h"
+#include "plasma/protocol.h"
+
+std::string g_test_executable;
+
+class TestPlasmaStore : public ::testing::Test {
+ public:
+  // TODO(pcm): At the moment, stdout of the test gets mixed up with
+  // stdout of the object store. Consider changing that.
+  void SetUp() {
+    std::string plasma_directory =
+        g_test_executable.substr(0, g_test_executable.find_last_of("/"));
+    std::string plasma_command =
+        plasma_directory +
+        "/plasma_store -m 1000000000 -s /tmp/store 1> /dev/null 2> /dev/null &";
+    system(plasma_command.c_str());
+    ARROW_CHECK_OK(client_.Connect("/tmp/store", "", PLASMA_DEFAULT_RELEASE_DELAY));
+  }
+  // gtest invokes TearDown after each test, shutting down the store
+  // process that SetUp started.
+  void TearDown() override {
+    ARROW_CHECK_OK(client_.Disconnect());
+    system("killall plasma_store &");
+  }
+
+ protected:
+  PlasmaClient client_;
+};
+
+TEST_F(TestPlasmaStore, ContainsTest) {
+  ObjectID object_id = ObjectID::from_random();
+
+  // Test for object non-existence.
+  bool has_object;
+  ARROW_CHECK_OK(client_.Contains(object_id, &has_object));
+  ASSERT_EQ(has_object, false);
+
+  // Test for the object being in local Plasma store.
+  // First create object.
+  int64_t data_size = 100;
+  uint8_t metadata[] = {5};
+  int64_t metadata_size = sizeof(metadata);
+  uint8_t* data;
+  ARROW_CHECK_OK(client_.Create(object_id, data_size, metadata, metadata_size, &data));
+  ARROW_CHECK_OK(client_.Seal(object_id));
+  // Avoid a race condition with the Plasma Manager waiting for the
+  // notification.
+  ObjectBuffer object_buffer;
+  ARROW_CHECK_OK(client_.Get(&object_id, 1, -1, &object_buffer));
+  ARROW_CHECK_OK(client_.Contains(object_id, &has_object));
+  ASSERT_EQ(has_object, true);
+}
+
+TEST_F(TestPlasmaStore, GetTest) {
+  ObjectID object_id = ObjectID::from_random();
+  ObjectBuffer object_buffer;
+
+  // Test for object non-existence.
+  ARROW_CHECK_OK(client_.Get(&object_id, 1, 0, &object_buffer));
+  ASSERT_EQ(object_buffer.data_size, -1);
+
+  // Test for the object being in local Plasma store.
+  // First create object.
+  int64_t data_size = 4;
+  uint8_t metadata[] = {5};
+  int64_t metadata_size = sizeof(metadata);
+  uint8_t* data;
+  ARROW_CHECK_OK(client_.Create(object_id, data_size, metadata, metadata_size, &data));
+  for (int64_t i = 0; i < data_size; i++) {
+    data[i] = static_cast<uint8_t>(i % 4);
+  }
+  ARROW_CHECK_OK(client_.Seal(object_id));
+
+  ARROW_CHECK_OK(client_.Get(&object_id, 1, -1, &object_buffer));
+  for (int64_t i = 0; i < data_size; i++) {
+    ASSERT_EQ(data[i], object_buffer.data[i]);
+  }
+}
+
+TEST_F(TestPlasmaStore, MultipleGetTest) {
+  ObjectID object_id1 = ObjectID::from_random();
+  ObjectID object_id2 = ObjectID::from_random();
+  ObjectID object_ids[2] = {object_id1, object_id2};
+  ObjectBuffer object_buffer[2];
+
+  int64_t data_size = 4;
+  uint8_t metadata[] = {5};
+  int64_t metadata_size = sizeof(metadata);
+  uint8_t* data;
+  ARROW_CHECK_OK(client_.Create(object_id1, data_size, metadata, metadata_size, &data));
+  data[0] = 1;
+  ARROW_CHECK_OK(client_.Seal(object_id1));
+
+  ARROW_CHECK_OK(client_.Create(object_id2, data_size, metadata, metadata_size, &data));
+  data[0] = 2;
+  ARROW_CHECK_OK(client_.Seal(object_id2));
+
+  ARROW_CHECK_OK(client_.Get(object_ids, 2, -1, object_buffer));
+  ASSERT_EQ(object_buffer[0].data[0], 1);
+  ASSERT_EQ(object_buffer[1].data[0], 2);
+}
+
+int main(int argc, char** argv) {
+  ::testing::InitGoogleTest(&argc, argv);
+  g_test_executable = std::string(argv[0]);
+  return RUN_ALL_TESTS();
+}

http://git-wip-us.apache.org/repos/asf/arrow/blob/5e343098/cpp/src/plasma/test/run_tests.sh
----------------------------------------------------------------------
diff --git a/cpp/src/plasma/test/run_tests.sh b/cpp/src/plasma/test/run_tests.sh
new file mode 100644
index 0000000..958bd08
--- /dev/null
+++ b/cpp/src/plasma/test/run_tests.sh
@@ -0,0 +1,61 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# Cause the script to exit if a single command fails.
+set -e
+
+./src/plasma/plasma_store -s /tmp/plasma_store_socket_1 -m 0 &
+sleep 1
+./src/plasma/manager_tests
+killall plasma_store
+./src/plasma/serialization_tests
+
+# Start the Redis shards.
+./src/common/thirdparty/redis/src/redis-server --loglevel warning --loadmodule ./src/common/redis_module/libray_redis_module.so --port 6379 &
+redis_pid1=$!
+./src/common/thirdparty/redis/src/redis-server --loglevel warning --loadmodule ./src/common/redis_module/libray_redis_module.so --port 6380 &
+redis_pid2=$!
+sleep 1
+
+# Flush the redis server
+./src/common/thirdparty/redis/src/redis-cli flushall
+# Register the shard location with the primary shard.
+./src/common/thirdparty/redis/src/redis-cli set NumRedisShards 1
+./src/common/thirdparty/redis/src/redis-cli rpush RedisShards 127.0.0.1:6380
+sleep 1
+./src/plasma/plasma_store -s /tmp/store1 -m 1000000000 &
+plasma1_pid=$!
+./src/plasma/plasma_manager -m /tmp/manager1 -s /tmp/store1 -h 127.0.0.1 -p 11111 -r 127.0.0.1:6379 &
+plasma2_pid=$!
+./src/plasma/plasma_store -s /tmp/store2 -m 1000000000 &
+plasma3_pid=$!
+./src/plasma/plasma_manager -m /tmp/manager2 -s /tmp/store2 -h 127.0.0.1 -p 22222 -r 127.0.0.1:6379 &
+plasma4_pid=$!
+sleep 1
+
+./src/plasma/client_tests
+
+kill $plasma4_pid
+kill $plasma3_pid
+kill $plasma2_pid
+kill $plasma1_pid
+kill $redis_pid1
+wait $redis_pid1
+kill $redis_pid2
+wait $redis_pid2

http://git-wip-us.apache.org/repos/asf/arrow/blob/5e343098/cpp/src/plasma/test/run_valgrind.sh
----------------------------------------------------------------------
diff --git a/cpp/src/plasma/test/run_valgrind.sh b/cpp/src/plasma/test/run_valgrind.sh
new file mode 100644
index 0000000..0472194
--- /dev/null
+++ b/cpp/src/plasma/test/run_valgrind.sh
@@ -0,0 +1,27 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# Cause the script to exit if a single command fails.
+set -e
+
+./src/plasma/plasma_store -s /tmp/plasma_store_socket_1 -m 0 &
+sleep 1
+valgrind --leak-check=full --error-exitcode=1 ./src/plasma/manager_tests
+killall plasma_store
+valgrind --leak-check=full --error-exitcode=1 ./src/plasma/serialization_tests

http://git-wip-us.apache.org/repos/asf/arrow/blob/5e343098/cpp/src/plasma/test/serialization_tests.cc
----------------------------------------------------------------------
diff --git a/cpp/src/plasma/test/serialization_tests.cc b/cpp/src/plasma/test/serialization_tests.cc
new file mode 100644
index 0000000..325cead
--- /dev/null
+++ b/cpp/src/plasma/test/serialization_tests.cc
@@ -0,0 +1,388 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include "gtest/gtest.h"
+
+#include <sys/types.h>
+#include <unistd.h>
+
+#include "plasma/common.h"
+#include "plasma/io.h"
+#include "plasma/plasma.h"
+#include "plasma/protocol.h"
+
+/**
+ * Create a temporary file. Needs to be closed by the caller.
+ *
+ * @return File descriptor of the file.
+ */
+int create_temp_file(void) {
+  static char temp[] = "/tmp/tempfileXXXXXX";
+  char file_name[32];
+  strncpy(file_name, temp, 32);
+  return mkstemp(file_name);
+}
+
+/**
+ * Seek to the beginning of a file and read a message from it.
+ *
+ * @param fd File descriptor of the file.
+ * @param message_type Message type that we expect in the file.
+ *
+ * @return The content of the message.
+ */
+std::vector<uint8_t> read_message_from_file(int fd, int message_type) {
+  /* Go to the beginning of the file. */
+  lseek(fd, 0, SEEK_SET);
+  int64_t type;
+  std::vector<uint8_t> data;
+  ARROW_CHECK_OK(ReadMessage(fd, &type, &data));
+  ARROW_CHECK(type == message_type);
+  return data;
+}
+
+PlasmaObject random_plasma_object(void) {
+  unsigned int seed = static_cast<unsigned int>(time(NULL));
+  int random = rand_r(&seed);
+  PlasmaObject object;
+  memset(&object, 0, sizeof(object));
+  object.handle.store_fd = random + 7;
+  object.handle.mmap_size = random + 42;
+  object.data_offset = random + 1;
+  object.metadata_offset = random + 2;
+  object.data_size = random + 3;
+  object.metadata_size = random + 4;
+  return object;
+}
+
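Every test below follows the same round-trip shape: serialize into a temp
file with a Send* function, read the raw message back with
read_message_from_file, deserialize with the matching Read* function, and
compare fields. A generic sketch of that harness, using the two helpers
defined above (the callables are placeholders; the actual tests spell each
case out):

    // Round-trip harness sketch. `send` wraps a Send* call and `check`
    // wraps the matching Read* call plus its ASSERTs.
    template <typename SendFn, typename CheckFn>
    void round_trip(int message_type, SendFn send, CheckFn check) {
      int fd = create_temp_file();
      ARROW_CHECK_OK(send(fd));  // e.g. SendCreateRequest(fd, id, 42, 11)
      std::vector<uint8_t> data = read_message_from_file(fd, message_type);
      check(data.data());        // e.g. ReadCreateRequest + ASSERT_EQs
      close(fd);
    }
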
+TEST(PlasmaSerialization, CreateRequest) {
+  int fd = create_temp_file();
+  ObjectID object_id1 = ObjectID::from_random();
+  int64_t data_size1 = 42;
+  int64_t metadata_size1 = 11;
+  ARROW_CHECK_OK(SendCreateRequest(fd, object_id1, data_size1, metadata_size1));
+  std::vector<uint8_t> data = read_message_from_file(fd, MessageType_PlasmaCreateRequest);
+  ObjectID object_id2;
+  int64_t data_size2;
+  int64_t metadata_size2;
+  ARROW_CHECK_OK(
+      ReadCreateRequest(data.data(), &object_id2, &data_size2, &metadata_size2));
+  ASSERT_EQ(data_size1, data_size2);
+  ASSERT_EQ(metadata_size1, metadata_size2);
+  ASSERT_EQ(object_id1, object_id2);
+  close(fd);
+}
+
+TEST(PlasmaSerialization, CreateReply) {
+  int fd = create_temp_file();
+  ObjectID object_id1 = ObjectID::from_random();
+  PlasmaObject object1 = random_plasma_object();
+  ARROW_CHECK_OK(SendCreateReply(fd, object_id1, &object1, 0));
+  std::vector<uint8_t> data = read_message_from_file(fd, MessageType_PlasmaCreateReply);
+  ObjectID object_id2;
+  PlasmaObject object2;
+  memset(&object2, 0, sizeof(object2));
+  ARROW_CHECK_OK(ReadCreateReply(data.data(), &object_id2, &object2));
+  ASSERT_EQ(object_id1, object_id2);
+  ASSERT_EQ(memcmp(&object1, &object2, sizeof(object1)), 0);
+  close(fd);
+}
+
+TEST(PlasmaSerialization, SealRequest) {
+  int fd = create_temp_file();
+  ObjectID object_id1 = ObjectID::from_random();
+  unsigned char digest1[kDigestSize];
+  memset(&digest1[0], 7, kDigestSize);
+  ARROW_CHECK_OK(SendSealRequest(fd, object_id1, &digest1[0]));
+  std::vector<uint8_t> data = read_message_from_file(fd, MessageType_PlasmaSealRequest);
+  ObjectID object_id2;
+  unsigned char digest2[kDigestSize];
+  ARROW_CHECK_OK(ReadSealRequest(data.data(), &object_id2, &digest2[0]));
+  ASSERT_EQ(object_id1, object_id2);
+  ASSERT_EQ(memcmp(&digest1[0], &digest2[0], kDigestSize), 0);
+  close(fd);
+}
+
+TEST(PlasmaSerialization, SealReply) {
+  int fd = create_temp_file();
+  ObjectID object_id1 = ObjectID::from_random();
+  ARROW_CHECK_OK(SendSealReply(fd, object_id1, PlasmaError_ObjectExists));
+  std::vector<uint8_t> data = read_message_from_file(fd, MessageType_PlasmaSealReply);
+  ObjectID object_id2;
+  Status s = ReadSealReply(data.data(), &object_id2);
+  ASSERT_EQ(object_id1, object_id2);
+  ASSERT_TRUE(s.IsPlasmaObjectExists());
+  close(fd);
+}
+
+TEST(PlasmaSerialization, GetRequest) {
+  int fd = create_temp_file();
+  ObjectID object_ids[2];
+  object_ids[0] = ObjectID::from_random();
+  object_ids[1] = ObjectID::from_random();
+  int64_t timeout_ms = 1234;
+  ARROW_CHECK_OK(SendGetRequest(fd, object_ids, 2, timeout_ms));
+  std::vector<uint8_t> data = read_message_from_file(fd, MessageType_PlasmaGetRequest);
+  std::vector<ObjectID> object_ids_return;
+  int64_t timeout_ms_return;
+  ARROW_CHECK_OK(ReadGetRequest(data.data(), object_ids_return, &timeout_ms_return));
+  ASSERT_EQ(object_ids[0], object_ids_return[0]);
+  ASSERT_EQ(object_ids[1], object_ids_return[1]);
+  ASSERT_EQ(timeout_ms, timeout_ms_return);
+  close(fd);
+}
+
+TEST(PlasmaSerialization, GetReply) {
+  int fd = create_temp_file();
+  ObjectID object_ids[2];
+  object_ids[0] = ObjectID::from_random();
+  object_ids[1] = ObjectID::from_random();
+  std::unordered_map<ObjectID, PlasmaObject, UniqueIDHasher> plasma_objects;
+  plasma_objects[object_ids[0]] = random_plasma_object();
+  plasma_objects[object_ids[1]] = random_plasma_object();
+  ARROW_CHECK_OK(SendGetReply(fd, object_ids, plasma_objects, 2));
+  std::vector<uint8_t> data = read_message_from_file(fd, MessageType_PlasmaGetReply);
+  ObjectID object_ids_return[2];
+  PlasmaObject plasma_objects_return[2];
+  memset(&plasma_objects_return, 0, sizeof(plasma_objects_return));
+  ARROW_CHECK_OK(
+      ReadGetReply(data.data(), object_ids_return, &plasma_objects_return[0], 2));
+  ASSERT_EQ(object_ids[0], object_ids_return[0]);
+  ASSERT_EQ(object_ids[1], object_ids_return[1]);
+  ASSERT_EQ(memcmp(&plasma_objects[object_ids[0]], &plasma_objects_return[0],
+                sizeof(PlasmaObject)),
+      0);
+  ASSERT_EQ(memcmp(&plasma_objects[object_ids[1]], &plasma_objects_return[1],
+                sizeof(PlasmaObject)),
+      0);
+  close(fd);
+}
+
+TEST(PlasmaSerialization, ReleaseRequest) {
+  int fd = create_temp_file();
+  ObjectID object_id1 = ObjectID::from_random();
+  ARROW_CHECK_OK(SendReleaseRequest(fd, object_id1));
+  std::vector<uint8_t> data =
+      read_message_from_file(fd, MessageType_PlasmaReleaseRequest);
+  ObjectID object_id2;
+  ARROW_CHECK_OK(ReadReleaseRequest(data.data(), &object_id2));
+  ASSERT_EQ(object_id1, object_id2);
+  close(fd);
+}
+
+TEST(PlasmaSerialization, ReleaseReply) {
+  int fd = create_temp_file();
+  ObjectID object_id1 = ObjectID::from_random();
+  ARROW_CHECK_OK(SendReleaseReply(fd, object_id1, PlasmaError_ObjectExists));
+  std::vector<uint8_t> data = read_message_from_file(fd, MessageType_PlasmaReleaseReply);
+  ObjectID object_id2;
+  Status s = ReadReleaseReply(data.data(), &object_id2);
+  ASSERT_EQ(object_id1, object_id2);
+  ASSERT_TRUE(s.IsPlasmaObjectExists());
+  close(fd);
+}
+
+TEST(PlasmaSerialization, DeleteRequest) {
+  int fd = create_temp_file();
+  ObjectID object_id1 = ObjectID::from_random();
+  ARROW_CHECK_OK(SendDeleteRequest(fd, object_id1));
+  std::vector<uint8_t> data = read_message_from_file(fd, MessageType_PlasmaDeleteRequest);
+  ObjectID object_id2;
+  ARROW_CHECK_OK(ReadDeleteRequest(data.data(), &object_id2));
+  ASSERT_EQ(object_id1, object_id2);
+  close(fd);
+}
+
+TEST(PlasmaSerialization, DeleteReply) {
+  int fd = create_temp_file();
+  ObjectID object_id1 = ObjectID::from_random();
+  int error1 = PlasmaError_ObjectExists;
+  ARROW_CHECK_OK(SendDeleteReply(fd, object_id1, error1));
+  std::vector<uint8_t> data = read_message_from_file(fd, MessageType_PlasmaDeleteReply);
+  ObjectID object_id2;
+  Status s = ReadDeleteReply(data.data(), &object_id2);
+  ASSERT_EQ(object_id1, object_id2);
+  ASSERT_TRUE(s.IsPlasmaObjectExists());
+  close(fd);
+}
+
+TEST(PlasmaSerialization, StatusRequest) {
+  int fd = create_temp_file();
+  int64_t num_objects = 2;
+  ObjectID object_ids[num_objects];
+  object_ids[0] = ObjectID::from_random();
+  object_ids[1] = ObjectID::from_random();
+  ARROW_CHECK_OK(SendStatusRequest(fd, object_ids, num_objects));
+  std::vector<uint8_t> data = read_message_from_file(fd, MessageType_PlasmaStatusRequest);
+  ObjectID object_ids_read[num_objects];
+  ARROW_CHECK_OK(ReadStatusRequest(data.data(), object_ids_read, num_objects));
+  ASSERT_EQ(object_ids[0], object_ids_read[0]);
+  ASSERT_EQ(object_ids[1], object_ids_read[1]);
+  close(fd);
+}
+
+TEST(PlasmaSerialization, StatusReply) {
+  int fd = create_temp_file();
+  ObjectID object_ids[2];
+  object_ids[0] = ObjectID::from_random();
+  object_ids[1] = ObjectID::from_random();
+  int object_statuses[2] = {42, 43};
+  ARROW_CHECK_OK(SendStatusReply(fd, object_ids, object_statuses, 2));
+  std::vector<uint8_t> data = read_message_from_file(fd, MessageType_PlasmaStatusReply);
+  int64_t num_objects = ReadStatusReply_num_objects(data.data());
+  ObjectID object_ids_read[num_objects];
+  int object_statuses_read[num_objects];
+  ARROW_CHECK_OK(
+      ReadStatusReply(data.data(), object_ids_read, object_statuses_read, num_objects));
+  ASSERT_EQ(object_ids[0], object_ids_read[0]);
+  ASSERT_EQ(object_ids[1], object_ids_read[1]);
+  ASSERT_EQ(object_statuses[0], object_statuses_read[0]);
+  ASSERT_EQ(object_statuses[1], object_statuses_read[1]);
+  close(fd);
+}
+
+TEST(PlasmaSerialization, EvictRequest) {
+  int fd = create_temp_file();
+  int64_t num_bytes = 111;
+  ARROW_CHECK_OK(SendEvictRequest(fd, num_bytes));
+  std::vector<uint8_t> data = read_message_from_file(fd, MessageType_PlasmaEvictRequest);
+  int64_t num_bytes_received;
+  ARROW_CHECK_OK(ReadEvictRequest(data.data(), &num_bytes_received));
+  ASSERT_EQ(num_bytes, num_bytes_received);
+  close(fd);
+}
+
+TEST(PlasmaSerialization, EvictReply) {
+  int fd = create_temp_file();
+  int64_t num_bytes = 111;
+  ARROW_CHECK_OK(SendEvictReply(fd, num_bytes));
+  std::vector<uint8_t> data = read_message_from_file(fd, MessageType_PlasmaEvictReply);
+  int64_t num_bytes_received;
+  ARROW_CHECK_OK(ReadEvictReply(data.data(), num_bytes_received));
+  ASSERT_EQ(num_bytes, num_bytes_received);
+  close(fd);
+}
+
+TEST(PlasmaSerialization, FetchRequest) {
+  int fd = create_temp_file();
+  ObjectID object_ids[2];
+  object_ids[0] = ObjectID::from_random();
+  object_ids[1] = ObjectID::from_random();
+  ARROW_CHECK_OK(SendFetchRequest(fd, object_ids, 2));
+  std::vector<uint8_t> data = read_message_from_file(fd, MessageType_PlasmaFetchRequest);
+  std::vector<ObjectID> object_ids_read;
+  ARROW_CHECK_OK(ReadFetchRequest(data.data(), object_ids_read));
+  ASSERT_EQ(object_ids[0], object_ids_read[0]);
+  ASSERT_EQ(object_ids[1], object_ids_read[1]);
+  close(fd);
+}
+
+TEST(PlasmaSerialization, WaitRequest) {
+  int fd = create_temp_file();
+  const int num_objects_in = 2;
+  ObjectRequest object_requests_in[num_objects_in] = {
+      ObjectRequest({ObjectID::from_random(), PLASMA_QUERY_ANYWHERE, 0}),
+      ObjectRequest({ObjectID::from_random(), PLASMA_QUERY_LOCAL, 0})};
+  const int num_ready_objects_in = 1;
+  int64_t timeout_ms = 1000;
+
+  ARROW_CHECK_OK(SendWaitRequest(
+      fd, &object_requests_in[0], num_objects_in, num_ready_objects_in, timeout_ms));
+  /* Read message back. */
+  std::vector<uint8_t> data = read_message_from_file(fd, MessageType_PlasmaWaitRequest);
+  int num_ready_objects_out;
+  int64_t timeout_ms_read;
+  ObjectRequestMap object_requests_out;
+  ARROW_CHECK_OK(ReadWaitRequest(
+      data.data(), object_requests_out, &timeout_ms_read, &num_ready_objects_out));
+  ASSERT_EQ(num_objects_in, object_requests_out.size());
+  ASSERT_EQ(num_ready_objects_out, num_ready_objects_in);
+  for (int i = 0; i < num_objects_in; i++) {
+    const ObjectID& object_id = object_requests_in[i].object_id;
+    ASSERT_EQ(1, object_requests_out.count(object_id));
+    const auto& entry = object_requests_out.find(object_id);
+    ASSERT_TRUE(entry != object_requests_out.end());
+    ASSERT_EQ(entry->second.object_id, object_requests_in[i].object_id);
+    ASSERT_EQ(entry->second.type, object_requests_in[i].type);
+  }
+  close(fd);
+}
+
+TEST(PlasmaSerialization, WaitReply) {
+  int fd = create_temp_file();
+  const int num_objects_in = 2;
+  /* Create a map with two ObjectRequests in it. */
+  ObjectRequestMap objects_in(num_objects_in);
+  ObjectID id1 = ObjectID::from_random();
+  objects_in[id1] = ObjectRequest({id1, 0, ObjectStatus_Local});
+  ObjectID id2 = ObjectID::from_random();
+  objects_in[id2] = ObjectRequest({id2, 0, ObjectStatus_Nonexistent});
+
+  ARROW_CHECK_OK(SendWaitReply(fd, objects_in, num_objects_in));
+  /* Read message back. */
+  std::vector<uint8_t> data = read_message_from_file(fd, MessageType_PlasmaWaitReply);
+  ObjectRequest objects_out[2];
+  int num_objects_out;
+  ARROW_CHECK_OK(ReadWaitReply(data.data(), &objects_out[0], &num_objects_out));
+  ASSERT_EQ(num_objects_in, num_objects_out);
+  for (int i = 0; i < num_objects_out; i++) {
+    /* Each object request must appear exactly once. */
+    ASSERT_EQ(objects_in.count(objects_out[i].object_id), 1);
+    const auto& entry = objects_in.find(objects_out[i].object_id);
+    ASSERT_TRUE(entry != objects_in.end());
+    ASSERT_EQ(entry->second.object_id, objects_out[i].object_id);
+    ASSERT_EQ(entry->second.status, objects_out[i].status);
+  }
+  close(fd);
+}
+
+TEST(PlasmaSerialization, DataRequest) {
+  int fd = create_temp_file();
+  ObjectID object_id1 = ObjectID::from_random();
+  const char* address1 = "address1";
+  int port1 = 12345;
+  ARROW_CHECK_OK(SendDataRequest(fd, object_id1, address1, port1));
+  /* Reading message back. */
+  std::vector<uint8_t> data = read_message_from_file(fd, MessageType_PlasmaDataRequest);
+  ObjectID object_id2;
+  char* address2;
+  int port2;
+  ARROW_CHECK_OK(ReadDataRequest(data.data(), &object_id2, &address2, &port2));
+  ASSERT_EQ(object_id1, object_id2);
+  ASSERT_EQ(strcmp(address1, address2), 0);
+  ASSERT_EQ(port1, port2);
+  free(address2);
+  close(fd);
+}
+
+TEST(PlasmaSerialization, DataReply) {
+  int fd = create_temp_file();
+  ObjectID object_id1 = ObjectID::from_random();
+  int64_t object_size1 = 146;
+  int64_t metadata_size1 = 198;
+  ARROW_CHECK_OK(SendDataReply(fd, object_id1, object_size1, metadata_size1));
+  /* Reading message back. */
+  std::vector<uint8_t> data = read_message_from_file(fd, MessageType_PlasmaDataReply);
+  ObjectID object_id2;
+  int64_t object_size2;
+  int64_t metadata_size2;
+  ARROW_CHECK_OK(ReadDataReply(data.data(), &object_id2, &object_size2, &metadata_size2));
+  ASSERT_EQ(object_id1, object_id2);
+  ASSERT_EQ(object_size1, object_size2);
+  ASSERT_EQ(metadata_size1, metadata_size2);
+}

http://git-wip-us.apache.org/repos/asf/arrow/blob/5e343098/cpp/src/plasma/thirdparty/ae/ae.c
----------------------------------------------------------------------
diff --git a/cpp/src/plasma/thirdparty/ae/ae.c b/cpp/src/plasma/thirdparty/ae/ae.c
new file mode 100644
index 0000000..e66808a
--- /dev/null
+++ b/cpp/src/plasma/thirdparty/ae/ae.c
@@ -0,0 +1,465 @@
+/* A simple event-driven programming library. Originally I wrote this code
+ * for Jim's event loop (Jim is a Tcl interpreter) but later translated it
+ * into the form of a library for easy reuse.
+ *
+ * Copyright (c) 2006-2010, Salvatore Sanfilippo <antirez at gmail dot com>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *   * Redistributions of source code must retain the above copyright notice,
+ *     this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ *   * Neither the name of Redis nor the names of its contributors may be used
+ *     to endorse or promote products derived from this software without
+ *     specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <poll.h>
+#include <string.h>
+#include <time.h>
+#include <errno.h>
+
+#include "ae.h"
+#include "zmalloc.h"
+#include "config.h"
+
+/* Include the best multiplexing layer supported by this system.
+ * The following should be ordered by performance, descending. */
+#ifdef HAVE_EVPORT
+#include "ae_evport.c"
+#else
+    #ifdef HAVE_EPOLL
+    #include "ae_epoll.c"
+    #else
+        #ifdef HAVE_KQUEUE
+        #include "ae_kqueue.c"
+        #else
+        #include "ae_select.c"
+        #endif
+    #endif
+#endif
+
+aeEventLoop *aeCreateEventLoop(int setsize) {
+    aeEventLoop *eventLoop;
+    int i;
+
+    if ((eventLoop = zmalloc(sizeof(*eventLoop))) == NULL) goto err;
+    eventLoop->events = zmalloc(sizeof(aeFileEvent)*setsize);
+    eventLoop->fired = zmalloc(sizeof(aeFiredEvent)*setsize);
+    if (eventLoop->events == NULL || eventLoop->fired == NULL) goto err;
+    eventLoop->setsize = setsize;
+    eventLoop->lastTime = time(NULL);
+    eventLoop->timeEventHead = NULL;
+    eventLoop->timeEventNextId = 0;
+    eventLoop->stop = 0;
+    eventLoop->maxfd = -1;
+    eventLoop->beforesleep = NULL;
+    if (aeApiCreate(eventLoop) == -1) goto err;
+    /* Events with mask == AE_NONE are not set. So let's initialize the
+     * vector with it. */
+    for (i = 0; i < setsize; i++)
+        eventLoop->events[i].mask = AE_NONE;
+    return eventLoop;
+
+err:
+    if (eventLoop) {
+        zfree(eventLoop->events);
+        zfree(eventLoop->fired);
+        zfree(eventLoop);
+    }
+    return NULL;
+}
+
+/* Return the current set size. */
+int aeGetSetSize(aeEventLoop *eventLoop) {
+    return eventLoop->setsize;
+}
+
+/* Resize the maximum set size of the event loop.
+ * If the requested set size is smaller than the current set size, but
+ * there is already a file descriptor in use that is >= the requested
+ * set size, AE_ERR is returned and the operation is not
+ * performed at all.
+ *
+ * Otherwise AE_OK is returned and the operation is successful. */
+int aeResizeSetSize(aeEventLoop *eventLoop, int setsize) {
+    int i;
+
+    if (setsize == eventLoop->setsize) return AE_OK;
+    if (eventLoop->maxfd >= setsize) return AE_ERR;
+    if (aeApiResize(eventLoop,setsize) == -1) return AE_ERR;
+
+    eventLoop->events = zrealloc(eventLoop->events,sizeof(aeFileEvent)*setsize);
+    eventLoop->fired = zrealloc(eventLoop->fired,sizeof(aeFiredEvent)*setsize);
+    eventLoop->setsize = setsize;
+
+    /* Make sure that if we created new slots, they are initialized with
+     * an AE_NONE mask. */
+    for (i = eventLoop->maxfd+1; i < setsize; i++)
+        eventLoop->events[i].mask = AE_NONE;
+    return AE_OK;
+}
+
+void aeDeleteEventLoop(aeEventLoop *eventLoop) {
+    aeApiFree(eventLoop);
+    zfree(eventLoop->events);
+    zfree(eventLoop->fired);
+    zfree(eventLoop);
+}
+
+void aeStop(aeEventLoop *eventLoop) {
+    eventLoop->stop = 1;
+}
+
+int aeCreateFileEvent(aeEventLoop *eventLoop, int fd, int mask,
+        aeFileProc *proc, void *clientData)
+{
+    if (fd >= eventLoop->setsize) {
+        errno = ERANGE;
+        return AE_ERR;
+    }
+    aeFileEvent *fe = &eventLoop->events[fd];
+
+    if (aeApiAddEvent(eventLoop, fd, mask) == -1)
+        return AE_ERR;
+    fe->mask |= mask;
+    if (mask & AE_READABLE) fe->rfileProc = proc;
+    if (mask & AE_WRITABLE) fe->wfileProc = proc;
+    fe->clientData = clientData;
+    if (fd > eventLoop->maxfd)
+        eventLoop->maxfd = fd;
+    return AE_OK;
+}
+
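A minimal usage sketch for the file-event API above: register a read handler
and run the loop. This is illustrative only, assuming the standard ae API
(aeMain is defined later in this file, and the handler signature follows
aeFileProc):

    extern "C" {
    #include "ae.h"
    }

    // Called by the loop whenever `fd` becomes readable.
    static void on_readable(aeEventLoop* loop, int fd, void* client_data,
                            int mask) {
      // Handle the readable fd; plasma's EventLoop wrapper dispatches to
      // a std::function here instead.
    }

    int ae_example(int fd) {
      aeEventLoop* loop = aeCreateEventLoop(128);
      if (loop == nullptr) return -1;
      if (aeCreateFileEvent(loop, fd, AE_READABLE, on_readable, nullptr) ==
          AE_ERR) {
        aeDeleteEventLoop(loop);
        return -1;
      }
      aeMain(loop);  // runs until aeStop() is called
      aeDeleteEventLoop(loop);
      return 0;
    }
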
+void aeDeleteFileEvent(aeEventLoop *eventLoop, int fd, int mask)
+{
+    if (fd >= eventLoop->setsize) return;
+    aeFileEvent *fe = &eventLoop->events[fd];
+    if (fe->mask == AE_NONE) return;
+
+    aeApiDelEvent(eventLoop, fd, mask);
+    fe->mask = fe->mask & (~mask);
+    if (fd == eventLoop->maxfd && fe->mask == AE_NONE) {
+        /* Update the max fd */
+        int j;
+
+        for (j = eventLoop->maxfd-1; j >= 0; j--)
+            if (eventLoop->events[j].mask != AE_NONE) break;
+        eventLoop->maxfd = j;
+    }
+}
+
+int aeGetFileEvents(aeEventLoop *eventLoop, int fd) {
+    if (fd >= eventLoop->setsize) return 0;
+    aeFileEvent *fe = &eventLoop->events[fd];
+
+    return fe->mask;
+}
+
+static void aeGetTime(long *seconds, long *milliseconds)
+{
+    struct timeval tv;
+
+    gettimeofday(&tv, NULL);
+    *seconds = tv.tv_sec;
+    *milliseconds = tv.tv_usec/1000;
+}
+
+static void aeAddMillisecondsToNow(long long milliseconds, long *sec, long *ms) {
+    long cur_sec, cur_ms, when_sec, when_ms;
+
+    aeGetTime(&cur_sec, &cur_ms);
+    when_sec = cur_sec + milliseconds/1000;
+    when_ms = cur_ms + milliseconds%1000;
+    if (when_ms >= 1000) {
+        when_sec ++;
+        when_ms -= 1000;
+    }
+    *sec = when_sec;
+    *ms = when_ms;
+}
+
+long long aeCreateTimeEvent(aeEventLoop *eventLoop, long long milliseconds,
+        aeTimeProc *proc, void *clientData,
+        aeEventFinalizerProc *finalizerProc)
+{
+    long long id = eventLoop->timeEventNextId++;
+    aeTimeEvent *te;
+
+    te = zmalloc(sizeof(*te));
+    if (te == NULL) return AE_ERR;
+    te->id = id;
+    aeAddMillisecondsToNow(milliseconds,&te->when_sec,&te->when_ms);
+    te->timeProc = proc;
+    te->finalizerProc = finalizerProc;
+    te->clientData = clientData;
+    te->next = eventLoop->timeEventHead;
+    eventLoop->timeEventHead = te;
+    return id;
+}
+
+int aeDeleteTimeEvent(aeEventLoop *eventLoop, long long id)
+{
+    aeTimeEvent *te = eventLoop->timeEventHead;
+    while(te) {
+        if (te->id == id) {
+            te->id = AE_DELETED_EVENT_ID;
+            return AE_OK;
+        }
+        te = te->next;
+    }
+    return AE_ERR; /* NO event with the specified ID found */
+}
+
+/* Search for the first timer to fire.
+ * This operation is useful to know for how long the select can be put
+ * to sleep without delaying any event.
+ * If there are no timers, NULL is returned.
+ *
+ * Note that this is O(N) since time events are unsorted.
+ * Possible optimizations (not needed by Redis so far, but...):
+ * 1) Insert the event in order, so that the nearest is just the head.
+ *    Much better, but insertion or deletion of timers would still be O(N).
+ * 2) Use a skiplist to make this operation O(1) and insertion O(log(N)).
+ */
+static aeTimeEvent *aeSearchNearestTimer(aeEventLoop *eventLoop)
+{
+    aeTimeEvent *te = eventLoop->timeEventHead;
+    aeTimeEvent *nearest = NULL;
+
+    while(te) {
+        if (!nearest || te->when_sec < nearest->when_sec ||
+                (te->when_sec == nearest->when_sec &&
+                 te->when_ms < nearest->when_ms))
+            nearest = te;
+        te = te->next;
+    }
+    return nearest;
+}
+
+/* Process time events */
+static int processTimeEvents(aeEventLoop *eventLoop) {
+    int processed = 0;
+    aeTimeEvent *te, *prev;
+    long long maxId;
+    time_t now = time(NULL);
+
+    /* If the system clock is moved to the future, and then set back to the
+     * right value, time events may be delayed in a random way. Often this
+     * means that scheduled operations will not be performed soon enough.
+     *
+     * Here we try to detect system clock skews, and force all the time
+     * events to be processed ASAP when this happens: the idea is that
+     * processing events earlier is less dangerous than delaying them
+     * indefinitely, and practice suggests it is. */
+    if (now < eventLoop->lastTime) {
+        te = eventLoop->timeEventHead;
+        while(te) {
+            te->when_sec = 0;
+            te = te->next;
+        }
+    }
+    eventLoop->lastTime = now;
+
+    prev = NULL;
+    te = eventLoop->timeEventHead;
+    maxId = eventLoop->timeEventNextId-1;
+    while(te) {
+        long now_sec, now_ms;
+        long long id;
+
+        /* Remove events scheduled for deletion. */
+        if (te->id == AE_DELETED_EVENT_ID) {
+            aeTimeEvent *next = te->next;
+            if (prev == NULL)
+                eventLoop->timeEventHead = te->next;
+            else
+                prev->next = te->next;
+            if (te->finalizerProc)
+                te->finalizerProc(eventLoop, te->clientData);
+            zfree(te);
+            te = next;
+            continue;
+        }
+
+        /* Make sure we don't process time events created by time events in
+         * this iteration. Note that this check is currently useless: we
+         * always add new timers to the head of the list. However, if we
+         * change that implementation detail, this check may become useful
+         * again, so we keep it here as a defensive measure. */
+        if (te->id > maxId) {
+            te = te->next;
+            continue;
+        }
+        aeGetTime(&now_sec, &now_ms);
+        if (now_sec > te->when_sec ||
+            (now_sec == te->when_sec && now_ms >= te->when_ms))
+        {
+            int retval;
+
+            id = te->id;
+            retval = te->timeProc(eventLoop, id, te->clientData);
+            processed++;
+            if (retval != AE_NOMORE) {
+                aeAddMillisecondsToNow(retval,&te->when_sec,&te->when_ms);
+            } else {
+                te->id = AE_DELETED_EVENT_ID;
+            }
+        }
+        prev = te;
+        te = te->next;
+    }
+    return processed;
+}
+
+/* Process every pending time event, then every pending file event
+ * (which may have been registered by the time event callbacks just
+ * processed). Without special flags the function sleeps until some
+ * file event fires or the next time event occurs (if any).
+ *
+ * If flags is 0, the function does nothing and returns.
+ * If flags has AE_ALL_EVENTS set, all kinds of events are processed.
+ * If flags has AE_FILE_EVENTS set, file events are processed.
+ * If flags has AE_TIME_EVENTS set, time events are processed.
+ * If flags has AE_DONT_WAIT set, the function returns as soon as all
+ * the events that can be processed without waiting have been processed.
+ *
+ * The function returns the number of events processed. */
+int aeProcessEvents(aeEventLoop *eventLoop, int flags)
+{
+    int processed = 0, numevents;
+
+    /* Nothing to do? return ASAP */
+    if (!(flags & AE_TIME_EVENTS) && !(flags & AE_FILE_EVENTS)) return 0;
+
+    /* Note that we want to call select() even if there are no
+     * file events to process, as long as we want to process time
+     * events, in order to sleep until the next time event is ready
+     * to fire. */
+    if (eventLoop->maxfd != -1 ||
+        ((flags & AE_TIME_EVENTS) && !(flags & AE_DONT_WAIT))) {
+        int j;
+        aeTimeEvent *shortest = NULL;
+        struct timeval tv, *tvp;
+
+        if (flags & AE_TIME_EVENTS && !(flags & AE_DONT_WAIT))
+            shortest = aeSearchNearestTimer(eventLoop);
+        if (shortest) {
+            long now_sec, now_ms;
+
+            aeGetTime(&now_sec, &now_ms);
+            tvp = &tv;
+
+            /* How many milliseconds do we need to wait for the next
+             * time event to fire? */
+            long long ms =
+                (shortest->when_sec - now_sec)*1000 +
+                shortest->when_ms - now_ms;
+
+            if (ms > 0) {
+                tvp->tv_sec = ms/1000;
+                tvp->tv_usec = (ms % 1000)*1000;
+            } else {
+                tvp->tv_sec = 0;
+                tvp->tv_usec = 0;
+            }
+        } else {
+            /* If we have to check for events but need to return
+             * ASAP because of AE_DONT_WAIT, we need to set the timeout
+             * to zero. */
+            if (flags & AE_DONT_WAIT) {
+                tv.tv_sec = tv.tv_usec = 0;
+                tvp = &tv;
+            } else {
+                /* Otherwise we can block */
+                tvp = NULL; /* wait forever */
+            }
+        }
+
+        numevents = aeApiPoll(eventLoop, tvp);
+        for (j = 0; j < numevents; j++) {
+            aeFileEvent *fe = &eventLoop->events[eventLoop->fired[j].fd];
+            int mask = eventLoop->fired[j].mask;
+            int fd = eventLoop->fired[j].fd;
+            int rfired = 0;
+
+            /* Note the fe->mask & mask & ... code: maybe an already
+             * processed event removed an element that fired and that we
+             * still haven't processed, so we check if the event is still
+             * valid. */
+            if (fe->mask & mask & AE_READABLE) {
+                rfired = 1;
+                fe->rfileProc(eventLoop,fd,fe->clientData,mask);
+            }
+            if (fe->mask & mask & AE_WRITABLE) {
+                if (!rfired || fe->wfileProc != fe->rfileProc)
+                    fe->wfileProc(eventLoop,fd,fe->clientData,mask);
+            }
+            processed++;
+        }
+    }
+    /* Check time events */
+    if (flags & AE_TIME_EVENTS)
+        processed += processTimeEvents(eventLoop);
+
+    return processed; /* return the number of processed file/time events */
+}
+
+/* Wait for milliseconds until the given file descriptor becomes
+ * writable/readable/exception */
+int aeWait(int fd, int mask, long long milliseconds) {
+    struct pollfd pfd;
+    int retmask = 0, retval;
+
+    memset(&pfd, 0, sizeof(pfd));
+    pfd.fd = fd;
+    if (mask & AE_READABLE) pfd.events |= POLLIN;
+    if (mask & AE_WRITABLE) pfd.events |= POLLOUT;
+
+    if ((retval = poll(&pfd, 1, milliseconds))== 1) {
+        if (pfd.revents & POLLIN) retmask |= AE_READABLE;
+        if (pfd.revents & POLLOUT) retmask |= AE_WRITABLE;
+        if (pfd.revents & POLLERR) retmask |= AE_WRITABLE;
+        if (pfd.revents & POLLHUP) retmask |= AE_WRITABLE;
+        return retmask;
+    } else {
+        return retval;
+    }
+}
+
+void aeMain(aeEventLoop *eventLoop) {
+    eventLoop->stop = 0;
+    while (!eventLoop->stop) {
+        if (eventLoop->beforesleep != NULL)
+            eventLoop->beforesleep(eventLoop);
+        aeProcessEvents(eventLoop, AE_ALL_EVENTS);
+    }
+}
+
+char *aeGetApiName(void) {
+    return aeApiName();
+}
+
+void aeSetBeforeSleepProc(aeEventLoop *eventLoop, aeBeforeSleepProc *beforesleep) {
+    eventLoop->beforesleep = beforesleep;
+}
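
A note on the time event contract implemented by processTimeEvents() above: a
timer callback's return value is the interval in milliseconds until it should
fire again, and AE_NOMORE marks it for deletion. A minimal sketch, assuming
ae.c and a zmalloc shim are linked in; tick() and its counter are hypothetical:

    #include "ae.h"

    /* Hypothetical periodic callback: fire 10 times at 100 ms intervals,
     * then return AE_NOMORE so processTimeEvents() unlinks the event. */
    static int tick(aeEventLoop *loop, long long id, void *clientData) {
        int *remaining = (int *)clientData;
        if (--(*remaining) == 0) return AE_NOMORE; /* delete this timer */
        return 100;                                /* re-fire in 100 ms */
    }

    int main(void) {
        aeEventLoop *loop = aeCreateEventLoop(16);
        int remaining = 10;
        aeCreateTimeEvent(loop, 100, tick, &remaining, NULL);
        while (remaining > 0)
            aeProcessEvents(loop, AE_ALL_EVENTS); /* sleeps until the timer is due */
        aeDeleteEventLoop(loop);
        return 0;
    }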

http://git-wip-us.apache.org/repos/asf/arrow/blob/5e343098/cpp/src/plasma/thirdparty/ae/ae.h
----------------------------------------------------------------------
diff --git a/cpp/src/plasma/thirdparty/ae/ae.h b/cpp/src/plasma/thirdparty/ae/ae.h
new file mode 100644
index 0000000..827c4c9
--- /dev/null
+++ b/cpp/src/plasma/thirdparty/ae/ae.h
@@ -0,0 +1,123 @@
+/* A simple event-driven programming library. Originally I wrote this code
+ * for Jim's event-loop (Jim is a Tcl interpreter) but later translated it
+ * into the form of a library for easy reuse.
+ *
+ * Copyright (c) 2006-2012, Salvatore Sanfilippo <antirez at gmail dot com>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *   * Redistributions of source code must retain the above copyright notice,
+ *     this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ *   * Neither the name of Redis nor the names of its contributors may be used
+ *     to endorse or promote products derived from this software without
+ *     specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __AE_H__
+#define __AE_H__
+
+#include <time.h>
+
+#define AE_OK 0
+#define AE_ERR -1
+
+#define AE_NONE 0
+#define AE_READABLE 1
+#define AE_WRITABLE 2
+
+#define AE_FILE_EVENTS 1
+#define AE_TIME_EVENTS 2
+#define AE_ALL_EVENTS (AE_FILE_EVENTS|AE_TIME_EVENTS)
+#define AE_DONT_WAIT 4
+
+#define AE_NOMORE -1
+#define AE_DELETED_EVENT_ID -1
+
+/* Macros */
+#define AE_NOTUSED(V) ((void) V)
+
+struct aeEventLoop;
+
+/* Types and data structures */
+typedef void aeFileProc(struct aeEventLoop *eventLoop, int fd, void *clientData, int mask);
+typedef int aeTimeProc(struct aeEventLoop *eventLoop, long long id, void *clientData);
+typedef void aeEventFinalizerProc(struct aeEventLoop *eventLoop, void *clientData);
+typedef void aeBeforeSleepProc(struct aeEventLoop *eventLoop);
+
+/* File event structure */
+typedef struct aeFileEvent {
+    int mask; /* one of AE_(READABLE|WRITABLE) */
+    aeFileProc *rfileProc;
+    aeFileProc *wfileProc;
+    void *clientData;
+} aeFileEvent;
+
+/* Time event structure */
+typedef struct aeTimeEvent {
+    long long id; /* time event identifier. */
+    long when_sec; /* seconds */
+    long when_ms; /* milliseconds */
+    aeTimeProc *timeProc;
+    aeEventFinalizerProc *finalizerProc;
+    void *clientData;
+    struct aeTimeEvent *next;
+} aeTimeEvent;
+
+/* A fired event */
+typedef struct aeFiredEvent {
+    int fd;
+    int mask;
+} aeFiredEvent;
+
+/* State of an event based program */
+typedef struct aeEventLoop {
+    int maxfd;   /* highest file descriptor currently registered */
+    int setsize; /* max number of file descriptors tracked */
+    long long timeEventNextId;
+    time_t lastTime;     /* Used to detect system clock skew */
+    aeFileEvent *events; /* Registered events */
+    aeFiredEvent *fired; /* Fired events */
+    aeTimeEvent *timeEventHead;
+    int stop;
+    void *apidata; /* This is used for polling API specific data */
+    aeBeforeSleepProc *beforesleep;
+} aeEventLoop;
+
+/* Prototypes */
+aeEventLoop *aeCreateEventLoop(int setsize);
+void aeDeleteEventLoop(aeEventLoop *eventLoop);
+void aeStop(aeEventLoop *eventLoop);
+int aeCreateFileEvent(aeEventLoop *eventLoop, int fd, int mask,
+        aeFileProc *proc, void *clientData);
+void aeDeleteFileEvent(aeEventLoop *eventLoop, int fd, int mask);
+int aeGetFileEvents(aeEventLoop *eventLoop, int fd);
+long long aeCreateTimeEvent(aeEventLoop *eventLoop, long long milliseconds,
+        aeTimeProc *proc, void *clientData,
+        aeEventFinalizerProc *finalizerProc);
+int aeDeleteTimeEvent(aeEventLoop *eventLoop, long long id);
+int aeProcessEvents(aeEventLoop *eventLoop, int flags);
+int aeWait(int fd, int mask, long long milliseconds);
+void aeMain(aeEventLoop *eventLoop);
+char *aeGetApiName(void);
+void aeSetBeforeSleepProc(aeEventLoop *eventLoop, aeBeforeSleepProc *beforesleep);
+int aeGetSetSize(aeEventLoop *eventLoop);
+int aeResizeSetSize(aeEventLoop *eventLoop, int setsize);
+
+#endif
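
Given the prototypes above, wiring a file event into the loop looks roughly
like the following sketch (onReadable() is a hypothetical handler, not part of
this commit; it assumes stdin is a pollable descriptor):

    #include "ae.h"
    #include <stdio.h>
    #include <unistd.h>

    /* Hypothetical handler: drain one read from fd, then stop the loop. */
    static void onReadable(aeEventLoop *loop, int fd, void *clientData, int mask) {
        char buf[512];
        ssize_t n = read(fd, buf, sizeof(buf));
        if (n > 0) fwrite(buf, 1, (size_t)n, stdout);
        aeStop(loop); /* makes aeMain() return after this iteration */
    }

    int main(void) {
        aeEventLoop *loop = aeCreateEventLoop(16); /* tracks fds 0..15 */
        if (aeCreateFileEvent(loop, STDIN_FILENO, AE_READABLE,
                              onReadable, NULL) == AE_ERR)
            return 1; /* e.g. fd >= setsize, errno == ERANGE */
        aeMain(loop); /* runs beforesleep + aeProcessEvents until aeStop() */
        aeDeleteEventLoop(loop);
        return 0;
    }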

http://git-wip-us.apache.org/repos/asf/arrow/blob/5e343098/cpp/src/plasma/thirdparty/ae/ae_epoll.c
----------------------------------------------------------------------
diff --git a/cpp/src/plasma/thirdparty/ae/ae_epoll.c b/cpp/src/plasma/thirdparty/ae/ae_epoll.c
new file mode 100644
index 0000000..410aac7
--- /dev/null
+++ b/cpp/src/plasma/thirdparty/ae/ae_epoll.c
@@ -0,0 +1,135 @@
+/* Linux epoll(2) based ae.c module
+ *
+ * Copyright (c) 2009-2012, Salvatore Sanfilippo <antirez at gmail dot com>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *   * Redistributions of source code must retain the above copyright notice,
+ *     this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ *   * Neither the name of Redis nor the names of its contributors may be used
+ *     to endorse or promote products derived from this software without
+ *     specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#include <sys/epoll.h>
+
+typedef struct aeApiState {
+    int epfd;
+    struct epoll_event *events;
+} aeApiState;
+
+static int aeApiCreate(aeEventLoop *eventLoop) {
+    aeApiState *state = zmalloc(sizeof(aeApiState));
+
+    if (!state) return -1;
+    state->events = zmalloc(sizeof(struct epoll_event)*eventLoop->setsize);
+    if (!state->events) {
+        zfree(state);
+        return -1;
+    }
+    state->epfd = epoll_create(1024); /* 1024 is just a hint for the kernel */
+    if (state->epfd == -1) {
+        zfree(state->events);
+        zfree(state);
+        return -1;
+    }
+    eventLoop->apidata = state;
+    return 0;
+}
+
+static int aeApiResize(aeEventLoop *eventLoop, int setsize) {
+    aeApiState *state = eventLoop->apidata;
+
+    state->events = zrealloc(state->events, sizeof(struct epoll_event)*setsize);
+    return 0;
+}
+
+static void aeApiFree(aeEventLoop *eventLoop) {
+    aeApiState *state = eventLoop->apidata;
+
+    close(state->epfd);
+    zfree(state->events);
+    zfree(state);
+}
+
+static int aeApiAddEvent(aeEventLoop *eventLoop, int fd, int mask) {
+    aeApiState *state = eventLoop->apidata;
+    struct epoll_event ee = {0}; /* avoid valgrind warning */
+    /* If the fd was already monitored for some event, we need a MOD
+     * operation. Otherwise we need an ADD operation. */
+    int op = eventLoop->events[fd].mask == AE_NONE ?
+            EPOLL_CTL_ADD : EPOLL_CTL_MOD;
+
+    ee.events = 0;
+    mask |= eventLoop->events[fd].mask; /* Merge old events */
+    if (mask & AE_READABLE) ee.events |= EPOLLIN;
+    if (mask & AE_WRITABLE) ee.events |= EPOLLOUT;
+    ee.data.fd = fd;
+    if (epoll_ctl(state->epfd,op,fd,&ee) == -1) return -1;
+    return 0;
+}
+
+static void aeApiDelEvent(aeEventLoop *eventLoop, int fd, int delmask) {
+    aeApiState *state = eventLoop->apidata;
+    struct epoll_event ee = {0}; /* avoid valgrind warning */
+    int mask = eventLoop->events[fd].mask & (~delmask);
+
+    ee.events = 0;
+    if (mask & AE_READABLE) ee.events |= EPOLLIN;
+    if (mask & AE_WRITABLE) ee.events |= EPOLLOUT;
+    ee.data.fd = fd;
+    if (mask != AE_NONE) {
+        epoll_ctl(state->epfd,EPOLL_CTL_MOD,fd,&ee);
+    } else {
+        /* Note, Kernel < 2.6.9 requires a non-null event pointer even for
+         * EPOLL_CTL_DEL. */
+        epoll_ctl(state->epfd,EPOLL_CTL_DEL,fd,&ee);
+    }
+}
+
+static int aeApiPoll(aeEventLoop *eventLoop, struct timeval *tvp) {
+    aeApiState *state = eventLoop->apidata;
+    int retval, numevents = 0;
+
+    retval = epoll_wait(state->epfd,state->events,eventLoop->setsize,
+            tvp ? (tvp->tv_sec*1000 + tvp->tv_usec/1000) : -1);
+    if (retval > 0) {
+        int j;
+
+        numevents = retval;
+        for (j = 0; j < numevents; j++) {
+            int mask = 0;
+            struct epoll_event *e = state->events+j;
+
+            if (e->events & EPOLLIN) mask |= AE_READABLE;
+            if (e->events & EPOLLOUT) mask |= AE_WRITABLE;
+            if (e->events & EPOLLERR) mask |= AE_WRITABLE;
+            if (e->events & EPOLLHUP) mask |= AE_WRITABLE;
+            eventLoop->fired[j].fd = e->data.fd;
+            eventLoop->fired[j].mask = mask;
+        }
+    }
+    return numevents;
+}
+
+static char *aeApiName(void) {
+    return "epoll";
+}
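
One subtlety in aeApiAddEvent() above: epoll keeps the interest set in the
kernel, so re-arming an fd that is already registered must use EPOLL_CTL_MOD;
EPOLL_CTL_ADD would fail with EEXIST. ae decides by checking
eventLoop->events[fd].mask. A standalone sketch of the same dance (arm() is a
hypothetical helper):

    #include <sys/epoll.h>
    #include <errno.h>
    #include <stdint.h>
    #include <unistd.h>

    /* Register fd for 'events', falling back to EPOLL_CTL_MOD when the fd
     * is already in the interest set -- the ADD-vs-MOD choice that
     * aeApiAddEvent() makes by inspecting the previously stored mask. */
    static int arm(int epfd, int fd, uint32_t events) {
        struct epoll_event ee = {0};
        ee.events = events;
        ee.data.fd = fd;
        if (epoll_ctl(epfd, EPOLL_CTL_ADD, fd, &ee) == 0) return 0;
        if (errno == EEXIST) /* already registered: modify instead */
            return epoll_ctl(epfd, EPOLL_CTL_MOD, fd, &ee);
        return -1;
    }

    int main(void) {
        int epfd = epoll_create(1024); /* the size is only a hint */
        arm(epfd, STDIN_FILENO, EPOLLIN);
        arm(epfd, STDIN_FILENO, EPOLLIN | EPOLLOUT); /* takes the MOD path */
        close(epfd);
        return 0;
    }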


http://git-wip-us.apache.org/repos/asf/arrow/blob/5e343098/cpp/src/plasma/extension.cc
----------------------------------------------------------------------
diff --git a/cpp/src/plasma/extension.cc b/cpp/src/plasma/extension.cc
new file mode 100644
index 0000000..5d61e33
--- /dev/null
+++ b/cpp/src/plasma/extension.cc
@@ -0,0 +1,456 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include "plasma/extension.h"
+
+#include <algorithm>
+#include <vector>
+
+#include "plasma/client.h"
+#include "plasma/common.h"
+#include "plasma/io.h"
+#include "plasma/protocol.h"
+
+PyObject* PlasmaOutOfMemoryError;
+PyObject* PlasmaObjectExistsError;
+
+PyObject* PyPlasma_connect(PyObject* self, PyObject* args) {
+  const char* store_socket_name;
+  const char* manager_socket_name;
+  int release_delay;
+  if (!PyArg_ParseTuple(
+          args, "ssi", &store_socket_name, &manager_socket_name, &release_delay)) {
+    return NULL;
+  }
+  PlasmaClient* client = new PlasmaClient();
+  ARROW_CHECK_OK(client->Connect(store_socket_name, manager_socket_name, release_delay));
+
+  return PyCapsule_New(client, "plasma", NULL);
+}
+
+PyObject* PyPlasma_disconnect(PyObject* self, PyObject* args) {
+  PyObject* client_capsule;
+  if (!PyArg_ParseTuple(args, "O", &client_capsule)) { return NULL; }
+  PlasmaClient* client;
+  ARROW_CHECK(PyObjectToPlasmaClient(client_capsule, &client));
+  ARROW_CHECK_OK(client->Disconnect());
+  /* We use the context of the connection capsule to indicate if the
+   * connection is still active (the context is NULL) or closed (the context
+   * is (void*) 0x1). This is necessary because the primary pointer of the
+   * capsule cannot be NULL. */
+  PyCapsule_SetContext(client_capsule, reinterpret_cast<void*>(0x1));
+  Py_RETURN_NONE;
+}
+
+PyObject* PyPlasma_create(PyObject* self, PyObject* args) {
+  PlasmaClient* client;
+  ObjectID object_id;
+  Py_ssize_t size;
+  PyObject* metadata;
+  if (!PyArg_ParseTuple(args, "O&O&nO", PyObjectToPlasmaClient, &client,
+          PyStringToUniqueID, &object_id, &size, &metadata)) {
+    return NULL;
+  }
+  if (!PyByteArray_Check(metadata)) {
+    PyErr_SetString(PyExc_TypeError, "metadata must be a bytearray");
+    return NULL;
+  }
+  uint8_t* data;
+  Status s = client->Create(object_id, size,
+      reinterpret_cast<uint8_t*>(PyByteArray_AsString(metadata)),
+      PyByteArray_Size(metadata), &data);
+  if (s.IsPlasmaObjectExists()) {
+    PyErr_SetString(PlasmaObjectExistsError,
+        "An object with this ID already exists in the plasma "
+        "store.");
+    return NULL;
+  }
+  if (s.IsPlasmaStoreFull()) {
+    PyErr_SetString(PlasmaOutOfMemoryError,
+        "The plasma store ran out of memory and could not create "
+        "this object.");
+    return NULL;
+  }
+  ARROW_CHECK(s.ok());
+
+#if PY_MAJOR_VERSION >= 3
+  return PyMemoryView_FromMemory(reinterpret_cast<char*>(data), size, PyBUF_WRITE);
+#else
+  return PyBuffer_FromReadWriteMemory(reinterpret_cast<void*>(data), size);
+#endif
+}
+
+PyObject* PyPlasma_hash(PyObject* self, PyObject* args) {
+  PlasmaClient* client;
+  ObjectID object_id;
+  if (!PyArg_ParseTuple(args, "O&O&", PyObjectToPlasmaClient, &client, PyStringToUniqueID,
+          &object_id)) {
+    return NULL;
+  }
+  unsigned char digest[kDigestSize];
+  bool success = plasma_compute_object_hash(client, object_id, digest);
+  if (success) {
+    PyObject* digest_string =
+        PyBytes_FromStringAndSize(reinterpret_cast<char*>(digest), kDigestSize);
+    return digest_string;
+  } else {
+    Py_RETURN_NONE;
+  }
+}
+
+PyObject* PyPlasma_seal(PyObject* self, PyObject* args) {
+  PlasmaClient* client;
+  ObjectID object_id;
+  if (!PyArg_ParseTuple(args, "O&O&", PyObjectToPlasmaClient, &client, PyStringToUniqueID,
+          &object_id)) {
+    return NULL;
+  }
+  ARROW_CHECK_OK(client->Seal(object_id));
+  Py_RETURN_NONE;
+}
+
+PyObject* PyPlasma_release(PyObject* self, PyObject* args) {
+  PlasmaClient* client;
+  ObjectID object_id;
+  if (!PyArg_ParseTuple(args, "O&O&", PyObjectToPlasmaClient, &client, PyStringToUniqueID,
+          &object_id)) {
+    return NULL;
+  }
+  ARROW_CHECK_OK(client->Release(object_id));
+  Py_RETURN_NONE;
+}
+
+PyObject* PyPlasma_get(PyObject* self, PyObject* args) {
+  PlasmaClient* client;
+  PyObject* object_id_list;
+  Py_ssize_t timeout_ms;
+  if (!PyArg_ParseTuple(
+          args, "O&On", PyObjectToPlasmaClient, &client, &object_id_list, &timeout_ms)) {
+    return NULL;
+  }
+
+  Py_ssize_t num_object_ids = PyList_Size(object_id_list);
+  std::vector<ObjectID> object_ids(num_object_ids);
+  std::vector<ObjectBuffer> object_buffers(num_object_ids);
+
+  for (int i = 0; i < num_object_ids; ++i) {
+    PyStringToUniqueID(PyList_GetItem(object_id_list, i), &object_ids[i]);
+  }
+
+  Py_BEGIN_ALLOW_THREADS;
+  ARROW_CHECK_OK(
+      client->Get(object_ids.data(), num_object_ids, timeout_ms, object_buffers.data()));
+  Py_END_ALLOW_THREADS;
+
+  PyObject* returns = PyList_New(num_object_ids);
+  for (int i = 0; i < num_object_ids; ++i) {
+    if (object_buffers[i].data_size != -1) {
+      /* The object was retrieved, so return the object. */
+      PyObject* t = PyTuple_New(2);
+      Py_ssize_t data_size = static_cast<Py_ssize_t>(object_buffers[i].data_size);
+      Py_ssize_t metadata_size = static_cast<Py_ssize_t>(object_buffers[i].metadata_size);
+#if PY_MAJOR_VERSION >= 3
+      char* data = reinterpret_cast<char*>(object_buffers[i].data);
+      char* metadata = reinterpret_cast<char*>(object_buffers[i].metadata);
+      PyTuple_SET_ITEM(t, 0, PyMemoryView_FromMemory(data, data_size, PyBUF_READ));
+      PyTuple_SET_ITEM(
+          t, 1, PyMemoryView_FromMemory(metadata, metadata_size, PyBUF_READ));
+#else
+      void* data = reinterpret_cast<void*>(object_buffers[i].data);
+      void* metadata = reinterpret_cast<void*>(object_buffers[i].metadata);
+      PyTuple_SET_ITEM(t, 0, PyBuffer_FromMemory(data, data_size));
+      PyTuple_SET_ITEM(t, 1, PyBuffer_FromMemory(metadata, metadata_size));
+#endif
+      ARROW_CHECK(PyList_SetItem(returns, i, t) == 0);
+    } else {
+      /* The object was not retrieved, so just add None to the list of return
+       * values. */
+      Py_INCREF(Py_None);
+      ARROW_CHECK(PyList_SetItem(returns, i, Py_None) == 0);
+    }
+  }
+  return returns;
+}
+
+PyObject* PyPlasma_contains(PyObject* self, PyObject* args) {
+  PlasmaClient* client;
+  ObjectID object_id;
+  if (!PyArg_ParseTuple(args, "O&O&", PyObjectToPlasmaClient, &client, PyStringToUniqueID,
+          &object_id)) {
+    return NULL;
+  }
+  bool has_object;
+  ARROW_CHECK_OK(client->Contains(object_id, &has_object));
+
+  if (has_object) {
+    Py_RETURN_TRUE;
+  } else {
+    Py_RETURN_FALSE;
+  }
+}
+
+PyObject* PyPlasma_fetch(PyObject* self, PyObject* args) {
+  PlasmaClient* client;
+  PyObject* object_id_list;
+  if (!PyArg_ParseTuple(args, "O&O", PyObjectToPlasmaClient, &client, &object_id_list)) {
+    return NULL;
+  }
+  if (client->get_manager_fd() == -1) {
+    PyErr_SetString(PyExc_RuntimeError, "Not connected to the plasma manager");
+    return NULL;
+  }
+  Py_ssize_t n = PyList_Size(object_id_list);
+  ObjectID* object_ids = new ObjectID[n];
+  for (int i = 0; i < n; ++i) {
+    PyStringToUniqueID(PyList_GetItem(object_id_list, i), &object_ids[i]);
+  }
+  ARROW_CHECK_OK(client->Fetch(static_cast<int>(n), object_ids));
+  delete[] object_ids;
+  Py_RETURN_NONE;
+}
+
+PyObject* PyPlasma_wait(PyObject* self, PyObject* args) {
+  PlasmaClient* client;
+  PyObject* object_id_list;
+  Py_ssize_t timeout;
+  int num_returns;
+  if (!PyArg_ParseTuple(args, "O&Oni", PyObjectToPlasmaClient, &client, &object_id_list,
+          &timeout, &num_returns)) {
+    return NULL;
+  }
+  Py_ssize_t n = PyList_Size(object_id_list);
+
+  if (client->get_manager_fd() == -1) {
+    PyErr_SetString(PyExc_RuntimeError, "Not connected to the plasma manager");
+    return NULL;
+  }
+  if (num_returns < 0) {
+    PyErr_SetString(
+        PyExc_RuntimeError, "The argument num_returns cannot be less than zero.");
+    return NULL;
+  }
+  if (num_returns > n) {
+    PyErr_SetString(PyExc_RuntimeError,
+        "The argument num_returns cannot be greater than len(object_ids)");
+    return NULL;
+  }
+  int64_t threshold = 1 << 30;
+  if (timeout > threshold) {
+    PyErr_SetString(
+        PyExc_RuntimeError, "The argument timeout cannot be greater than 2 ** 30.");
+    return NULL;
+  }
+
+  std::vector<ObjectRequest> object_requests(n);
+  for (int i = 0; i < n; ++i) {
+    ARROW_CHECK(PyStringToUniqueID(PyList_GetItem(object_id_list, i),
+                    &object_requests[i].object_id) == 1);
+    object_requests[i].type = PLASMA_QUERY_ANYWHERE;
+  }
+  /* Drop the global interpreter lock while we are waiting, so other threads can
+   * run. */
+  int num_return_objects;
+  Py_BEGIN_ALLOW_THREADS;
+  ARROW_CHECK_OK(
+      client->Wait(n, object_requests.data(), num_returns, timeout, &num_return_objects));
+  Py_END_ALLOW_THREADS;
+
+  int num_to_return = std::min(num_return_objects, num_returns);
+  PyObject* ready_ids = PyList_New(num_to_return);
+  PyObject* waiting_ids = PySet_New(object_id_list);
+  int num_returned = 0;
+  for (int i = 0; i < n; ++i) {
+    if (num_returned == num_to_return) { break; }
+    if (object_requests[i].status == ObjectStatus_Local ||
+        object_requests[i].status == ObjectStatus_Remote) {
+      PyObject* ready = PyBytes_FromStringAndSize(
+          reinterpret_cast<char*>(&object_requests[i].object_id),
+          sizeof(object_requests[i].object_id));
+      PyList_SetItem(ready_ids, num_returned, ready);
+      PySet_Discard(waiting_ids, ready);
+      num_returned += 1;
+    } else {
+      ARROW_CHECK(object_requests[i].status == ObjectStatus_Nonexistent);
+    }
+  }
+  ARROW_CHECK(num_returned == num_to_return);
+  /* Return both the ready IDs and the remaining IDs. */
+  PyObject* t = PyTuple_New(2);
+  PyTuple_SetItem(t, 0, ready_ids);
+  PyTuple_SetItem(t, 1, waiting_ids);
+  return t;
+}
+
+PyObject* PyPlasma_evict(PyObject* self, PyObject* args) {
+  PlasmaClient* client;
+  Py_ssize_t num_bytes;
+  if (!PyArg_ParseTuple(args, "O&n", PyObjectToPlasmaClient, &client, &num_bytes)) {
+    return NULL;
+  }
+  int64_t evicted_bytes;
+  ARROW_CHECK_OK(client->Evict(static_cast<int64_t>(num_bytes), evicted_bytes));
+  return PyLong_FromSsize_t(static_cast<Py_ssize_t>(evicted_bytes));
+}
+
+PyObject* PyPlasma_delete(PyObject* self, PyObject* args) {
+  PlasmaClient* client;
+  ObjectID object_id;
+  if (!PyArg_ParseTuple(args, "O&O&", PyObjectToPlasmaClient, &client, PyStringToUniqueID,
+          &object_id)) {
+    return NULL;
+  }
+  ARROW_CHECK_OK(client->Delete(object_id));
+  Py_RETURN_NONE;
+}
+
+PyObject* PyPlasma_transfer(PyObject* self, PyObject* args) {
+  PlasmaClient* client;
+  ObjectID object_id;
+  const char* addr;
+  int port;
+  if (!PyArg_ParseTuple(args, "O&O&si", PyObjectToPlasmaClient, &client,
+          PyStringToUniqueID, &object_id, &addr, &port)) {
+    return NULL;
+  }
+
+  if (client->get_manager_fd() == -1) {
+    PyErr_SetString(PyExc_RuntimeError, "Not connected to the plasma manager");
+    return NULL;
+  }
+
+  ARROW_CHECK_OK(client->Transfer(addr, port, object_id));
+  Py_RETURN_NONE;
+}
+
+PyObject* PyPlasma_subscribe(PyObject* self, PyObject* args) {
+  PlasmaClient* client;
+  if (!PyArg_ParseTuple(args, "O&", PyObjectToPlasmaClient, &client)) { return NULL; }
+
+  int sock;
+  ARROW_CHECK_OK(client->Subscribe(&sock));
+  return PyLong_FromLong(sock);
+}
+
+PyObject* PyPlasma_receive_notification(PyObject* self, PyObject* args) {
+  int plasma_sock;
+
+  if (!PyArg_ParseTuple(args, "i", &plasma_sock)) { return NULL; }
+  /* Receive object notification from the plasma connection socket. If the
+   * object was added, return a tuple of its fields: ObjectID, data_size,
+   * metadata_size. If the object was deleted, data_size and metadata_size will
+   * be set to -1. */
+  uint8_t* notification = read_message_async(plasma_sock);
+  if (notification == NULL) {
+    PyErr_SetString(
+        PyExc_RuntimeError, "Failed to read object notification from Plasma socket");
+    return NULL;
+  }
+  auto object_info = flatbuffers::GetRoot<ObjectInfo>(notification);
+  /* Construct a tuple from object_info and return. */
+  PyObject* t = PyTuple_New(3);
+  PyTuple_SetItem(t, 0, PyBytes_FromStringAndSize(object_info->object_id()->data(),
+                            object_info->object_id()->size()));
+  if (object_info->is_deletion()) {
+    PyTuple_SetItem(t, 1, PyLong_FromLong(-1));
+    PyTuple_SetItem(t, 2, PyLong_FromLong(-1));
+  } else {
+    PyTuple_SetItem(t, 1, PyLong_FromLong(object_info->data_size()));
+    PyTuple_SetItem(t, 2, PyLong_FromLong(object_info->metadata_size()));
+  }
+
+  delete[] notification;
+  return t;
+}
+
+static PyMethodDef plasma_methods[] = {
+    {"connect", PyPlasma_connect, METH_VARARGS, "Connect to plasma."},
+    {"disconnect", PyPlasma_disconnect, METH_VARARGS, "Disconnect from plasma."},
+    {"create", PyPlasma_create, METH_VARARGS, "Create a new plasma object."},
+    {"hash", PyPlasma_hash, METH_VARARGS, "Compute the hash of a plasma object."},
+    {"seal", PyPlasma_seal, METH_VARARGS, "Seal a plasma object."},
+    {"get", PyPlasma_get, METH_VARARGS, "Get a plasma object."},
+    {"contains", PyPlasma_contains, METH_VARARGS,
+        "Does the plasma store contain this plasma object?"},
+    {"fetch", PyPlasma_fetch, METH_VARARGS,
+        "Fetch the object from another plasma manager instance."},
+    {"wait", PyPlasma_wait, METH_VARARGS,
+        "Wait until num_returns objects in object_ids are ready."},
+    {"evict", PyPlasma_evict, METH_VARARGS,
+        "Evict some objects until we recover some number of bytes."},
+    {"release", PyPlasma_release, METH_VARARGS, "Release the plasma object."},
+    {"delete", PyPlasma_delete, METH_VARARGS, "Delete a plasma object."},
+    {"transfer", PyPlasma_transfer, METH_VARARGS,
+        "Transfer object to another plasma manager."},
+    {"subscribe", PyPlasma_subscribe, METH_VARARGS,
+        "Subscribe to the plasma notification socket."},
+    {"receive_notification", PyPlasma_receive_notification, METH_VARARGS,
+        "Receive next notification from plasma notification socket."},
+    {NULL} /* Sentinel */
+};
+
+#if PY_MAJOR_VERSION >= 3
+static struct PyModuleDef moduledef = {
+    PyModuleDef_HEAD_INIT, "libplasma",    /* m_name */
+    "A Python client library for plasma.", /* m_doc */
+    0,                                     /* m_size */
+    plasma_methods,                        /* m_methods */
+    NULL,                                  /* m_reload */
+    NULL,                                  /* m_traverse */
+    NULL,                                  /* m_clear */
+    NULL,                                  /* m_free */
+};
+#endif
+
+#if PY_MAJOR_VERSION >= 3
+#define INITERROR return NULL
+#else
+#define INITERROR return
+#endif
+
+#ifndef PyMODINIT_FUNC /* declarations for DLL import/export */
+#define PyMODINIT_FUNC void
+#endif
+
+#if PY_MAJOR_VERSION >= 3
+#define MOD_INIT(name) PyMODINIT_FUNC PyInit_##name(void)
+#else
+#define MOD_INIT(name) PyMODINIT_FUNC init##name(void)
+#endif
+
+MOD_INIT(libplasma) {
+#if PY_MAJOR_VERSION >= 3
+  PyObject* m = PyModule_Create(&moduledef);
+#else
+  PyObject* m =
+      Py_InitModule3("libplasma", plasma_methods, "A Python client library for plasma.");
+#endif
+
+  /* Create a custom exception for when an object ID is reused. */
+  char plasma_object_exists_error[] = "plasma_object_exists.error";
+  PlasmaObjectExistsError = PyErr_NewException(plasma_object_exists_error, NULL, NULL);
+  Py_INCREF(PlasmaObjectExistsError);
+  PyModule_AddObject(m, "plasma_object_exists_error", PlasmaObjectExistsError);
+  /* Create a custom exception for when the plasma store is out of memory. */
+  char plasma_out_of_memory_error[] = "plasma_out_of_memory.error";
+  PlasmaOutOfMemoryError = PyErr_NewException(plasma_out_of_memory_error, NULL, NULL);
+  Py_INCREF(PlasmaOutOfMemoryError);
+  PyModule_AddObject(m, "plasma_out_of_memory_error", PlasmaOutOfMemoryError);
+
+#if PY_MAJOR_VERSION >= 3
+  return m;
+#endif
+}
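
Since PyPlasma_disconnect() above marks a capsule by setting its context to
(void*) 0x1, a converter can refuse disconnected clients up front. A
hypothetical sketch (PyObjectToActivePlasmaClient is not part of this commit;
the real converter is in extension.h below):

    #include <Python.h>
    #include "plasma/client.h"

    // Hypothetical stricter variant of PyObjectToPlasmaClient: also reject
    // capsules whose context PyPlasma_disconnect set to (void*) 0x1.
    static int PyObjectToActivePlasmaClient(PyObject* object, PlasmaClient** client) {
      if (!PyCapsule_IsValid(object, "plasma")) {
        PyErr_SetString(PyExc_TypeError, "must be a 'plasma' capsule");
        return 0;
      }
      if (PyCapsule_GetContext(object) != NULL) {
        PyErr_SetString(PyExc_ValueError, "plasma client is already disconnected");
        return 0;
      }
      *client = reinterpret_cast<PlasmaClient*>(PyCapsule_GetPointer(object, "plasma"));
      return 1;
    }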

http://git-wip-us.apache.org/repos/asf/arrow/blob/5e343098/cpp/src/plasma/extension.h
----------------------------------------------------------------------
diff --git a/cpp/src/plasma/extension.h b/cpp/src/plasma/extension.h
new file mode 100644
index 0000000..cee30ab
--- /dev/null
+++ b/cpp/src/plasma/extension.h
@@ -0,0 +1,50 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#ifndef PLASMA_EXTENSION_H
+#define PLASMA_EXTENSION_H
+
+#undef _XOPEN_SOURCE
+#undef _POSIX_C_SOURCE
+#include <Python.h>
+
+#include "bytesobject.h"  // NOLINT
+
+#include "plasma/client.h"
+#include "plasma/common.h"
+
+static int PyObjectToPlasmaClient(PyObject* object, PlasmaClient** client) {
+  if (PyCapsule_IsValid(object, "plasma")) {
+    *client = reinterpret_cast<PlasmaClient*>(PyCapsule_GetPointer(object, "plasma"));
+    return 1;
+  } else {
+    PyErr_SetString(PyExc_TypeError, "must be a 'plasma' capsule");
+    return 0;
+  }
+}
+
+int PyStringToUniqueID(PyObject* object, ObjectID* object_id) {
+  if (PyBytes_Check(object)) {
+    memcpy(object_id, PyBytes_AsString(object), sizeof(ObjectID));
+    return 1;
+  } else {
+    PyErr_SetString(PyExc_TypeError, "must be a 20 character string");
+    return 0;
+  }
+}
+
+#endif  // PLASMA_EXTENSION_H

http://git-wip-us.apache.org/repos/asf/arrow/blob/5e343098/cpp/src/plasma/fling.cc
----------------------------------------------------------------------
diff --git a/cpp/src/plasma/fling.cc b/cpp/src/plasma/fling.cc
new file mode 100644
index 0000000..79da4f4
--- /dev/null
+++ b/cpp/src/plasma/fling.cc
@@ -0,0 +1,90 @@
+// Copyright 2013 Sharvil Nanavati
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "plasma/fling.h"
+
+#include <string.h>
+
+void init_msg(struct msghdr* msg, struct iovec* iov, char* buf, size_t buf_len) {
+  iov->iov_base = buf;
+  iov->iov_len = 1;
+
+  msg->msg_iov = iov;
+  msg->msg_iovlen = 1;
+  msg->msg_control = buf;
+  msg->msg_controllen = buf_len;
+  msg->msg_name = NULL;
+  msg->msg_namelen = 0;
+}
+
+int send_fd(int conn, int fd) {
+  struct msghdr msg;
+  struct iovec iov;
+  char buf[CMSG_SPACE(sizeof(int))];
+  memset(&buf, 0, CMSG_SPACE(sizeof(int)));
+
+  init_msg(&msg, &iov, buf, sizeof(buf));
+
+  struct cmsghdr* header = CMSG_FIRSTHDR(&msg);
+  header->cmsg_level = SOL_SOCKET;
+  header->cmsg_type = SCM_RIGHTS;
+  header->cmsg_len = CMSG_LEN(sizeof(int));
+  *reinterpret_cast<int*>(CMSG_DATA(header)) = fd;
+
+  // Send file descriptor.
+  ssize_t r = sendmsg(conn, &msg, 0);
+  if (r >= 0) {
+    return 0;
+  } else {
+    return static_cast<int>(r);
+  }
+}
+
+int recv_fd(int conn) {
+  struct msghdr msg;
+  struct iovec iov;
+  char buf[CMSG_SPACE(sizeof(int))];
+  init_msg(&msg, &iov, buf, sizeof(buf));
+
+  if (recvmsg(conn, &msg, 0) == -1) return -1;
+
+  int found_fd = -1;
+  int oh_noes = 0;
+  for (struct cmsghdr* header = CMSG_FIRSTHDR(&msg); header != NULL;
+       header = CMSG_NXTHDR(&msg, header))
+    if (header->cmsg_level == SOL_SOCKET && header->cmsg_type == SCM_RIGHTS) {
+      ssize_t count =
+          (header->cmsg_len - (CMSG_DATA(header) - (unsigned char*)header)) / sizeof(int);
+      for (int i = 0; i < count; ++i) {
+        int fd = (reinterpret_cast<int*>(CMSG_DATA(header)))[i];
+        if (found_fd == -1) {
+          found_fd = fd;
+        } else {
+          close(fd);
+          oh_noes = 1;
+        }
+      }
+    }
+
+  // The sender sent us more than one file descriptor. We've closed
+  // them all to prevent fd leaks but notify the caller that we got
+  // a bad message.
+  if (oh_noes) {
+    close(found_fd);
+    errno = EBADMSG;
+    return -1;
+  }
+
+  return found_fd;
+}

http://git-wip-us.apache.org/repos/asf/arrow/blob/5e343098/cpp/src/plasma/fling.h
----------------------------------------------------------------------
diff --git a/cpp/src/plasma/fling.h b/cpp/src/plasma/fling.h
new file mode 100644
index 0000000..78ac9d1
--- /dev/null
+++ b/cpp/src/plasma/fling.h
@@ -0,0 +1,52 @@
+// Copyright 2013 Sharvil Nanavati
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// FLING: Exchanging file descriptors over sockets
+//
+// This is a little library for sending file descriptors over a socket
+// between processes. The reasons for doing that (as opposed to using
+// filenames to share the files) are (a) to make sure no files remain in
+// the filesystem after all the processes terminate, (b) to make sure that
+// there are no name collisions and (c) to be able to control who has
+// access to the data.
+//
+// Most of the code is from https://github.com/sharvil/flingfd
+
+#include <errno.h>
+#include <sys/socket.h>
+#include <sys/types.h>
+#include <sys/un.h>
+#include <unistd.h>
+
+// This is necessary for Mac OS X, see http://www.apuebook.com/faqs2e.html
+// (10).
+#if !defined(CMSG_SPACE) && !defined(CMSG_LEN)
+#define CMSG_SPACE(len) (__DARWIN_ALIGN32(sizeof(struct cmsghdr)) + __DARWIN_ALIGN32(len))
+#define CMSG_LEN(len) (__DARWIN_ALIGN32(sizeof(struct cmsghdr)) + (len))
+#endif
+
+void init_msg(struct msghdr* msg, struct iovec* iov, char* buf, size_t buf_len);
+
+// Send a file descriptor over a unix domain socket.
+//
+// @param conn Unix domain socket to send the file descriptor over.
+// @param fd File descriptor to send over.
+// @return Status code which is < 0 on failure.
+int send_fd(int conn, int fd);
+
+// Receive a file descriptor over a unix domain socket.
+//
+// @param conn Unix domain socket to receive the file descriptor from.
+// @return File descriptor or a value < 0 on failure.
+int recv_fd(int conn);
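
A minimal sketch of the API above over a socketpair (error handling mostly
elided; the received descriptor is a new fd that refers to the same open file
description as the one sent):

    #include "plasma/fling.h"
    #include <fcntl.h>
    #include <stdio.h>

    int main(void) {
      int sv[2];
      /* Any SOCK_STREAM unix socket works; a socketpair keeps it local. */
      if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) != 0) return 1;

      int fd = open("/dev/null", O_RDONLY);
      if (send_fd(sv[0], fd) < 0) return 1; /* ships fd via SCM_RIGHTS */

      int received = recv_fd(sv[1]); /* new descriptor, same file */
      if (received < 0) return 1;
      printf("sent %d, received %d\n", fd, received);

      close(fd);
      close(received);
      close(sv[0]);
      close(sv[1]);
      return 0;
    }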

http://git-wip-us.apache.org/repos/asf/arrow/blob/5e343098/cpp/src/plasma/format/common.fbs
----------------------------------------------------------------------
diff --git a/cpp/src/plasma/format/common.fbs b/cpp/src/plasma/format/common.fbs
new file mode 100644
index 0000000..4d7d285
--- /dev/null
+++ b/cpp/src/plasma/format/common.fbs
@@ -0,0 +1,34 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Object information data structure.
+table ObjectInfo {
+  // Object ID of this object.
+  object_id: string;
+  // Number of bytes the content of this object occupies in memory.
+  data_size: long;
+  // Number of bytes the metadata of this object occupies in memory.
+  metadata_size: long;
+  // Unix epoch of when this object was created.
+  create_time: long;
+  // How long creation of this object took.
+  construct_duration: long;
+  // Hash of the object content.
+  digest: string;
+  // Specifies if this object was deleted or added.
+  is_deletion: bool;
+}
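
flatc turns the table above into plain accessor code; decoding a notification
payload then looks like the sketch below, mirroring PyPlasma_receive_notification()
in extension.cc above ("common_generated.h" is the assumed name of the
generated header):

    #include "flatbuffers/flatbuffers.h"
    #include "plasma/format/common_generated.h"  // assumed generated header path

    #include <cstdint>
    #include <cstdio>

    // Each plasma notification payload is one serialized ObjectInfo table.
    void PrintNotification(const uint8_t* notification) {
      auto info = flatbuffers::GetRoot<ObjectInfo>(notification);
      if (info->is_deletion()) {
        std::printf("object deleted\n");
      } else {
        std::printf("object added: data_size=%lld metadata_size=%lld\n",
                    static_cast<long long>(info->data_size()),
                    static_cast<long long>(info->metadata_size()));
      }
    }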

http://git-wip-us.apache.org/repos/asf/arrow/blob/5e343098/cpp/src/plasma/format/plasma.fbs
----------------------------------------------------------------------
diff --git a/cpp/src/plasma/format/plasma.fbs b/cpp/src/plasma/format/plasma.fbs
new file mode 100644
index 0000000..23782ad
--- /dev/null
+++ b/cpp/src/plasma/format/plasma.fbs
@@ -0,0 +1,291 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Plasma protocol specification
+
+enum MessageType:int {
+  // Create a new object.
+  PlasmaCreateRequest = 1,
+  PlasmaCreateReply,
+  // Seal an object.
+  PlasmaSealRequest,
+  PlasmaSealReply,
+  // Get an object that is stored on the local Plasma store.
+  PlasmaGetRequest,
+  PlasmaGetReply,
+  // Release an object.
+  PlasmaReleaseRequest,
+  PlasmaReleaseReply,
+  // Delete an object.
+  PlasmaDeleteRequest,
+  PlasmaDeleteReply,
+  // Get status of an object.
+  PlasmaStatusRequest,
+  PlasmaStatusReply,
+  // See if the store contains an object (will be deprecated).
+  PlasmaContainsRequest,
+  PlasmaContainsReply,
+  // Get information for a newly connecting client.
+  PlasmaConnectRequest,
+  PlasmaConnectReply,
+  // Make room for new objects in the plasma store.
+  PlasmaEvictRequest,
+  PlasmaEvictReply,
+  // Fetch objects from remote Plasma stores.
+  PlasmaFetchRequest,
+  // Wait for objects to be ready either from local or remote Plasma stores.
+  PlasmaWaitRequest,
+  PlasmaWaitReply,
+  // Subscribe to a list of objects or to all objects.
+  PlasmaSubscribeRequest,
+  // Unsubscribe.
+  PlasmaUnsubscribeRequest,
+  // Sending and receiving data.
+  // PlasmaDataRequest initiates sending the data; there will be one
+  // such message per data transfer.
+  PlasmaDataRequest,
+  // PlasmaDataReply contains the actual data and is sent back to the
+  // object store that requested the data. For each transfer, multiple
+  // reply messages get sent. Each one contains a fixed number of bytes.
+  PlasmaDataReply,
+  // Object notifications.
+  PlasmaNotification
+}
+
+enum PlasmaError:int {
+  // Operation was successful.
+  OK,
+  // Trying to create an object that already exists.
+  ObjectExists,
+  // Trying to access an object that doesn't exist.
+  ObjectNonexistent,
+  // Trying to create an object but there isn't enough space in the store.
+  OutOfMemory
+}
+
+// Plasma store messages
+
+struct PlasmaObjectSpec {
+  // Index of the memory segment (= memory mapped file) that
+  // this object is allocated in.
+  segment_index: int;
+  // Size in bytes of this segment (needed to call mmap).
+  mmap_size: ulong;
+  // The offset in bytes in the memory mapped file of the data.
+  data_offset: ulong;
+  // The size in bytes of the data.
+  data_size: ulong;
+  // The offset in bytes in the memory mapped file of the metadata.
+  metadata_offset: ulong;
+  // The size in bytes of the metadata.
+  metadata_size: ulong;
+}
+
+table PlasmaCreateRequest {
+  // ID of the object to be created.
+  object_id: string;
+  // The size of the object's data in bytes.
+  data_size: ulong;
+  // The size of the object's metadata in bytes.
+  metadata_size: ulong;
+}
+
+table PlasmaCreateReply {
+  // ID of the object that was created.
+  object_id: string;
+  // The object that is returned with this reply.
+  plasma_object: PlasmaObjectSpec;
+  // Error that occurred for this call.
+  error: PlasmaError;
+}
+
+table PlasmaSealRequest {
+  // ID of the object to be sealed.
+  object_id: string;
+  // Hash of the object data.
+  digest: string;
+}
+
+table PlasmaSealReply {
+  // ID of the object that was sealed.
+  object_id: string;
+  // Error code.
+  error: PlasmaError;
+}
+
+table PlasmaGetRequest {
+  // IDs of the objects stored in the local Plasma store that we are getting.
+  object_ids: [string];
+  // The number of milliseconds before the request should timeout.
+  timeout_ms: long;
+}
+
+table PlasmaGetReply {
+  // IDs of the objects being returned.
+  // This number can be smaller than the number of requested
+  // objects if not all requested objects are stored and sealed
+  // in the local Plasma store.
+  object_ids: [string];
+  // Plasma object information, in the same order as their IDs.
+  plasma_objects: [PlasmaObjectSpec];
+  // The number of elements in both object_ids and plasma_objects arrays must agree.
+}
+
+table PlasmaReleaseRequest {
+  // ID of the object to be released.
+  object_id: string;
+}
+
+table PlasmaReleaseReply {
+  // ID of the object that was released.
+  object_id: string;
+  // Error code.
+  error: PlasmaError;
+}
+
+table PlasmaDeleteRequest {
+  // ID of the object to be deleted.
+  object_id: string;
+}
+
+table PlasmaDeleteReply {
+  // ID of the object that was deleted.
+  object_id: string;
+  // Error code.
+  error: PlasmaError;
+}
+
+table PlasmaStatusRequest {
+  // IDs of the objects stored in the local Plasma store whose status we request.
+  object_ids: [string];
+}
+
+enum ObjectStatus:int {
+  // Object is stored in the local Plasma Store.
+  Local = 1,
+  // Object is stored on a remote Plasma store, and it is not stored on the
+  // local Plasma Store.
+  Remote,
+  // Object is not stored in the system.
+  Nonexistent,
+  // Object is currently being transferred from a remote Plasma store to the
+  // local Plasma Store.
+  Transfer
+}
+
+table PlasmaStatusReply {
+  // IDs of the objects being returned.
+  object_ids: [string];
+  // Status of the object.
+  status: [ObjectStatus];
+}
+
+// PlasmaContains is a subset of PlasmaStatus which does not
+// involve the plasma manager, only the store. We should consider
+// unifying them in the future and deprecating PlasmaContains.
+
+table PlasmaContainsRequest {
+  // ID of the object we are querying.
+  object_id: string;
+}
+
+table PlasmaContainsReply {
+  // ID of the object we are querying.
+  object_id: string;
+  // 1 if the object is in the store and 0 otherwise.
+  has_object: int;
+}
+
+// PlasmaConnect is used by a plasma client the first time it connects with the
+// store. This is not really necessary, but is used to get some information
+// about the store such as its memory capacity.
+
+table PlasmaConnectRequest {
+}
+
+table PlasmaConnectReply {
+  // The memory capacity of the store.
+  memory_capacity: long;
+}
+
+table PlasmaEvictRequest {
+  // Number of bytes that shall be freed.
+  num_bytes: ulong;
+}
+
+table PlasmaEvictReply {
+  // Number of bytes that have been freed.
+  num_bytes: ulong;
+}
+
+table PlasmaFetchRequest {
+  // IDs of the objects to be fetched.
+  object_ids: [string];
+}
+
+table ObjectRequestSpec {
+  // ID of the object.
+  object_id: string;
+  // The type of the object request. This specifies whether we
+  // will be waiting for an object stored in the local or in the
+  // global Plasma store.
+  type: int;
+}
+
+table PlasmaWaitRequest {
+  // Array of object requests whose status we are asking for.
+  object_requests: [ObjectRequestSpec];
+  // Number of objects expected to be returned, if available.
+  num_ready_objects: int;
+  // Timeout of the wait request.
+  timeout: long;
+}
+
+table ObjectReply {
+  // ID of the object.
+  object_id: string;
+  // The object status. This specifies where the object is stored.
+  status: int;
+}
+
+table PlasmaWaitReply {
+  // Array of object requests being returned.
+  object_requests: [ObjectReply];
+  // Number of objects expected to be returned, if available.
+  num_ready_objects: int;
+}
+
+table PlasmaSubscribeRequest {
+}
+
+table PlasmaDataRequest {
+  // ID of the object that is requested.
+  object_id: string;
+  // The host address where the data shall be sent to.
+  address: string;
+  // The port of the manager the data shall be sent to.
+  port: int;
+}
+
+table PlasmaDataReply {
+  // ID of the object that will be sent.
+  object_id: string;
+  // Size of the object data in bytes.
+  object_size: ulong;
+  // Size of the metadata in bytes.
+  metadata_size: ulong;
+}
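
To make the wire protocol concrete: io.cc below frames every message as
version, type and length followed by the flatbuffer payload. A sketch of
emitting a PlasmaCreateRequest that way ("plasma_generated.h" is the assumed
name of the generated header, and WriteMessage() is assumed to be declared in
plasma/io.h):

    #include <string>

    #include "flatbuffers/flatbuffers.h"
    #include "plasma/format/plasma_generated.h"  // assumed generated header path
    #include "plasma/io.h"

    // Build a PlasmaCreateRequest and frame it with WriteMessage(), which
    // prepends PLASMA_PROTOCOL_VERSION, the message type and the length.
    arrow::Status SendCreateRequest(int fd, const std::string& object_id,
                                    uint64_t data_size, uint64_t metadata_size) {
      flatbuffers::FlatBufferBuilder fbb;
      auto message = CreatePlasmaCreateRequest(
          fbb, fbb.CreateString(object_id), data_size, metadata_size);
      fbb.Finish(message);
      return WriteMessage(fd, MessageType_PlasmaCreateRequest,
                          fbb.GetSize(), fbb.GetBufferPointer());
    }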

http://git-wip-us.apache.org/repos/asf/arrow/blob/5e343098/cpp/src/plasma/io.cc
----------------------------------------------------------------------
diff --git a/cpp/src/plasma/io.cc b/cpp/src/plasma/io.cc
new file mode 100644
index 0000000..5875ebb
--- /dev/null
+++ b/cpp/src/plasma/io.cc
@@ -0,0 +1,212 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include "plasma/io.h"
+
+#include "plasma/common.h"
+
+using arrow::Status;
+
+/* Number of times we try binding to a socket. */
+#define NUM_BIND_ATTEMPTS 5
+#define BIND_TIMEOUT_MS 100
+
+/* Number of times we try connecting to a socket. */
+#define NUM_CONNECT_ATTEMPTS 50
+#define CONNECT_TIMEOUT_MS 100
+
+Status WriteBytes(int fd, uint8_t* cursor, size_t length) {
+  ssize_t nbytes = 0;
+  size_t bytesleft = length;
+  size_t offset = 0;
+  while (bytesleft > 0) {
+    /* While we haven't written the whole message, write to the file descriptor,
+     * advance the cursor, and decrease the amount left to write. */
+    nbytes = write(fd, cursor + offset, bytesleft);
+    if (nbytes < 0) {
+      if (errno == EAGAIN || errno == EWOULDBLOCK || errno == EINTR) { continue; }
+      return Status::IOError(std::string(strerror(errno)));
+    } else if (nbytes == 0) {
+      return Status::IOError("Encountered unexpected EOF");
+    }
+    ARROW_CHECK(nbytes > 0);
+    bytesleft -= nbytes;
+    offset += nbytes;
+  }
+
+  return Status::OK();
+}
+
+Status WriteMessage(int fd, int64_t type, int64_t length, uint8_t* bytes) {
+  int64_t version = PLASMA_PROTOCOL_VERSION;
+  RETURN_NOT_OK(WriteBytes(fd, reinterpret_cast<uint8_t*>(&version), sizeof(version)));
+  RETURN_NOT_OK(WriteBytes(fd, reinterpret_cast<uint8_t*>(&type), sizeof(type)));
+  RETURN_NOT_OK(WriteBytes(fd, reinterpret_cast<uint8_t*>(&length), sizeof(length)));
+  return WriteBytes(fd, bytes, length * sizeof(char));
+}
+
+Status ReadBytes(int fd, uint8_t* cursor, size_t length) {
+  ssize_t nbytes = 0;
+  /* Termination condition: EOF or read 'length' bytes total. */
+  size_t bytesleft = length;
+  size_t offset = 0;
+  while (bytesleft > 0) {
+    nbytes = read(fd, cursor + offset, bytesleft);
+    if (nbytes < 0) {
+      if (errno == EAGAIN || errno == EWOULDBLOCK || errno == EINTR) { continue; }
+      return Status::IOError(std::string(strerror(errno)));
+    } else if (0 == nbytes) {
+      return Status::IOError("Encountered unexpected EOF");
+    }
+    ARROW_CHECK(nbytes > 0);
+    bytesleft -= nbytes;
+    offset += nbytes;
+  }
+
+  return Status::OK();
+}
+
+Status ReadMessage(int fd, int64_t* type, std::vector<uint8_t>* buffer) {
+  int64_t version;
+  RETURN_NOT_OK_ELSE(ReadBytes(fd, reinterpret_cast<uint8_t*>(&version), sizeof(version)),
+      *type = DISCONNECT_CLIENT);
+  ARROW_CHECK(version == PLASMA_PROTOCOL_VERSION) << "version = " << version;
+  size_t length;
+  RETURN_NOT_OK_ELSE(ReadBytes(fd, reinterpret_cast<uint8_t*>(type), sizeof(*type)),
+      *type = DISCONNECT_CLIENT);
+  RETURN_NOT_OK_ELSE(ReadBytes(fd, reinterpret_cast<uint8_t*>(&length), sizeof(length)),
+      *type = DISCONNECT_CLIENT);
+  if (length > buffer->size()) { buffer->resize(length); }
+  RETURN_NOT_OK_ELSE(ReadBytes(fd, buffer->data(), length), *type = DISCONNECT_CLIENT);
+  return Status::OK();
+}
+
+int bind_ipc_sock(const std::string& pathname, bool shall_listen) {
+  struct sockaddr_un socket_address;
+  int socket_fd = socket(AF_UNIX, SOCK_STREAM, 0);
+  if (socket_fd < 0) {
+    ARROW_LOG(ERROR) << "socket() failed for pathname " << pathname;
+    return -1;
+  }
+  /* Tell the system to allow the port to be reused. */
+  int on = 1;
+  if (setsockopt(socket_fd, SOL_SOCKET, SO_REUSEADDR, reinterpret_cast<char*>(&on),
+          sizeof(on)) < 0) {
+    ARROW_LOG(ERROR) << "setsockopt failed for pathname " << pathname;
+    close(socket_fd);
+    return -1;
+  }
+
+  unlink(pathname.c_str());
+  memset(&socket_address, 0, sizeof(socket_address));
+  socket_address.sun_family = AF_UNIX;
+  if (pathname.size() + 1 > sizeof(socket_address.sun_path)) {
+    ARROW_LOG(ERROR) << "Socket pathname is too long.";
+    close(socket_fd);
+    return -1;
+  }
+  strncpy(socket_address.sun_path, pathname.c_str(), pathname.size() + 1);
+
+  if (bind(socket_fd, (struct sockaddr*)&socket_address, sizeof(socket_address)) != 0) {
+    ARROW_LOG(ERROR) << "Bind failed for pathname " << pathname;
+    close(socket_fd);
+    return -1;
+  }
+  if (shall_listen && listen(socket_fd, 128) == -1) {
+    ARROW_LOG(ERROR) << "Could not listen to socket " << pathname;
+    close(socket_fd);
+    return -1;
+  }
+  return socket_fd;
+}
+
+int connect_ipc_sock_retry(
+    const std::string& pathname, int num_retries, int64_t timeout) {
+  /* Pick the default values if the user did not specify. */
+  if (num_retries < 0) { num_retries = NUM_CONNECT_ATTEMPTS; }
+  if (timeout < 0) { timeout = CONNECT_TIMEOUT_MS; }
+
+  int fd = -1;
+  for (int num_attempts = 0; num_attempts < num_retries; ++num_attempts) {
+    fd = connect_ipc_sock(pathname);
+    if (fd >= 0) { break; }
+    if (num_attempts == 0) {
+      ARROW_LOG(ERROR) << "Connection to socket failed for pathname " << pathname;
+    }
+    /* Sleep for timeout milliseconds. */
+    usleep(static_cast<int>(timeout * 1000));
+  }
+  /* If we could not connect to the socket, exit. */
+  if (fd == -1) { ARROW_LOG(FATAL) << "Could not connect to socket " << pathname; }
+  return fd;
+}
+
+int connect_ipc_sock(const std::string& pathname) {
+  struct sockaddr_un socket_address;
+  int socket_fd;
+
+  socket_fd = socket(AF_UNIX, SOCK_STREAM, 0);
+  if (socket_fd < 0) {
+    ARROW_LOG(ERROR) << "socket() failed for pathname " << pathname;
+    return -1;
+  }
+
+  memset(&socket_address, 0, sizeof(socket_address));
+  socket_address.sun_family = AF_UNIX;
+  if (pathname.size() + 1 > sizeof(socket_address.sun_path)) {
+    ARROW_LOG(ERROR) << "Socket pathname is too long.";
+    return -1;
+  }
+  strncpy(socket_address.sun_path, pathname.c_str(), pathname.size() + 1);
+
+  if (connect(socket_fd, (struct sockaddr*)&socket_address, sizeof(socket_address)) !=
+      0) {
+    close(socket_fd);
+    return -1;
+  }
+
+  return socket_fd;
+}
+
+int AcceptClient(int socket_fd) {
+  int client_fd = accept(socket_fd, NULL, NULL);
+  if (client_fd < 0) {
+    ARROW_LOG(ERROR) << "Error reading from socket.";
+    return -1;
+  }
+  return client_fd;
+}
+
+uint8_t* read_message_async(int sock) {
+  int64_t size;
+  Status s = ReadBytes(sock, reinterpret_cast<uint8_t*>(&size), sizeof(int64_t));
+  if (!s.ok()) {
+    /* The other side has closed the socket. */
+    ARROW_LOG(DEBUG) << "Socket has been closed, or some other error has occurred.";
+    close(sock);
+    return NULL;
+  }
+  uint8_t* message = reinterpret_cast<uint8_t*>(malloc(size));
+  s = ReadBytes(sock, message, size);
+  if (!s.ok()) {
+    /* The other side has closed the socket. */
+    ARROW_LOG(DEBUG) << "Socket has been closed, or some other error has occurred.";
+    close(sock);
+    return NULL;
+  }
+  return message;
+}
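
The wire format implemented by WriteMessage/ReadMessage is simply
[version | type | length | payload], with the three header fields written as
raw 8-byte integers in host byte order. A round-trip sketch over a socketpair
(illustrative only; assumes "plasma/io.h" on a 64-bit POSIX host, so the
size_t length read by ReadMessage matches the int64_t that was written):

arrow::Status FramingRoundTrip() {
  int fds[2];
  if (socketpair(AF_UNIX, SOCK_STREAM, 0, fds) != 0) {
    return arrow::Status::IOError("socketpair failed");
  }
  uint8_t payload[3] = {1, 2, 3};
  RETURN_NOT_OK(WriteMessage(fds[0], /*type=*/42, sizeof(payload), payload));
  int64_t type;
  std::vector<uint8_t> buffer;
  RETURN_NOT_OK(ReadMessage(fds[1], &type, &buffer));
  // At this point type == 42 and buffer holds {1, 2, 3}.
  close(fds[0]);
  close(fds[1]);
  return arrow::Status::OK();
}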

http://git-wip-us.apache.org/repos/asf/arrow/blob/5e343098/cpp/src/plasma/io.h
----------------------------------------------------------------------
diff --git a/cpp/src/plasma/io.h b/cpp/src/plasma/io.h
new file mode 100644
index 0000000..43c3fb5
--- /dev/null
+++ b/cpp/src/plasma/io.h
@@ -0,0 +1,55 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#ifndef PLASMA_IO_H
+#define PLASMA_IO_H
+
+#include <inttypes.h>
+#include <sys/socket.h>
+#include <sys/un.h>
+#include <unistd.h>
+
+#include <string>
+#include <vector>
+
+#include "arrow/status.h"
+
+// TODO(pcm): Replace our own custom message header (message type,
+// message length, plasma protocol version) with one that is serialized
+// using flatbuffers.
+#define PLASMA_PROTOCOL_VERSION 0x0000000000000000
+#define DISCONNECT_CLIENT 0
+
+arrow::Status WriteBytes(int fd, uint8_t* cursor, size_t length);
+
+arrow::Status WriteMessage(int fd, int64_t type, int64_t length, uint8_t* bytes);
+
+arrow::Status ReadBytes(int fd, uint8_t* cursor, size_t length);
+
+arrow::Status ReadMessage(int fd, int64_t* type, std::vector<uint8_t>* buffer);
+
+int bind_ipc_sock(const std::string& pathname, bool shall_listen);
+
+int connect_ipc_sock(const std::string& pathname);
+
+int connect_ipc_sock_retry(const std::string& pathname, int num_retries, int64_t timeout);
+
+int AcceptClient(int socket_fd);
+
+uint8_t* read_message_async(int sock);
+
+#endif  // PLASMA_IO_H

http://git-wip-us.apache.org/repos/asf/arrow/blob/5e343098/cpp/src/plasma/malloc.cc
----------------------------------------------------------------------
diff --git a/cpp/src/plasma/malloc.cc b/cpp/src/plasma/malloc.cc
new file mode 100644
index 0000000..e7ffd1a
--- /dev/null
+++ b/cpp/src/plasma/malloc.cc
@@ -0,0 +1,178 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include "plasma/malloc.h"
+
+#include <assert.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <unistd.h>
+
+#include <unordered_map>
+
+#include "plasma/common.h"
+
+extern "C" {
+void* fake_mmap(size_t);
+int fake_munmap(void*, int64_t);
+
+#define MMAP(s) fake_mmap(s)
+#define MUNMAP(a, s) fake_munmap(a, s)
+#define DIRECT_MMAP(s) fake_mmap(s)
+#define DIRECT_MUNMAP(a, s) fake_munmap(a, s)
+#define USE_DL_PREFIX
+#define HAVE_MORECORE 0
+#define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T
+#define DEFAULT_GRANULARITY ((size_t)128U * 1024U)
+
+#include "thirdparty/dlmalloc.c"
+
+#undef MMAP
+#undef MUNMAP
+#undef DIRECT_MMAP
+#undef DIRECT_MUNMAP
+#undef USE_DL_PREFIX
+#undef HAVE_MORECORE
+#undef DEFAULT_GRANULARITY
+}
+
+struct mmap_record {
+  int fd;
+  int64_t size;
+};
+
+namespace {
+
+/** Hashtable that contains one entry per segment that we got from the OS
+ *  via mmap. Associates the address of that segment with its file descriptor
+ *  and size. */
+std::unordered_map<void*, mmap_record> mmap_records;
+
+} /* namespace */
+
+constexpr int GRANULARITY_MULTIPLIER = 2;
+
+static void* pointer_advance(void* p, ptrdiff_t n) {
+  return (unsigned char*)p + n;
+}
+
+static void* pointer_retreat(void* p, ptrdiff_t n) {
+  return (unsigned char*)p - n;
+}
+
+static ptrdiff_t pointer_distance(void const* pfrom, void const* pto) {
+  return (unsigned char const*)pto - (unsigned char const*)pfrom;
+}
+
+/* Create a buffer, backed by a temporary file that is immediately
+ * unlinked so we do not leave traces in the filesystem. */
+int create_buffer(int64_t size) {
+  int fd;
+#ifdef _WIN32
+  if (!CreateFileMapping(INVALID_HANDLE_VALUE, NULL, PAGE_READWRITE,
+          (DWORD)((uint64_t)size >> (CHAR_BIT * sizeof(DWORD))), (DWORD)(uint64_t)size,
+          NULL)) {
+    fd = -1;
+  }
+#else
+#ifdef __linux__
+  constexpr char file_template[] = "/dev/shm/plasmaXXXXXX";
+#else
+  constexpr char file_template[] = "/tmp/plasmaXXXXXX";
+#endif
+  char file_name[32];
+  strncpy(file_name, file_template, 32);
+  fd = mkstemp(file_name);
+  if (fd < 0) return -1;
+  FILE* file = fdopen(fd, "a+");
+  if (!file) {
+    close(fd);
+    return -1;
+  }
+  if (unlink(file_name) != 0) {
+    ARROW_LOG(FATAL) << "unlink error";
+    return -1;
+  }
+  if (ftruncate(fd, (off_t)size) != 0) {
+    ARROW_LOG(FATAL) << "ftruncate error";
+    return -1;
+  }
+#endif
+  return fd;
+}
+
+void* fake_mmap(size_t size) {
+  /* Add sizeof(size_t) so that the returned pointer is deliberately not
+   * page-aligned. This ensures that the segments of memory returned by
+   * fake_mmap are never contiguous. */
+  size += sizeof(size_t);
+
+  int fd = create_buffer(size);
+  ARROW_CHECK(fd >= 0) << "Failed to create buffer during mmap";
+  void* pointer = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+  if (pointer == MAP_FAILED) { return pointer; }
+
+  /* Increase dlmalloc's allocation granularity directly. */
+  mparams.granularity *= GRANULARITY_MULTIPLIER;
+
+  mmap_record& record = mmap_records[pointer];
+  record.fd = fd;
+  record.size = size;
+
+  /* We lie to dlmalloc about where mapped memory actually lives. */
+  pointer = pointer_advance(pointer, sizeof(size_t));
+  ARROW_LOG(DEBUG) << pointer << " = fake_mmap(" << size << ")";
+  return pointer;
+}
+
+int fake_munmap(void* addr, int64_t size) {
+  ARROW_LOG(DEBUG) << "fake_munmap(" << addr << ", " << size << ")";
+  addr = pointer_retreat(addr, sizeof(size_t));
+  size += sizeof(size_t);
+
+  auto entry = mmap_records.find(addr);
+
+  if (entry == mmap_records.end() || entry->second.size != size) {
+    /* Reject requests to munmap that don't directly match previous
+     * calls to mmap, to prevent dlmalloc from trimming. */
+    return -1;
+  }
+
+  int r = munmap(addr, size);
+  if (r == 0) { close(entry->second.fd); }
+
+  mmap_records.erase(entry);
+  return r;
+}
+
+void get_malloc_mapinfo(void* addr, int* fd, int64_t* map_size, ptrdiff_t* offset) {
+  /* TODO(rshin): Implement a more efficient search through mmap_records. */
+  for (const auto& entry : mmap_records) {
+    if (addr >= entry.first && addr < pointer_advance(entry.first, entry.second.size)) {
+      *fd = entry.second.fd;
+      *map_size = entry.second.size;
+      *offset = pointer_distance(entry.first, addr);
+      return;
+    }
+  }
+  *fd = -1;
+  *map_size = 0;
+  *offset = 0;
+}
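
A sketch of how the store side can use this: allocate from the shared-memory
heap and recover the mmap bookkeeping a client needs. Here dlmemalign is the
USE_DL_PREFIX entry point of the bundled dlmalloc.c, the 64-byte alignment
mirrors BLOCK_SIZE from plasma.h, and the sizes are hypothetical:

extern "C" void* dlmemalign(size_t alignment, size_t bytes);

void DescribeAllocation(size_t data_size, size_t metadata_size) {
  void* pointer = dlmemalign(64, data_size + metadata_size);
  int fd = -1;
  int64_t map_size = 0;
  ptrdiff_t offset = 0;
  get_malloc_mapinfo(pointer, &fd, &map_size, &offset);
  // (fd, map_size, offset) is everything a client needs: it mmaps the same
  // file descriptor and adds `offset` to locate the object in its mapping.
}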

http://git-wip-us.apache.org/repos/asf/arrow/blob/5e343098/cpp/src/plasma/malloc.h
----------------------------------------------------------------------
diff --git a/cpp/src/plasma/malloc.h b/cpp/src/plasma/malloc.h
new file mode 100644
index 0000000..b4af2c8
--- /dev/null
+++ b/cpp/src/plasma/malloc.h
@@ -0,0 +1,26 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#ifndef PLASMA_MALLOC_H
+#define PLASMA_MALLOC_H
+
+#include <inttypes.h>
+#include <stddef.h>
+
+void get_malloc_mapinfo(void* addr, int* fd, int64_t* map_length, ptrdiff_t* offset);
+
+#endif  // PLASMA_MALLOC_H

http://git-wip-us.apache.org/repos/asf/arrow/blob/5e343098/cpp/src/plasma/plasma.cc
----------------------------------------------------------------------
diff --git a/cpp/src/plasma/plasma.cc b/cpp/src/plasma/plasma.cc
new file mode 100644
index 0000000..559d8e7
--- /dev/null
+++ b/cpp/src/plasma/plasma.cc
@@ -0,0 +1,64 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include "plasma/plasma.h"
+
+#include <sys/socket.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include "plasma/common.h"
+#include "plasma/protocol.h"
+
+int warn_if_sigpipe(int status, int client_sock) {
+  if (status >= 0) { return 0; }
+  if (errno == EPIPE || errno == EBADF || errno == ECONNRESET) {
+    ARROW_LOG(WARNING) << "Received SIGPIPE, BAD FILE DESCRIPTOR, or ECONNRESET when "
+                          "sending a message to client on fd "
+                       << client_sock << ". The client on the other end may "
+                                         "have hung up.";
+    return errno;
+  }
+  ARROW_LOG(FATAL) << "Failed to write message to client on fd " << client_sock << ".";
+  return -1;  // This is never reached.
+}
+
+/**
+ * This will create a new ObjectInfo buffer. The first sizeof(int64_t) bytes
+ * of this buffer are the length of the remaining message and the
+ * remaining message is a serialized version of the object info.
+ *
+ * @param object_info The object info to be serialized
+ * @return The object info buffer. It is the caller's responsibility to free
+ *         this buffer with "delete[]" after it has been used.
+ */
+uint8_t* create_object_info_buffer(ObjectInfoT* object_info) {
+  flatbuffers::FlatBufferBuilder fbb;
+  auto message = CreateObjectInfo(fbb, object_info);
+  fbb.Finish(message);
+  uint8_t* notification = new uint8_t[sizeof(int64_t) + fbb.GetSize()];
+  *(reinterpret_cast<int64_t*>(notification)) = fbb.GetSize();
+  memcpy(notification + sizeof(int64_t), fbb.GetBufferPointer(), fbb.GetSize());
+  return notification;
+}
+
+ObjectTableEntry* get_object_table_entry(
+    PlasmaStoreInfo* store_info, const ObjectID& object_id) {
+  auto it = store_info->objects.find(object_id);
+  if (it == store_info->objects.end()) { return NULL; }
+  return it->second.get();
+}
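
For the other direction, a subscriber that has read one notification off its
socket can decode it as below (a sketch; ObjectInfo/ObjectInfoT are generated
from format/common.fbs in this commit, and UnPackTo assumes the flatbuffers
object API that the ObjectInfoT type implies):

ObjectInfoT DecodeNotification(const uint8_t* notification) {
  // Layout: [int64 payload length][flatbuffer-encoded ObjectInfo].
  int64_t payload_size = *reinterpret_cast<const int64_t*>(notification);
  (void)payload_size;  // Used when reading from the socket, not needed here.
  auto info = flatbuffers::GetRoot<ObjectInfo>(notification + sizeof(int64_t));
  ObjectInfoT result;
  info->UnPackTo(&result);
  return result;
}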

http://git-wip-us.apache.org/repos/asf/arrow/blob/5e343098/cpp/src/plasma/plasma.h
----------------------------------------------------------------------
diff --git a/cpp/src/plasma/plasma.h b/cpp/src/plasma/plasma.h
new file mode 100644
index 0000000..275d0c7
--- /dev/null
+++ b/cpp/src/plasma/plasma.h
@@ -0,0 +1,191 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#ifndef PLASMA_PLASMA_H
+#define PLASMA_PLASMA_H
+
+#include <errno.h>
+#include <inttypes.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>  // pid_t
+
+#include <unordered_map>
+#include <unordered_set>
+
+#include "arrow/status.h"
+#include "arrow/util/logging.h"
+#include "format/common_generated.h"
+#include "plasma/common.h"
+
+#define HANDLE_SIGPIPE(s, fd_)                                              \
+  do {                                                                      \
+    Status _s = (s);                                                        \
+    if (!_s.ok()) {                                                         \
+      if (errno == EPIPE || errno == EBADF || errno == ECONNRESET) {        \
+        ARROW_LOG(WARNING)                                                  \
+            << "Received SIGPIPE, BAD FILE DESCRIPTOR, or ECONNRESET when " \
+               "sending a message to client on fd "                         \
+            << fd_ << ". "                                                  \
+                      "The client on the other end may have hung up.";      \
+      } else {                                                              \
+        return _s;                                                          \
+      }                                                                     \
+    }                                                                       \
+  } while (0);
+
+/// Allocation granularity used in plasma for object allocation.
+#define BLOCK_SIZE 64
+
+/// Size of object hash digests.
+constexpr int64_t kDigestSize = sizeof(uint64_t);
+
+struct Client;
+
+/// Object request data structure. Used in the plasma_wait_for_objects()
+/// argument.
+typedef struct {
+  /// The ID of the requested object. If ID_NIL, any object is requested.
+  ObjectID object_id;
+  /// Request associated with the object. It can take one of the following values:
+  ///  - PLASMA_QUERY_LOCAL: return if or when the object is available in the
+  ///    local Plasma Store.
+  ///  - PLASMA_QUERY_ANYWHERE: return if or when the object is available in
+  ///    the system (i.e., either in the local or a remote Plasma Store).
+  int type;
+  /// Object status. Same as the status returned by the plasma_status()
+  /// function call. This is filled in by plasma_wait_for_objects1():
+  ///  - ObjectStatus_Local: object is ready at the local Plasma Store.
+  ///  - ObjectStatus_Remote: object is ready at a remote Plasma Store.
+  ///  - ObjectStatus_Nonexistent: object does not exist in the system.
+  ///  - PLASMA_CLIENT_IN_TRANSFER, if the object is currently being scheduled
+  ///    for transfer or is being transferred.
+  int status;
+} ObjectRequest;
+
+/// Mapping from object IDs to type and status of the request.
+typedef std::unordered_map<ObjectID, ObjectRequest, UniqueIDHasher> ObjectRequestMap;
+
+/// Handle to access memory mapped file and map it into client address space.
+typedef struct {
+  /// The file descriptor of the memory mapped file in the store. It is used as
+  /// a unique identifier of the file in the client to look up the corresponding
+  /// file descriptor on the client's side.
+  int store_fd;
+  /// The size in bytes of the memory mapped file.
+  int64_t mmap_size;
+} object_handle;
+
+// TODO(pcm): Replace this by the flatbuffers message PlasmaObjectSpec.
+typedef struct {
+  /// Handle for memory mapped file the object is stored in.
+  object_handle handle;
+  /// The offset in bytes in the memory mapped file of the data.
+  ptrdiff_t data_offset;
+  /// The offset in bytes in the memory mapped file of the metadata.
+  ptrdiff_t metadata_offset;
+  /// The size in bytes of the data.
+  int64_t data_size;
+  /// The size in bytes of the metadata.
+  int64_t metadata_size;
+} PlasmaObject;
+
+typedef enum {
+  /// Object was created but not sealed in the local Plasma Store.
+  PLASMA_CREATED = 1,
+  /// Object is sealed and stored in the local Plasma Store.
+  PLASMA_SEALED
+} object_state;
+
+typedef enum {
+  /// The object was not found.
+  OBJECT_NOT_FOUND = 0,
+  /// The object was found.
+  OBJECT_FOUND = 1
+} object_status;
+
+typedef enum {
+  /// Query for object in the local plasma store.
+  PLASMA_QUERY_LOCAL = 1,
+  /// Query for object in the local plasma store or in a remote plasma store.
+  PLASMA_QUERY_ANYWHERE
+} object_request_type;
+
+/// This type is used by the Plasma store. It is here because it is exposed to
+/// the eviction policy.
+struct ObjectTableEntry {
+  /// Object id of this object.
+  ObjectID object_id;
+  /// Object info like size, creation time and owner.
+  ObjectInfoT info;
+  /// Memory mapped file containing the object.
+  int fd;
+  /// Size of the underlying map.
+  int64_t map_size;
+  /// Offset from the base of the mmap.
+  ptrdiff_t offset;
+  /// Pointer to the object data. Needed to free the object.
+  uint8_t* pointer;
+  /// Set of clients currently using this object.
+  std::unordered_set<Client*> clients;
+  /// The state of the object, e.g., whether it is open or sealed.
+  object_state state;
+  /// The digest of the object. Used to see if two objects are the same.
+  unsigned char digest[kDigestSize];
+};
+
+/// The plasma store information that is exposed to the eviction policy.
+struct PlasmaStoreInfo {
+  /// Objects that are in the Plasma store.
+  std::unordered_map<ObjectID, std::unique_ptr<ObjectTableEntry>, UniqueIDHasher> objects;
+  /// The amount of memory (in bytes) that we allow to be allocated in the
+  /// store.
+  int64_t memory_capacity;
+};
+
+/// Get an entry from the object table and return NULL if the object_id
+/// is not present.
+///
+/// @param store_info The PlasmaStoreInfo that contains the object table.
+/// @param object_id The object_id of the entry we are looking for.
+/// @return The entry associated with the object_id or NULL if the object_id
+///         is not present.
+ObjectTableEntry* get_object_table_entry(
+    PlasmaStoreInfo* store_info, const ObjectID& object_id);
+
+/// Print a warning if the status is less than zero. This should be used to check
+/// the success of messages sent to plasma clients. We print a warning instead of
+/// failing because the plasma clients are allowed to die. This is used to handle
+/// situations where the store writes to a client file descriptor, and the client
+/// may already have disconnected. If we have processed the disconnection and
+/// closed the file descriptor, we should get a BAD FILE DESCRIPTOR error. If we
+/// have not, then we should get a SIGPIPE. If we write to a TCP socket that
+/// isn't connected yet, then we should get an ECONNRESET.
+///
+/// @param status The status to check. If it is less than zero, we will
+///        print a warning.
+/// @param client_sock The client socket. This is just used to print some extra
+///        information.
+/// @return The errno set.
+int warn_if_sigpipe(int status, int client_sock);
+
+uint8_t* create_object_info_buffer(ObjectInfoT* object_info);
+
+#endif  // PLASMA_PLASMA_H
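
For illustration, filling in an ObjectRequestMap for a wait call might look
like the sketch below (ObjectStatus_Nonexistent is the generated status enum
value that ReadWaitRequest in protocol.cc also uses as the initial state):

ObjectRequestMap MakeWaitRequest(const ObjectID& id) {
  ObjectRequest request;
  request.object_id = id;
  request.type = PLASMA_QUERY_LOCAL;          // or PLASMA_QUERY_ANYWHERE
  request.status = ObjectStatus_Nonexistent;  // filled in by the store
  ObjectRequestMap requests;
  requests[id] = request;
  return requests;
}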

http://git-wip-us.apache.org/repos/asf/arrow/blob/5e343098/cpp/src/plasma/protocol.cc
----------------------------------------------------------------------
diff --git a/cpp/src/plasma/protocol.cc b/cpp/src/plasma/protocol.cc
new file mode 100644
index 0000000..246aa29
--- /dev/null
+++ b/cpp/src/plasma/protocol.cc
@@ -0,0 +1,502 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include "plasma/protocol.h"
+
+#include "flatbuffers/flatbuffers.h"
+#include "format/plasma_generated.h"
+
+#include "plasma/common.h"
+#include "plasma/io.h"
+
+using flatbuffers::uoffset_t;
+
+flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>>
+to_flatbuffer(flatbuffers::FlatBufferBuilder* fbb, const ObjectID* object_ids,
+    int64_t num_objects) {
+  std::vector<flatbuffers::Offset<flatbuffers::String>> results;
+  for (int64_t i = 0; i < num_objects; i++) {
+    results.push_back(fbb->CreateString(object_ids[i].binary()));
+  }
+  return fbb->CreateVector(results);
+}
+
+Status PlasmaReceive(int sock, int64_t message_type, std::vector<uint8_t>* buffer) {
+  int64_t type;
+  RETURN_NOT_OK(ReadMessage(sock, &type, buffer));
+  ARROW_CHECK(type == message_type) << "type = " << type
+                                    << ", message_type = " << message_type;
+  return Status::OK();
+}
+
+template <typename Message>
+Status PlasmaSend(int sock, int64_t message_type, flatbuffers::FlatBufferBuilder* fbb,
+    const Message& message) {
+  fbb->Finish(message);
+  return WriteMessage(sock, message_type, fbb->GetSize(), fbb->GetBufferPointer());
+}
+
+// Create messages.
+
+Status SendCreateRequest(
+    int sock, ObjectID object_id, int64_t data_size, int64_t metadata_size) {
+  flatbuffers::FlatBufferBuilder fbb;
+  auto message = CreatePlasmaCreateRequest(
+      fbb, fbb.CreateString(object_id.binary()), data_size, metadata_size);
+  return PlasmaSend(sock, MessageType_PlasmaCreateRequest, &fbb, message);
+}
+
+Status ReadCreateRequest(
+    uint8_t* data, ObjectID* object_id, int64_t* data_size, int64_t* metadata_size) {
+  DCHECK(data);
+  auto message = flatbuffers::GetRoot<PlasmaCreateRequest>(data);
+  *data_size = message->data_size();
+  *metadata_size = message->metadata_size();
+  *object_id = ObjectID::from_binary(message->object_id()->str());
+  return Status::OK();
+}
+
+Status SendCreateReply(
+    int sock, ObjectID object_id, PlasmaObject* object, int error_code) {
+  flatbuffers::FlatBufferBuilder fbb;
+  PlasmaObjectSpec plasma_object(object->handle.store_fd, object->handle.mmap_size,
+      object->data_offset, object->data_size, object->metadata_offset,
+      object->metadata_size);
+  auto message = CreatePlasmaCreateReply(
+      fbb, fbb.CreateString(object_id.binary()), &plasma_object, (PlasmaError)error_code);
+  return PlasmaSend(sock, MessageType_PlasmaCreateReply, &fbb, message);
+}
+
+Status ReadCreateReply(uint8_t* data, ObjectID* object_id, PlasmaObject* object) {
+  DCHECK(data);
+  auto message = flatbuffers::GetRoot<PlasmaCreateReply>(data);
+  *object_id = ObjectID::from_binary(message->object_id()->str());
+  object->handle.store_fd = message->plasma_object()->segment_index();
+  object->handle.mmap_size = message->plasma_object()->mmap_size();
+  object->data_offset = message->plasma_object()->data_offset();
+  object->data_size = message->plasma_object()->data_size();
+  object->metadata_offset = message->plasma_object()->metadata_offset();
+  object->metadata_size = message->plasma_object()->metadata_size();
+  return plasma_error_status(message->error());
+}
+
+// Seal messages.
+
+Status SendSealRequest(int sock, ObjectID object_id, unsigned char* digest) {
+  flatbuffers::FlatBufferBuilder fbb;
+  auto digest_string = fbb.CreateString(reinterpret_cast<char*>(digest), kDigestSize);
+  auto message =
+      CreatePlasmaSealRequest(fbb, fbb.CreateString(object_id.binary()), digest_string);
+  return PlasmaSend(sock, MessageType_PlasmaSealRequest, &fbb, message);
+}
+
+Status ReadSealRequest(uint8_t* data, ObjectID* object_id, unsigned char* digest) {
+  DCHECK(data);
+  auto message = flatbuffers::GetRoot<PlasmaSealRequest>(data);
+  *object_id = ObjectID::from_binary(message->object_id()->str());
+  ARROW_CHECK(message->digest()->size() == kDigestSize);
+  memcpy(digest, message->digest()->data(), kDigestSize);
+  return Status::OK();
+}
+
+Status SendSealReply(int sock, ObjectID object_id, int error) {
+  flatbuffers::FlatBufferBuilder fbb;
+  auto message = CreatePlasmaSealReply(
+      fbb, fbb.CreateString(object_id.binary()), (PlasmaError)error);
+  return PlasmaSend(sock, MessageType_PlasmaSealReply, &fbb, message);
+}
+
+Status ReadSealReply(uint8_t* data, ObjectID* object_id) {
+  DCHECK(data);
+  auto message = flatbuffers::GetRoot<PlasmaSealReply>(data);
+  *object_id = ObjectID::from_binary(message->object_id()->str());
+  return plasma_error_status(message->error());
+}
+
+// Release messages.
+
+Status SendReleaseRequest(int sock, ObjectID object_id) {
+  flatbuffers::FlatBufferBuilder fbb;
+  auto message =
+      CreatePlasmaReleaseRequest(fbb, fbb.CreateString(object_id.binary()));
+  return PlasmaSend(sock, MessageType_PlasmaReleaseRequest, &fbb, message);
+}
+
+Status ReadReleaseRequest(uint8_t* data, ObjectID* object_id) {
+  DCHECK(data);
+  auto message = flatbuffers::GetRoot<PlasmaReleaseRequest>(data);
+  *object_id = ObjectID::from_binary(message->object_id()->str());
+  return Status::OK();
+}
+
+Status SendReleaseReply(int sock, ObjectID object_id, int error) {
+  flatbuffers::FlatBufferBuilder fbb;
+  auto message = CreatePlasmaReleaseReply(
+      fbb, fbb.CreateString(object_id.binary()), (PlasmaError)error);
+  return PlasmaSend(sock, MessageType_PlasmaReleaseReply, &fbb, message);
+}
+
+Status ReadReleaseReply(uint8_t* data, ObjectID* object_id) {
+  DCHECK(data);
+  auto message = flatbuffers::GetRoot<PlasmaReleaseReply>(data);
+  *object_id = ObjectID::from_binary(message->object_id()->str());
+  return plasma_error_status(message->error());
+}
+
+// Delete messages.
+
+Status SendDeleteRequest(int sock, ObjectID object_id) {
+  flatbuffers::FlatBufferBuilder fbb;
+  auto message = CreatePlasmaDeleteRequest(fbb, fbb.CreateString(object_id.binary()));
+  return PlasmaSend(sock, MessageType_PlasmaDeleteRequest, &fbb, message);
+}
+
+Status ReadDeleteRequest(uint8_t* data, ObjectID* object_id) {
+  DCHECK(data);
+  auto message = flatbuffers::GetRoot<PlasmaDeleteRequest>(data);
+  *object_id = ObjectID::from_binary(message->object_id()->str());
+  return Status::OK();
+}
+
+Status SendDeleteReply(int sock, ObjectID object_id, int error) {
+  flatbuffers::FlatBufferBuilder fbb;
+  auto message = CreatePlasmaDeleteReply(
+      fbb, fbb.CreateString(object_id.binary()), (PlasmaError)error);
+  return PlasmaSend(sock, MessageType_PlasmaDeleteReply, &fbb, message);
+}
+
+Status ReadDeleteReply(uint8_t* data, ObjectID* object_id) {
+  DCHECK(data);
+  auto message = flatbuffers::GetRoot<PlasmaDeleteReply>(data);
+  *object_id = ObjectID::from_binary(message->object_id()->str());
+  return plasma_error_status(message->error());
+}
+
+// Status messages.
+
+Status SendStatusRequest(int sock, const ObjectID* object_ids, int64_t num_objects) {
+  flatbuffers::FlatBufferBuilder fbb;
+  auto message =
+      CreatePlasmaStatusRequest(fbb, to_flatbuffer(&fbb, object_ids, num_objects));
+  return PlasmaSend(sock, MessageType_PlasmaStatusRequest, &fbb, message);
+}
+
+Status ReadStatusRequest(uint8_t* data, ObjectID object_ids[], int64_t num_objects) {
+  DCHECK(data);
+  auto message = flatbuffers::GetRoot<PlasmaStatusRequest>(data);
+  for (uoffset_t i = 0; i < num_objects; ++i) {
+    object_ids[i] = ObjectID::from_binary(message->object_ids()->Get(i)->str());
+  }
+  return Status::OK();
+}
+
+Status SendStatusReply(
+    int sock, ObjectID object_ids[], int object_status[], int64_t num_objects) {
+  flatbuffers::FlatBufferBuilder fbb;
+  auto message =
+      CreatePlasmaStatusReply(fbb, to_flatbuffer(&fbb, object_ids, num_objects),
+          fbb.CreateVector(object_status, num_objects));
+  return PlasmaSend(sock, MessageType_PlasmaStatusReply, &fbb, message);
+}
+
+int64_t ReadStatusReply_num_objects(uint8_t* data) {
+  DCHECK(data);
+  auto message = flatbuffers::GetRoot<PlasmaStatusReply>(data);
+  return message->object_ids()->size();
+}
+
+Status ReadStatusReply(
+    uint8_t* data, ObjectID object_ids[], int object_status[], int64_t num_objects) {
+  DCHECK(data);
+  auto message = flatbuffers::GetRoot<PlasmaStatusReply>(data);
+  for (uoffset_t i = 0; i < num_objects; ++i) {
+    object_ids[i] = ObjectID::from_binary(message->object_ids()->Get(i)->str());
+  }
+  for (uoffset_t i = 0; i < num_objects; ++i) {
+    object_status[i] = message->status()->data()[i];
+  }
+  return Status::OK();
+}
+
+// Contains messages.
+
+Status SendContainsRequest(int sock, ObjectID object_id) {
+  flatbuffers::FlatBufferBuilder fbb;
+  auto message = CreatePlasmaContainsRequest(fbb, fbb.CreateString(object_id.binary()));
+  return PlasmaSend(sock, MessageType_PlasmaContainsRequest, &fbb, message);
+}
+
+Status ReadContainsRequest(uint8_t* data, ObjectID* object_id) {
+  DCHECK(data);
+  auto message = flatbuffers::GetRoot<PlasmaContainsRequest>(data);
+  *object_id = ObjectID::from_binary(message->object_id()->str());
+  return Status::OK();
+}
+
+Status SendContainsReply(int sock, ObjectID object_id, bool has_object) {
+  flatbuffers::FlatBufferBuilder fbb;
+  auto message =
+      CreatePlasmaContainsReply(fbb, fbb.CreateString(object_id.binary()), has_object);
+  return PlasmaSend(sock, MessageType_PlasmaContainsReply, &fbb, message);
+}
+
+Status ReadContainsReply(uint8_t* data, ObjectID* object_id, bool* has_object) {
+  DCHECK(data);
+  auto message = flatbuffers::GetRoot<PlasmaContainsReply>(data);
+  *object_id = ObjectID::from_binary(message->object_id()->str());
+  *has_object = message->has_object();
+  return Status::OK();
+}
+
+// Connect messages.
+
+Status SendConnectRequest(int sock) {
+  flatbuffers::FlatBufferBuilder fbb;
+  auto message = CreatePlasmaConnectRequest(fbb);
+  return PlasmaSend(sock, MessageType_PlasmaConnectRequest, &fbb, message);
+}
+
+Status ReadConnectRequest(uint8_t* data) {
+  return Status::OK();
+}
+
+Status SendConnectReply(int sock, int64_t memory_capacity) {
+  flatbuffers::FlatBufferBuilder fbb;
+  auto message = CreatePlasmaConnectReply(fbb, memory_capacity);
+  return PlasmaSend(sock, MessageType_PlasmaConnectReply, &fbb, message);
+}
+
+Status ReadConnectReply(uint8_t* data, int64_t* memory_capacity) {
+  DCHECK(data);
+  auto message = flatbuffers::GetRoot<PlasmaConnectReply>(data);
+  *memory_capacity = message->memory_capacity();
+  return Status::OK();
+}
+
+// Evict messages.
+
+Status SendEvictRequest(int sock, int64_t num_bytes) {
+  flatbuffers::FlatBufferBuilder fbb;
+  auto message = CreatePlasmaEvictRequest(fbb, num_bytes);
+  return PlasmaSend(sock, MessageType_PlasmaEvictRequest, &fbb, message);
+}
+
+Status ReadEvictRequest(uint8_t* data, int64_t* num_bytes) {
+  DCHECK(data);
+  auto message = flatbuffers::GetRoot<PlasmaEvictRequest>(data);
+  *num_bytes = message->num_bytes();
+  return Status::OK();
+}
+
+Status SendEvictReply(int sock, int64_t num_bytes) {
+  flatbuffers::FlatBufferBuilder fbb;
+  auto message = CreatePlasmaEvictReply(fbb, num_bytes);
+  return PlasmaSend(sock, MessageType_PlasmaEvictReply, &fbb, message);
+}
+
+Status ReadEvictReply(uint8_t* data, int64_t& num_bytes) {
+  DCHECK(data);
+  auto message = flatbuffers::GetRoot<PlasmaEvictReply>(data);
+  num_bytes = message->num_bytes();
+  return Status::OK();
+}
+
+// Get messages.
+
+Status SendGetRequest(
+    int sock, const ObjectID* object_ids, int64_t num_objects, int64_t timeout_ms) {
+  flatbuffers::FlatBufferBuilder fbb;
+  auto message = CreatePlasmaGetRequest(
+      fbb, to_flatbuffer(&fbb, object_ids, num_objects), timeout_ms);
+  return PlasmaSend(sock, MessageType_PlasmaGetRequest, &fbb, message);
+}
+
+Status ReadGetRequest(
+    uint8_t* data, std::vector<ObjectID>& object_ids, int64_t* timeout_ms) {
+  DCHECK(data);
+  auto message = flatbuffers::GetRoot<PlasmaGetRequest>(data);
+  for (uoffset_t i = 0; i < message->object_ids()->size(); ++i) {
+    auto object_id = message->object_ids()->Get(i)->str();
+    object_ids.push_back(ObjectID::from_binary(object_id));
+  }
+  *timeout_ms = message->timeout_ms();
+  return Status::OK();
+}
+
+Status SendGetReply(int sock, ObjectID object_ids[],
+    std::unordered_map<ObjectID, PlasmaObject, UniqueIDHasher>& plasma_objects,
+    int64_t num_objects) {
+  flatbuffers::FlatBufferBuilder fbb;
+  std::vector<PlasmaObjectSpec> objects;
+
+  for (int i = 0; i < num_objects; ++i) {
+    const PlasmaObject& object = plasma_objects[object_ids[i]];
+    objects.push_back(PlasmaObjectSpec(object.handle.store_fd, object.handle.mmap_size,
+        object.data_offset, object.data_size, object.metadata_offset,
+        object.metadata_size));
+  }
+  auto message = CreatePlasmaGetReply(fbb, to_flatbuffer(&fbb, object_ids, num_objects),
+      fbb.CreateVectorOfStructs(objects.data(), num_objects));
+  return PlasmaSend(sock, MessageType_PlasmaGetReply, &fbb, message);
+}
+
+Status ReadGetReply(uint8_t* data, ObjectID object_ids[], PlasmaObject plasma_objects[],
+    int64_t num_objects) {
+  DCHECK(data);
+  auto message = flatbuffers::GetRoot<PlasmaGetReply>(data);
+  for (uoffset_t i = 0; i < num_objects; ++i) {
+    object_ids[i] = ObjectID::from_binary(message->object_ids()->Get(i)->str());
+  }
+  for (uoffset_t i = 0; i < num_objects; ++i) {
+    const PlasmaObjectSpec* object = message->plasma_objects()->Get(i);
+    plasma_objects[i].handle.store_fd = object->segment_index();
+    plasma_objects[i].handle.mmap_size = object->mmap_size();
+    plasma_objects[i].data_offset = object->data_offset();
+    plasma_objects[i].data_size = object->data_size();
+    plasma_objects[i].metadata_offset = object->metadata_offset();
+    plasma_objects[i].metadata_size = object->metadata_size();
+  }
+  return Status::OK();
+}
+
+// Fetch messages.
+
+Status SendFetchRequest(int sock, const ObjectID* object_ids, int64_t num_objects) {
+  flatbuffers::FlatBufferBuilder fbb;
+  auto message =
+      CreatePlasmaFetchRequest(fbb, to_flatbuffer(&fbb, object_ids, num_objects));
+  return PlasmaSend(sock, MessageType_PlasmaFetchRequest, &fbb, message);
+}
+
+Status ReadFetchRequest(uint8_t* data, std::vector<ObjectID>& object_ids) {
+  DCHECK(data);
+  auto message = flatbuffers::GetRoot<PlasmaFetchRequest>(data);
+  for (uoffset_t i = 0; i < message->object_ids()->size(); ++i) {
+    object_ids.push_back(ObjectID::from_binary(message->object_ids()->Get(i)->str()));
+  }
+  return Status::OK();
+}
+
+// Wait messages.
+
+Status SendWaitRequest(int sock, ObjectRequest object_requests[], int64_t num_requests,
+    int num_ready_objects, int64_t timeout_ms) {
+  flatbuffers::FlatBufferBuilder fbb;
+
+  std::vector<flatbuffers::Offset<ObjectRequestSpec>> object_request_specs;
+  for (int i = 0; i < num_requests; i++) {
+    object_request_specs.push_back(CreateObjectRequestSpec(fbb,
+        fbb.CreateString(object_requests[i].object_id.binary()),
+        object_requests[i].type));
+  }
+
+  auto message = CreatePlasmaWaitRequest(
+      fbb, fbb.CreateVector(object_request_specs), num_ready_objects, timeout_ms);
+  return PlasmaSend(sock, MessageType_PlasmaWaitRequest, &fbb, message);
+}
+
+Status ReadWaitRequest(uint8_t* data, ObjectRequestMap& object_requests,
+    int64_t* timeout_ms, int* num_ready_objects) {
+  DCHECK(data);
+  auto message = flatbuffers::GetRoot<PlasmaWaitRequest>(data);
+  *num_ready_objects = message->num_ready_objects();
+  *timeout_ms = message->timeout();
+
+  for (uoffset_t i = 0; i < message->object_requests()->size(); i++) {
+    ObjectID object_id =
+        ObjectID::from_binary(message->object_requests()->Get(i)->object_id()->str());
+    ObjectRequest object_request({object_id, message->object_requests()->Get(i)->type(),
+        ObjectStatus_Nonexistent});
+    object_requests[object_id] = object_request;
+  }
+  return Status::OK();
+}
+
+Status SendWaitReply(
+    int sock, const ObjectRequestMap& object_requests, int num_ready_objects) {
+  flatbuffers::FlatBufferBuilder fbb;
+
+  std::vector<flatbuffers::Offset<ObjectReply>> object_replies;
+  for (const auto& entry : object_requests) {
+    const auto& object_request = entry.second;
+    object_replies.push_back(CreateObjectReply(
+        fbb, fbb.CreateString(object_request.object_id.binary()), object_request.status));
+  }
+
+  auto message = CreatePlasmaWaitReply(
+      fbb, fbb.CreateVector(object_replies.data(), num_ready_objects), num_ready_objects);
+  return PlasmaSend(sock, MessageType_PlasmaWaitReply, &fbb, message);
+}
+
+Status ReadWaitReply(
+    uint8_t* data, ObjectRequest object_requests[], int* num_ready_objects) {
+  DCHECK(data);
+
+  auto message = flatbuffers::GetRoot<PlasmaWaitReply>(data);
+  *num_ready_objects = message->num_ready_objects();
+  for (int i = 0; i < *num_ready_objects; i++) {
+    object_requests[i].object_id =
+        ObjectID::from_binary(message->object_requests()->Get(i)->object_id()->str());
+    object_requests[i].status = message->object_requests()->Get(i)->status();
+  }
+  return Status::OK();
+}
+
+// Subscribe messages.
+
+Status SendSubscribeRequest(int sock) {
+  flatbuffers::FlatBufferBuilder fbb;
+  auto message = CreatePlasmaSubscribeRequest(fbb);
+  return PlasmaSend(sock, MessageType_PlasmaSubscribeRequest, &fbb, message);
+}
+
+// Data messages.
+
+Status SendDataRequest(int sock, ObjectID object_id, const char* address, int port) {
+  flatbuffers::FlatBufferBuilder fbb;
+  auto addr = fbb.CreateString(address, strlen(address));
+  auto message =
+      CreatePlasmaDataRequest(fbb, fbb.CreateString(object_id.binary()), addr, port);
+  return PlasmaSend(sock, MessageType_PlasmaDataRequest, &fbb, message);
+}
+
+Status ReadDataRequest(uint8_t* data, ObjectID* object_id, char** address, int* port) {
+  DCHECK(data);
+  auto message = flatbuffers::GetRoot<PlasmaDataRequest>(data);
+  DCHECK(message->object_id()->size() == sizeof(ObjectID));
+  *object_id = ObjectID::from_binary(message->object_id()->str());
+  *address = strdup(message->address()->c_str());
+  *port = message->port();
+  return Status::OK();
+}
+
+Status SendDataReply(
+    int sock, ObjectID object_id, int64_t object_size, int64_t metadata_size) {
+  flatbuffers::FlatBufferBuilder fbb;
+  auto message = CreatePlasmaDataReply(
+      fbb, fbb.CreateString(object_id.binary()), object_size, metadata_size);
+  return PlasmaSend(sock, MessageType_PlasmaDataReply, &fbb, message);
+}
+
+Status ReadDataReply(
+    uint8_t* data, ObjectID* object_id, int64_t* object_size, int64_t* metadata_size) {
+  DCHECK(data);
+  auto message = flatbuffers::GetRoot<PlasmaDataReply>(data);
+  *object_id = ObjectID::from_binary(message->object_id()->str());
+  *object_size = (int64_t)message->object_size();
+  *metadata_size = (int64_t)message->metadata_size();
+  return Status::OK();
+}
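
Each request/reply pair above composes into a small synchronous RPC. A hedged
client-side sketch for the Contains exchange (assuming ObjectID is
default-constructible, as plasma/common.h defines it):

arrow::Status ContainsObject(
    int sock, const ObjectID& object_id, bool* has_object) {
  RETURN_NOT_OK(SendContainsRequest(sock, object_id));
  std::vector<uint8_t> buffer;
  RETURN_NOT_OK(PlasmaReceive(sock, MessageType_PlasmaContainsReply, &buffer));
  ObjectID echoed_id;  // The store echoes the object ID back in the reply.
  return ReadContainsReply(buffer.data(), &echoed_id, has_object);
}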

http://git-wip-us.apache.org/repos/asf/arrow/blob/5e343098/cpp/src/plasma/protocol.h
----------------------------------------------------------------------
diff --git a/cpp/src/plasma/protocol.h b/cpp/src/plasma/protocol.h
new file mode 100644
index 0000000..5d9d136
--- /dev/null
+++ b/cpp/src/plasma/protocol.h
@@ -0,0 +1,170 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#ifndef PLASMA_PROTOCOL_H
+#define PLASMA_PROTOCOL_H
+
+#include <vector>
+
+#include "arrow/status.h"
+#include "format/plasma_generated.h"
+#include "plasma/plasma.h"
+
+using arrow::Status;
+
+/* Plasma receive message. */
+
+Status PlasmaReceive(int sock, int64_t message_type, std::vector<uint8_t>* buffer);
+
+/* Plasma Create message functions. */
+
+Status SendCreateRequest(
+    int sock, ObjectID object_id, int64_t data_size, int64_t metadata_size);
+
+Status ReadCreateRequest(
+    uint8_t* data, ObjectID* object_id, int64_t* data_size, int64_t* metadata_size);
+
+Status SendCreateReply(int sock, ObjectID object_id, PlasmaObject* object, int error);
+
+Status ReadCreateReply(uint8_t* data, ObjectID* object_id, PlasmaObject* object);
+
+/* Plasma Seal message functions. */
+
+Status SendSealRequest(int sock, ObjectID object_id, unsigned char* digest);
+
+Status ReadSealRequest(uint8_t* data, ObjectID* object_id, unsigned char* digest);
+
+Status SendSealReply(int sock, ObjectID object_id, int error);
+
+Status ReadSealReply(uint8_t* data, ObjectID* object_id);
+
+/* Plasma Get message functions. */
+
+Status SendGetRequest(
+    int sock, const ObjectID* object_ids, int64_t num_objects, int64_t timeout_ms);
+
+Status ReadGetRequest(
+    uint8_t* data, std::vector<ObjectID>& object_ids, int64_t* timeout_ms);
+
+Status SendGetReply(int sock, ObjectID object_ids[],
+    std::unordered_map<ObjectID, PlasmaObject, UniqueIDHasher>& plasma_objects,
+    int64_t num_objects);
+
+Status ReadGetReply(uint8_t* data, ObjectID object_ids[], PlasmaObject plasma_objects[],
+    int64_t num_objects);
+
+/* Plasma Release message functions. */
+
+Status SendReleaseRequest(int sock, ObjectID object_id);
+
+Status ReadReleaseRequest(uint8_t* data, ObjectID* object_id);
+
+Status SendReleaseReply(int sock, ObjectID object_id, int error);
+
+Status ReadReleaseReply(uint8_t* data, ObjectID* object_id);
+
+/* Plasma Delete message functions. */
+
+Status SendDeleteRequest(int sock, ObjectID object_id);
+
+Status ReadDeleteRequest(uint8_t* data, ObjectID* object_id);
+
+Status SendDeleteReply(int sock, ObjectID object_id, int error);
+
+Status ReadDeleteReply(uint8_t* data, ObjectID* object_id);
+
+/* Status messages. */
+
+Status SendStatusRequest(int sock, const ObjectID* object_ids, int64_t num_objects);
+
+Status ReadStatusRequest(uint8_t* data, ObjectID object_ids[], int64_t num_objects);
+
+Status SendStatusReply(
+    int sock, ObjectID object_ids[], int object_status[], int64_t num_objects);
+
+int64_t ReadStatusReply_num_objects(uint8_t* data);
+
+Status ReadStatusReply(
+    uint8_t* data, ObjectID object_ids[], int object_status[], int64_t num_objects);
+
+/* Plasma Contains message functions. */
+
+Status SendContainsRequest(int sock, ObjectID object_id);
+
+Status ReadContainsRequest(uint8_t* data, ObjectID* object_id);
+
+Status SendContainsReply(int sock, ObjectID object_id, bool has_object);
+
+Status ReadContainsReply(uint8_t* data, ObjectID* object_id, bool* has_object);
+
+/* Plasma Connect message functions. */
+
+Status SendConnectRequest(int sock);
+
+Status ReadConnectRequest(uint8_t* data);
+
+Status SendConnectReply(int sock, int64_t memory_capacity);
+
+Status ReadConnectReply(uint8_t* data, int64_t* memory_capacity);
+
+/* Plasma Evict message functions (no reply so far). */
+
+Status SendEvictRequest(int sock, int64_t num_bytes);
+
+Status ReadEvictRequest(uint8_t* data, int64_t* num_bytes);
+
+Status SendEvictReply(int sock, int64_t num_bytes);
+
+Status ReadEvictReply(uint8_t* data, int64_t& num_bytes);
+
+/* Plasma Fetch Remote message functions. */
+
+Status SendFetchRequest(int sock, const ObjectID* object_ids, int64_t num_objects);
+
+Status ReadFetchRequest(uint8_t* data, std::vector<ObjectID>& object_ids);
+
+/* Plasma Wait message functions. */
+
+Status SendWaitRequest(int sock, ObjectRequest object_requests[], int64_t num_requests,
+    int num_ready_objects, int64_t timeout_ms);
+
+Status ReadWaitRequest(uint8_t* data, ObjectRequestMap& object_requests,
+    int64_t* timeout_ms, int* num_ready_objects);
+
+Status SendWaitReply(
+    int sock, const ObjectRequestMap& object_requests, int num_ready_objects);
+
+Status ReadWaitReply(
+    uint8_t* data, ObjectRequest object_requests[], int* num_ready_objects);
+
+/* Plasma Subscribe message functions. */
+
+Status SendSubscribeRequest(int sock);
+
+/* Data messages. */
+
+Status SendDataRequest(int sock, ObjectID object_id, const char* address, int port);
+
+Status ReadDataRequest(uint8_t* data, ObjectID* object_id, char** address, int* port);
+
+Status SendDataReply(
+    int sock, ObjectID object_id, int64_t object_size, int64_t metadata_size);
+
+Status ReadDataReply(
+    uint8_t* data, ObjectID* object_id, int64_t* object_size, int64_t* metadata_size);
+
+#endif /* PLASMA_PROTOCOL_H */


[6/6] arrow git commit: ARROW-1104: Integrate in-memory object store into arrow

Posted by we...@apache.org.
ARROW-1104: Integrate in-memory object store into arrow

This supersedes https://github.com/apache/arrow/pull/467

This is ready for review. Next steps are
- Integration with the arrow CI
- Write docs on how to use the object store

There is one remaining compilation error (it doesn't find Python.h for one of the Travis configurations; if anybody has an idea of what is going on, let me know).

Author: Philipp Moritz <pc...@gmail.com>
Author: Robert Nishihara <ro...@gmail.com>

Closes #742 from pcmoritz/plasma-store-2 and squashes the following commits:

c100a453 [Philipp Moritz] fixes
d67160c5 [Philipp Moritz] build dlmalloc with -O3
16d1f716 [Philipp Moritz] fix test hanging
0f321e16 [Philipp Moritz] try to fix tests
80f9df40 [Philipp Moritz] make format
4c474d71 [Philipp Moritz] run plasma_store from the right directory
85aa1710 [Philipp Moritz] fix mac tests
61d421b5 [Philipp Moritz] fix formatting
4497e337 [Philipp Moritz] fix tests
00f17f24 [Philipp Moritz] fix licenses
81437920 [Philipp Moritz] fix linting
5370ae06 [Philipp Moritz] fix plasma protocol
a137e783 [Philipp Moritz] more fixes
b36c6aaa [Philipp Moritz] fix fling.cc
214c426c [Philipp Moritz] fix eviction policy
e7badc48 [Philipp Moritz] fix python extension
6432d3fa [Philipp Moritz] fix formatting
b21f0814 [Philipp Moritz] fix remaining comments about client
27f9c9e8 [Philipp Moritz] fix formatting
7b08fd2a [Philipp Moritz] replace ObjectID pass by value with pass by const reference and fix const correctness
ca80e9a6 [Philipp Moritz] remove plain pointer in plasma client, part II
627b7c75 [Philipp Moritz] fix python extension name
30bd68b7 [Philipp Moritz] remove plain pointer in plasma client, part I
77d98227 [Philipp Moritz] put all the object code into a common library
0fdd4cd5 [Philipp Moritz] link libarrow.a and remove hardcoded optimization flags
8daea699 [Philipp Moritz] fix includes according to google styleguide
65ac7433 [Philipp Moritz] remove offending c++ flag from c flags
7003a4a4 [Philipp Moritz] fix valgrind test by setting working directory
217ff3d8 [Philipp Moritz] add valgrind heuristic
9c703c20 [Philipp Moritz] integrate client tests
9e5ae0e1 [Philipp Moritz] port serialization tests to gtest
0b8593db [Robert Nishihara] Port change from Ray. Change listen backlog size from 5 to 128.
b9a5a06e [Philipp Moritz] fix includes
ed680f97 [Philipp Moritz] reformat the code
f40f85bd [Philipp Moritz] add clang-format exceptions
d6e60d26 [Philipp Moritz] do not compile plasma on windows
f936adb7 [Philipp Moritz] build plasma python client only if python is available
e11b0e86 [Philipp Moritz] fix pthread
74ecb199 [Philipp Moritz] don't link against Python libraries
b1e0335a [Philipp Moritz] fix linting
7f7e7e78 [Philipp Moritz] more linting
79ea0ca7 [Philipp Moritz] fix clang-tidy
99420e8f [Philipp Moritz] add rat exceptions
6cee1e25 [Philipp Moritz] fix
c93034fb [Philipp Moritz] add Apache 2.0 headers
63729130 [Philipp Moritz] fix malloc?
99537c94 [Philipp Moritz] fix compiler warnings
cb3f3a38 [Philipp Moritz] compile C files with CMAKE_C_FLAGS
e649c2af [Philipp Moritz] fix compilation
04c2edb3 [Philipp Moritz] add missing file
51ab9630 [Philipp Moritz] fix compiler warnings
9ef7f412 [Philipp Moritz] make the plasma store compile
e9f9bb4a [Philipp Moritz] Initial commit of the plasma store. Contributors: Philipp Moritz, Robert Nishihara, Richard Shin, Stephanie Wang, Alexey Tumanov, Ion Stoica @ RISElab, UC Berkeley (2017) [from https://github.com/ray-project/ray/commit/b94b4a35e04d8d2c0af4420518a4e9a94c1c9b9f]


Project: http://git-wip-us.apache.org/repos/asf/arrow/repo
Commit: http://git-wip-us.apache.org/repos/asf/arrow/commit/5e343098
Tree: http://git-wip-us.apache.org/repos/asf/arrow/tree/5e343098
Diff: http://git-wip-us.apache.org/repos/asf/arrow/diff/5e343098

Branch: refs/heads/master
Commit: 5e343098187cb822017f359748e28c53ece70e75
Parents: ef579ca
Author: Philipp Moritz <pc...@gmail.com>
Authored: Thu Jun 22 09:35:34 2017 -0400
Committer: Wes McKinney <we...@twosigma.com>
Committed: Thu Jun 22 09:35:34 2017 -0400

----------------------------------------------------------------------
 LICENSE.txt                                |   96 +
 ci/travis_before_script_cpp.sh             |    5 +-
 ci/travis_script_python.sh                 |    1 +
 cpp/CMakeLists.txt                         |   20 +-
 cpp/src/arrow/status.h                     |   23 +
 cpp/src/arrow/util/logging.h               |    8 +-
 cpp/src/plasma/CMakeLists.txt              |  112 +
 cpp/src/plasma/client.cc                   |  557 ++
 cpp/src/plasma/client.h                    |  343 ++
 cpp/src/plasma/common.cc                   |   83 +
 cpp/src/plasma/common.h                    |   63 +
 cpp/src/plasma/events.cc                   |   81 +
 cpp/src/plasma/events.h                    |   99 +
 cpp/src/plasma/eviction_policy.cc          |  107 +
 cpp/src/plasma/eviction_policy.h           |  134 +
 cpp/src/plasma/extension.cc                |  456 ++
 cpp/src/plasma/extension.h                 |   50 +
 cpp/src/plasma/fling.cc                    |   90 +
 cpp/src/plasma/fling.h                     |   52 +
 cpp/src/plasma/format/common.fbs           |   34 +
 cpp/src/plasma/format/plasma.fbs           |  291 ++
 cpp/src/plasma/io.cc                       |  212 +
 cpp/src/plasma/io.h                        |   55 +
 cpp/src/plasma/malloc.cc                   |  178 +
 cpp/src/plasma/malloc.h                    |   26 +
 cpp/src/plasma/plasma.cc                   |   64 +
 cpp/src/plasma/plasma.h                    |  191 +
 cpp/src/plasma/protocol.cc                 |  502 ++
 cpp/src/plasma/protocol.h                  |  170 +
 cpp/src/plasma/store.cc                    |  681 +++
 cpp/src/plasma/store.h                     |  169 +
 cpp/src/plasma/test/client_tests.cc        |  132 +
 cpp/src/plasma/test/run_tests.sh           |   61 +
 cpp/src/plasma/test/run_valgrind.sh        |   27 +
 cpp/src/plasma/test/serialization_tests.cc |  388 ++
 cpp/src/plasma/thirdparty/ae/ae.c          |  465 ++
 cpp/src/plasma/thirdparty/ae/ae.h          |  123 +
 cpp/src/plasma/thirdparty/ae/ae_epoll.c    |  135 +
 cpp/src/plasma/thirdparty/ae/ae_evport.c   |  320 ++
 cpp/src/plasma/thirdparty/ae/ae_kqueue.c   |  138 +
 cpp/src/plasma/thirdparty/ae/ae_select.c   |  106 +
 cpp/src/plasma/thirdparty/ae/config.h      |   54 +
 cpp/src/plasma/thirdparty/ae/zmalloc.h     |   45 +
 cpp/src/plasma/thirdparty/dlmalloc.c       | 6281 +++++++++++++++++++++++
 cpp/src/plasma/thirdparty/xxhash.cc        |  889 ++++
 cpp/src/plasma/thirdparty/xxhash.h         |  293 ++
 dev/release/run-rat.sh                     |   11 +
 47 files changed, 14411 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/arrow/blob/5e343098/LICENSE.txt
----------------------------------------------------------------------
diff --git a/LICENSE.txt b/LICENSE.txt
index d645695..7000733 100644
--- a/LICENSE.txt
+++ b/LICENSE.txt
@@ -200,3 +200,99 @@
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
+
+--------------------------------------------------------------------------------
+
+src/plasma/fling.cc and src/plasma/fling.h: Apache 2.0
+
+Copyright 2013 Sharvil Nanavati
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+--------------------------------------------------------------------------------
+
+src/plasma/thirdparty/ae: Modified / 3-Clause BSD
+
+Copyright (c) 2006-2010, Salvatore Sanfilippo <antirez at gmail dot com>
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+   this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+   notice, this list of conditions and the following disclaimer in the
+   documentation and/or other materials provided with the distribution.
+ * Neither the name of Redis nor the names of its contributors may be used
+   to endorse or promote products derived from this software without
+   specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+--------------------------------------------------------------------------------
+
+src/plasma/thirdparty/dlmalloc.c: CC0
+
+This is a version (aka dlmalloc) of malloc/free/realloc written by
+Doug Lea and released to the public domain, as explained at
+http://creativecommons.org/publicdomain/zero/1.0/ Send questions,
+comments, complaints, performance data, etc to dl@cs.oswego.edu
+
+--------------------------------------------------------------------------------
+
+src/plasma/thirdparty/xxhash: BSD 2-Clause License
+
+xxHash - Fast Hash algorithm
+Copyright (C) 2012-2016, Yann Collet
+
+BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+* Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+* Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+You can contact the author at :
+- xxHash homepage: http://www.xxhash.com
+- xxHash source repository : https://github.com/Cyan4973/xxHash
+
+--------------------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/arrow/blob/5e343098/ci/travis_before_script_cpp.sh
----------------------------------------------------------------------
diff --git a/ci/travis_before_script_cpp.sh b/ci/travis_before_script_cpp.sh
index 9908735..a38a0dc 100755
--- a/ci/travis_before_script_cpp.sh
+++ b/ci/travis_before_script_cpp.sh
@@ -26,12 +26,11 @@ source $TRAVIS_BUILD_DIR/ci/travis_env_common.sh
 if [ $only_library_mode == "no" ]; then
   # C++ toolchain
   export CPP_TOOLCHAIN=$TRAVIS_BUILD_DIR/cpp-toolchain
-  export FLATBUFFERS_HOME=$CPP_TOOLCHAIN
   export RAPIDJSON_HOME=$CPP_TOOLCHAIN
 
   # Set up C++ toolchain from conda-forge packages for faster builds
   source $TRAVIS_BUILD_DIR/ci/travis_install_conda.sh
-  conda create -y -q -p $CPP_TOOLCHAIN python=2.7 flatbuffers rapidjson
+  conda create -y -q -p $CPP_TOOLCHAIN python=2.7 rapidjson
 fi
 
 if [ $TRAVIS_OS_NAME == "osx" ]; then
@@ -73,7 +72,7 @@ else
           $ARROW_CPP_DIR
 fi
 
-make -j4
+make VERBOSE=1 -j4
 make install
 
 popd

http://git-wip-us.apache.org/repos/asf/arrow/blob/5e343098/ci/travis_script_python.sh
----------------------------------------------------------------------
diff --git a/ci/travis_script_python.sh b/ci/travis_script_python.sh
index 6cc760f..b82653d 100755
--- a/ci/travis_script_python.sh
+++ b/ci/travis_script_python.sh
@@ -72,6 +72,7 @@ function build_arrow_libraries() {
 
   cmake -DARROW_BUILD_TESTS=off \
         -DARROW_PYTHON=on \
+        -DPLASMA_PYTHON=on \
         -DCMAKE_INSTALL_PREFIX=$2 \
         $CPP_DIR
 

http://git-wip-us.apache.org/repos/asf/arrow/blob/5e343098/cpp/CMakeLists.txt
----------------------------------------------------------------------
diff --git a/cpp/CMakeLists.txt b/cpp/CMakeLists.txt
index 962891a..0897e99 100644
--- a/cpp/CMakeLists.txt
+++ b/cpp/CMakeLists.txt
@@ -187,6 +187,8 @@ include(san-config)
 
 # For any C code, use the same flags.
 set(CMAKE_C_FLAGS "${CMAKE_CXX_FLAGS}")
+# Remove -std=c++11 to avoid errors from C compilers
+string(REPLACE "-std=c++11" "" CMAKE_C_FLAGS ${CMAKE_C_FLAGS})
 
 # Code coverage
 if ("${ARROW_GENERATE_COVERAGE}")
@@ -362,7 +364,7 @@ function(ADD_ARROW_TEST REL_TEST_NAME)
       APPEND_STRING PROPERTY
       COMPILE_FLAGS " -DARROW_VALGRIND")
     add_test(${TEST_NAME}
-      valgrind --tool=memcheck --leak-check=full --error-exitcode=1 ${TEST_PATH})
+      bash -c "cd ${EXECUTABLE_OUTPUT_PATH}; valgrind --tool=memcheck --leak-check=full --leak-check-heuristics=stdstring --error-exitcode=1 ${TEST_PATH}")
   elseif(MSVC)
     add_test(${TEST_NAME} ${TEST_PATH})
   else()
@@ -707,6 +709,7 @@ if (ARROW_IPC)
     ExternalProject_Add(flatbuffers_ep
       URL "https://github.com/google/flatbuffers/archive/v${FLATBUFFERS_VERSION}.tar.gz"
       CMAKE_ARGS
+      "-DCMAKE_CXX_FLAGS=-fPIC"
       "-DCMAKE_INSTALL_PREFIX:PATH=${FLATBUFFERS_PREFIX}"
       "-DFLATBUFFERS_BUILD_TESTS=OFF")
 
@@ -871,7 +874,12 @@ if (UNIX)
 
   FOREACH(item ${LINT_FILES})
     IF(NOT ((item MATCHES "_generated.h") OR
-            (item MATCHES "pyarrow_api.h")))
+            (item MATCHES "pyarrow_api.h") OR
+            (item MATCHES "xxhash.h") OR
+            (item MATCHES "xxhash.cc") OR
+            (item MATCHES "config.h") OR
+            (item MATCHES "zmalloc.h") OR
+            (item MATCHES "ae.h")))
       LIST(APPEND FILTERED_LINT_FILES ${item})
     ENDIF()
   ENDFOREACH(item ${LINT_FILES})
@@ -899,7 +907,10 @@ if (${CLANG_FORMAT_FOUND})
     sed -e '/windows_compatibility.h/g' |
     sed -e '/pyarrow_api.h/g' |
     sed -e '/config.h/g' |   # python/config.h
-    sed -e '/platform.h/g'`  # python/platform.h
+    sed -e '/platform.h/g' |  # python/platform.h
+    sed -e '/ae.h/g' |
+    sed -e '/xxhash.cc/g' |
+    sed -e '/xxhash.h/g'`
     )
 
   # runs clang format and exits with a non-zero exit code if any files need to be reformatted
@@ -1002,6 +1013,9 @@ if(FLATBUFFERS_VENDORED)
   set(ARROW_DEPENDENCIES ${ARROW_DEPENDENCIES} flatbuffers_ep)
 endif()
 
+if(NOT WIN32)
+  add_subdirectory(src/plasma)
+endif()
 add_subdirectory(src/arrow)
 add_subdirectory(src/arrow/io)
 if (ARROW_IPC)

http://git-wip-us.apache.org/repos/asf/arrow/blob/5e343098/cpp/src/arrow/status.h
----------------------------------------------------------------------
diff --git a/cpp/src/arrow/status.h b/cpp/src/arrow/status.h
index 1688b96..7e7f67c 100644
--- a/cpp/src/arrow/status.h
+++ b/cpp/src/arrow/status.h
@@ -83,6 +83,9 @@ enum class StatusCode : char {
   IOError = 5,
   UnknownError = 9,
   NotImplemented = 10,
+  PlasmaObjectExists = 20,
+  PlasmaObjectNonexistent = 21,
+  PlasmaStoreFull = 22
 };
 
 class ARROW_EXPORT Status {
@@ -129,6 +132,18 @@ class ARROW_EXPORT Status {
     return Status(StatusCode::IOError, msg, -1);
   }
 
+  static Status PlasmaObjectExists(const std::string& msg) {
+    return Status(StatusCode::PlasmaObjectExists, msg, -1);
+  }
+
+  static Status PlasmaObjectNonexistent(const std::string& msg) {
+    return Status(StatusCode::PlasmaObjectNonexistent, msg, -1);
+  }
+
+  static Status PlasmaStoreFull(const std::string& msg) {
+    return Status(StatusCode::PlasmaStoreFull, msg, -1);
+  }
+
   // Returns true iff the status indicates success.
   bool ok() const { return (state_ == NULL); }
 
@@ -139,6 +154,14 @@ class ARROW_EXPORT Status {
   bool IsTypeError() const { return code() == StatusCode::TypeError; }
   bool IsUnknownError() const { return code() == StatusCode::UnknownError; }
   bool IsNotImplemented() const { return code() == StatusCode::NotImplemented; }
+  // An object with this object ID already exists in the plasma store.
+  bool IsPlasmaObjectExists() const { return code() == StatusCode::PlasmaObjectExists; }
+  // An object was requested that doesn't exist in the plasma store.
+  bool IsPlasmaObjectNonexistent() const {
+    return code() == StatusCode::PlasmaObjectNonexistent;
+  }
+  // An object is too large to fit into the plasma store.
+  bool IsPlasmaStoreFull() const { return code() == StatusCode::PlasmaStoreFull; }
 
   // Return a string representation of this status suitable for printing.
   // Returns the string "OK" for success.

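The three Plasma-specific codes added above follow the existing factory/predicate
pattern of arrow::Status. A minimal sketch, not part of this patch, of how a
caller branches on them (CreateOrReport is a hypothetical helper):

    #include <iostream>
    #include "arrow/status.h"

    // Hypothetical helper: surface a Plasma failure mode as a Status.
    arrow::Status CreateOrReport(bool already_exists) {
      if (already_exists) {
        return arrow::Status::PlasmaObjectExists("object ID already present");
      }
      return arrow::Status::OK();
    }

    int main() {
      arrow::Status s = CreateOrReport(true);
      if (s.IsPlasmaObjectExists()) {
        // A caller could fall back to fetching the existing object here.
        std::cerr << s.ToString() << std::endl;
      }
      return 0;
    }
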
http://git-wip-us.apache.org/repos/asf/arrow/blob/5e343098/cpp/src/arrow/util/logging.h
----------------------------------------------------------------------
diff --git a/cpp/src/arrow/util/logging.h b/cpp/src/arrow/util/logging.h
index 697d47c..49f1699 100644
--- a/cpp/src/arrow/util/logging.h
+++ b/cpp/src/arrow/util/logging.h
@@ -30,6 +30,7 @@ namespace arrow {
 
 // Log levels. LOG ignores them, so their values are arbitrary.
 
+#define ARROW_DEBUG (-1)
 #define ARROW_INFO 0
 #define ARROW_WARNING 1
 #define ARROW_ERROR 2
@@ -38,10 +39,9 @@ namespace arrow {
 #define ARROW_LOG_INTERNAL(level) ::arrow::internal::CerrLog(level)
 #define ARROW_LOG(level) ARROW_LOG_INTERNAL(ARROW_##level)
 
-#define ARROW_CHECK(condition)                           \
-  (condition) ? 0                                        \
-              : ::arrow::internal::FatalLog(ARROW_FATAL) \
-                    << __FILE__ << __LINE__ << " Check failed: " #condition " "
+#define ARROW_CHECK(condition)                               \
+  (condition) ? 0 : ::arrow::internal::FatalLog(ARROW_FATAL) \
+                        << __FILE__ << __LINE__ << " Check failed: " #condition " "
 
 #ifdef NDEBUG
 #define ARROW_DFATAL ARROW_WARNING

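Both macros touched by this hunk are exercised by the sketch below (illustrative,
not part of the patch): ARROW_LOG(DEBUG) resolves to the newly added ARROW_DEBUG
level, and ARROW_CHECK streams through FatalLog when its condition fails.

    #include "arrow/util/logging.h"

    void ReadHeader(int fd) {
      // Logs through CerrLog at the new ARROW_DEBUG (-1) level.
      ARROW_LOG(DEBUG) << "reading header from fd " << fd;
      // Aborts the process with a fatal log message if the condition is false.
      ARROW_CHECK(fd >= 0) << "invalid file descriptor: " << fd;
    }
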
http://git-wip-us.apache.org/repos/asf/arrow/blob/5e343098/cpp/src/plasma/CMakeLists.txt
----------------------------------------------------------------------
diff --git a/cpp/src/plasma/CMakeLists.txt b/cpp/src/plasma/CMakeLists.txt
new file mode 100644
index 0000000..992c33e
--- /dev/null
+++ b/cpp/src/plasma/CMakeLists.txt
@@ -0,0 +1,112 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+cmake_minimum_required(VERSION 2.8)
+
+project(plasma)
+
+find_package(PythonLibsNew REQUIRED)
+find_package(Threads)
+
+option(PLASMA_PYTHON
+  "Build the Plasma Python extensions"
+  OFF)
+
+if(APPLE)
+  SET(CMAKE_SHARED_LIBRARY_SUFFIX ".so")
+endif(APPLE)
+
+include_directories(SYSTEM ${PYTHON_INCLUDE_DIRS})
+include_directories("${FLATBUFFERS_INCLUDE_DIR}" "${CMAKE_CURRENT_LIST_DIR}/" "${CMAKE_CURRENT_LIST_DIR}/thirdparty/" "${CMAKE_CURRENT_LIST_DIR}/../")
+
+set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -D_XOPEN_SOURCE=500 -D_POSIX_C_SOURCE=200809L")
+
+set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wno-conversion")
+
+# Compile flatbuffers
+
+set(PLASMA_FBS_SRC "${CMAKE_CURRENT_LIST_DIR}/format/plasma.fbs" "${CMAKE_CURRENT_LIST_DIR}/format/common.fbs")
+set(OUTPUT_DIR ${CMAKE_CURRENT_LIST_DIR}/format/)
+
+set(PLASMA_FBS_OUTPUT_FILES
+  "${OUTPUT_DIR}/common_generated.h"
+  "${OUTPUT_DIR}/plasma_generated.h")
+
+add_custom_command(
+  OUTPUT ${PLASMA_FBS_OUTPUT_FILES}
+  # The --gen-object-api flag generates a C++ class MessageT for each
+  # flatbuffers message Message, which can be used to store deserialized
+  # messages in data structures. This is currently used for ObjectInfo for
+  # example.
+  COMMAND ${FLATBUFFERS_COMPILER} -c -o ${OUTPUT_DIR} ${PLASMA_FBS_SRC} --gen-object-api
+  DEPENDS ${PLASMA_FBS_SRC}
+  COMMENT "Running flatc compiler on ${PLASMA_FBS_SRC}"
+  VERBATIM)
+
+add_custom_target(gen_plasma_fbs DEPENDS ${PLASMA_FBS_OUTPUT_FILES})
+
+add_dependencies(gen_plasma_fbs flatbuffers_ep)
+
+if(UNIX AND NOT APPLE)
+  link_libraries(rt)
+endif()
+
+set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIC")
+
+set_source_files_properties(thirdparty/dlmalloc.c PROPERTIES COMPILE_FLAGS -Wno-all)
+set_source_files_properties(extension.cc PROPERTIES COMPILE_FLAGS -Wno-strict-aliasing)
+
+set(PLASMA_SRCS
+  client.cc
+  common.cc
+  eviction_policy.cc
+  events.cc
+  fling.cc
+  io.cc
+  malloc.cc
+  plasma.cc
+  protocol.cc
+  thirdparty/ae/ae.c
+  thirdparty/xxhash.cc)
+
+ADD_ARROW_LIB(plasma
+  SOURCES ${PLASMA_SRCS}
+  DEPENDENCIES gen_plasma_fbs
+  SHARED_LINK_LIBS ${FLATBUFFERS_STATIC_LIB} ${CMAKE_THREAD_LIBS_INIT} arrow_static
+  STATIC_LINK_LIBS ${FLATBUFFERS_STATIC_LIB} ${CMAKE_THREAD_LIBS_INIT} arrow_static)
+
+# The optimization flag -O3 is suggested by dlmalloc.c, which is #included in
+# malloc.cc; we set it here regardless of whether we do a debug or release build.
+set_source_files_properties(malloc.cc PROPERTIES COMPILE_FLAGS "-Wno-error=conversion -O3")
+
+add_executable(plasma_store store.cc)
+target_link_libraries(plasma_store plasma_static)
+
+ADD_ARROW_TEST(test/serialization_tests)
+ARROW_TEST_LINK_LIBRARIES(test/serialization_tests plasma_static)
+ADD_ARROW_TEST(test/client_tests)
+ARROW_TEST_LINK_LIBRARIES(test/client_tests plasma_static)
+
+if(PLASMA_PYTHON)
+  add_library(plasma_extension SHARED extension.cc)
+
+  if(APPLE)
+    target_link_libraries(plasma_extension plasma_static "-undefined dynamic_lookup")
+  else(APPLE)
+    target_link_libraries(plasma_extension plasma_static -Wl,--whole-archive ${FLATBUFFERS_STATIC_LIB} -Wl,--no-whole-archive)
+  endif(APPLE)
+endif()

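The --gen-object-api flag noted in the flatc rule above additionally emits, for
each flatbuffers table Message, a plain C++ struct MessageT that owns its data.
A hedged sketch of the difference, assuming the ObjectInfo table mentioned in
that comment is generated into common_generated.h (the header choice and the
function below are illustrative):

    #include "flatbuffers/flatbuffers.h"
    #include "format/common_generated.h"

    void StashInfo(const uint8_t* wire_buffer) {
      // Accessor API: a zero-copy view into the wire buffer.
      auto info = flatbuffers::GetRoot<ObjectInfo>(wire_buffer);
      // Object API: copy into an owning struct that outlives wire_buffer and
      // can be stored in ordinary containers.
      ObjectInfoT owned;
      info->UnPackTo(&owned);
    }
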
http://git-wip-us.apache.org/repos/asf/arrow/blob/5e343098/cpp/src/plasma/client.cc
----------------------------------------------------------------------
diff --git a/cpp/src/plasma/client.cc b/cpp/src/plasma/client.cc
new file mode 100644
index 0000000..dcb78e7
--- /dev/null
+++ b/cpp/src/plasma/client.cc
@@ -0,0 +1,557 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// PLASMA CLIENT: Client library for using the plasma store and manager
+
+#include "plasma/client.h"
+
+#ifdef _WIN32
+#include <Win32_Interop/win32_types.h>
+#endif
+
+#include <assert.h>
+#include <fcntl.h>
+#include <netinet/in.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <strings.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <sys/socket.h>
+#include <sys/types.h>
+#include <sys/un.h>
+#include <unistd.h>
+
+#include <algorithm>
+#include <thread>
+#include <vector>
+
+#include "plasma/common.h"
+#include "plasma/fling.h"
+#include "plasma/io.h"
+#include "plasma/plasma.h"
+#include "plasma/protocol.h"
+
+#define XXH_STATIC_LINKING_ONLY
+#include "thirdparty/xxhash.h"
+
+#define XXH64_DEFAULT_SEED 0
+
+// Number of threads used for memcpy and hash computations.
+constexpr int64_t kThreadPoolSize = 8;
+constexpr int64_t kBytesInMB = 1 << 20;
+static std::vector<std::thread> threadpool_(kThreadPoolSize);
+
+// If the file descriptor fd has been mmapped in this client process before,
+// return the pointer that was returned by mmap, otherwise mmap it and store the
+// pointer in a hash table.
+uint8_t* PlasmaClient::lookup_or_mmap(int fd, int store_fd_val, int64_t map_size) {
+  auto entry = mmap_table_.find(store_fd_val);
+  if (entry != mmap_table_.end()) {
+    close(fd);
+    return entry->second.pointer;
+  } else {
+    uint8_t* result = reinterpret_cast<uint8_t*>(
+        mmap(NULL, map_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0));
+    // TODO(pcm): Don't fail here, instead return a Status.
+    if (result == MAP_FAILED) { ARROW_LOG(FATAL) << "mmap failed"; }
+    close(fd);
+    ClientMmapTableEntry& entry = mmap_table_[store_fd_val];
+    entry.pointer = result;
+    entry.length = map_size;
+    entry.count = 0;
+    return result;
+  }
+}
+
+// Get a pointer to a file that we know has been memory mapped in this client
+// process before.
+uint8_t* PlasmaClient::lookup_mmapped_file(int store_fd_val) {
+  auto entry = mmap_table_.find(store_fd_val);
+  ARROW_CHECK(entry != mmap_table_.end());
+  return entry->second.pointer;
+}
+
+void PlasmaClient::increment_object_count(
+    const ObjectID& object_id, PlasmaObject* object, bool is_sealed) {
+  // Increment the count of the object to track the fact that it is being used.
+  // The corresponding decrement should happen in PlasmaClient::Release.
+  auto elem = objects_in_use_.find(object_id);
+  ObjectInUseEntry* object_entry;
+  if (elem == objects_in_use_.end()) {
+    // Add this object ID to the hash table of object IDs in use. The
+    // corresponding call to free happens in PlasmaClient::Release.
+    objects_in_use_[object_id] =
+        std::unique_ptr<ObjectInUseEntry>(new ObjectInUseEntry());
+    objects_in_use_[object_id]->object = *object;
+    objects_in_use_[object_id]->count = 0;
+    objects_in_use_[object_id]->is_sealed = is_sealed;
+    object_entry = objects_in_use_[object_id].get();
+    // Increment the count of the number of objects in the memory-mapped file
+    // that are being used. The corresponding decrement should happen in
+    // PlasmaClient::Release.
+    auto entry = mmap_table_.find(object->handle.store_fd);
+    ARROW_CHECK(entry != mmap_table_.end());
+    ARROW_CHECK(entry->second.count >= 0);
+    // Update the in_use_object_bytes_.
+    in_use_object_bytes_ +=
+        (object_entry->object.data_size + object_entry->object.metadata_size);
+    entry->second.count += 1;
+  } else {
+    object_entry = elem->second.get();
+    ARROW_CHECK(object_entry->count > 0);
+  }
+  // Increment the count of the number of instances of this object that are
+  // being used by this client. The corresponding decrement should happen in
+  // PlasmaClient::Release.
+  object_entry->count += 1;
+}
+
+Status PlasmaClient::Create(const ObjectID& object_id, int64_t data_size,
+    uint8_t* metadata, int64_t metadata_size, uint8_t** data) {
+  ARROW_LOG(DEBUG) << "called plasma_create on conn " << store_conn_ << " with size "
+                   << data_size << " and metadata size " << metadata_size;
+  RETURN_NOT_OK(SendCreateRequest(store_conn_, object_id, data_size, metadata_size));
+  std::vector<uint8_t> buffer;
+  RETURN_NOT_OK(PlasmaReceive(store_conn_, MessageType_PlasmaCreateReply, &buffer));
+  ObjectID id;
+  PlasmaObject object;
+  RETURN_NOT_OK(ReadCreateReply(buffer.data(), &id, &object));
+  // If the CreateReply included an error, then the store will not send a file
+  // descriptor.
+  int fd = recv_fd(store_conn_);
+  ARROW_CHECK(fd >= 0) << "recv not successful";
+  ARROW_CHECK(object.data_size == data_size);
+  ARROW_CHECK(object.metadata_size == metadata_size);
+  // The metadata should come right after the data.
+  ARROW_CHECK(object.metadata_offset == object.data_offset + data_size);
+  *data = lookup_or_mmap(fd, object.handle.store_fd, object.handle.mmap_size) +
+          object.data_offset;
+  // If plasma_create is being called from a transfer, then we will not copy the
+  // metadata here. The metadata will be written along with the data streamed
+  // from the transfer.
+  if (metadata != NULL) {
+    // Copy the metadata to the buffer.
+    memcpy(*data + object.data_size, metadata, metadata_size);
+  }
+  // Increment the count of the number of instances of this object that this
+  // client is using. A call to PlasmaClient::Release is required to decrement
+  // this count. Cache the reference to the object.
+  increment_object_count(object_id, &object, false);
+  // We increment the count a second time (and the corresponding decrement will
+  // happen in a PlasmaClient::Release call in plasma_seal) so even if the buffer
+  // returned by PlasmaClient::Create goes out of scope, the object does not get
+  // released before the call to PlasmaClient::Seal happens.
+  increment_object_count(object_id, &object, false);
+  return Status::OK();
+}
+
+Status PlasmaClient::Get(const ObjectID* object_ids, int64_t num_objects,
+    int64_t timeout_ms, ObjectBuffer* object_buffers) {
+  // Fill out the info for the objects that are already in use locally.
+  bool all_present = true;
+  for (int i = 0; i < num_objects; ++i) {
+    auto object_entry = objects_in_use_.find(object_ids[i]);
+    if (object_entry == objects_in_use_.end()) {
+      // This object is not currently in use by this client, so we need to send
+      // a request to the store.
+      all_present = false;
+      // Make a note to ourselves that the object is not present.
+      object_buffers[i].data_size = -1;
+    } else {
+      // NOTE: If the object is still unsealed, we will deadlock, since we must
+      // have been the one who created it.
+      ARROW_CHECK(object_entry->second->is_sealed)
+          << "Plasma client called get on an unsealed object that it created";
+      PlasmaObject* object = &object_entry->second->object;
+      object_buffers[i].data = lookup_mmapped_file(object->handle.store_fd);
+      object_buffers[i].data = object_buffers[i].data + object->data_offset;
+      object_buffers[i].data_size = object->data_size;
+      object_buffers[i].metadata = object_buffers[i].data + object->data_size;
+      object_buffers[i].metadata_size = object->metadata_size;
+      // Increment the count of the number of instances of this object that this
+      // client is using. A call to PlasmaClient::Release is required to
+      // decrement this count. Cache the reference to the object.
+      increment_object_count(object_ids[i], object, true);
+    }
+  }
+
+  if (all_present) { return Status::OK(); }
+
+  // If we get here, then the objects aren't all currently in use by this
+  // client, so we need to send a request to the plasma store.
+  RETURN_NOT_OK(SendGetRequest(store_conn_, object_ids, num_objects, timeout_ms));
+  std::vector<uint8_t> buffer;
+  RETURN_NOT_OK(PlasmaReceive(store_conn_, MessageType_PlasmaGetReply, &buffer));
+  std::vector<ObjectID> received_object_ids(num_objects);
+  std::vector<PlasmaObject> object_data(num_objects);
+  PlasmaObject* object;
+  RETURN_NOT_OK(ReadGetReply(
+      buffer.data(), received_object_ids.data(), object_data.data(), num_objects));
+
+  for (int i = 0; i < num_objects; ++i) {
+    DCHECK(received_object_ids[i] == object_ids[i]);
+    object = &object_data[i];
+    if (object_buffers[i].data_size != -1) {
+      // If the object was already in use by the client, then the store should
+      // have returned it.
+      DCHECK_NE(object->data_size, -1);
+      // We won't use this file descriptor, but the store sent us one, so we
+      // need to receive it and then close it right away so we don't leak file
+      // descriptors.
+      int fd = recv_fd(store_conn_);
+      close(fd);
+      ARROW_CHECK(fd >= 0);
+      // We've already filled out the information for this object, so we can
+      // just continue.
+      continue;
+    }
+    // If we are here, the object was not currently in use, so we need to
+    // process the reply from the object store.
+    if (object->data_size != -1) {
+      // The object was retrieved. The user will be responsible for releasing
+      // this object.
+      int fd = recv_fd(store_conn_);
+      ARROW_CHECK(fd >= 0);
+      object_buffers[i].data =
+          lookup_or_mmap(fd, object->handle.store_fd, object->handle.mmap_size);
+      // Finish filling out the return values.
+      object_buffers[i].data = object_buffers[i].data + object->data_offset;
+      object_buffers[i].data_size = object->data_size;
+      object_buffers[i].metadata = object_buffers[i].data + object->data_size;
+      object_buffers[i].metadata_size = object->metadata_size;
+      // Increment the count of the number of instances of this object that this
+      // client is using. A call to PlasmaClient::Release is required to
+      // decrement this count. Cache the reference to the object.
+      increment_object_count(received_object_ids[i], object, true);
+    } else {
+      // The object was not retrieved. Make sure we already put a -1 here to
+      // indicate that the object was not retrieved. The caller is not
+      // responsible for releasing this object.
+      DCHECK_EQ(object_buffers[i].data_size, -1);
+      object_buffers[i].data_size = -1;
+    }
+  }
+  return Status::OK();
+}
+
+/// This is a helper method for implementing plasma_release. We maintain a buffer
+/// of release calls and only perform them once the buffer becomes full (as
+/// judged by the aggregate sizes of the objects). There may be multiple release
+/// calls for the same object ID in the buffer. In this case, the first release
+/// calls will not do anything. The client will only send a message to the store
+/// releasing the object when the client is truly done with the object.
+///
+/// @param object_id The object ID to attempt to release.
+Status PlasmaClient::PerformRelease(const ObjectID& object_id) {
+  // Decrement the count of the number of instances of this object that are
+  // being used by this client. The corresponding increment should have happened
+  // in PlasmaClient::Get.
+  auto object_entry = objects_in_use_.find(object_id);
+  ARROW_CHECK(object_entry != objects_in_use_.end());
+  object_entry->second->count -= 1;
+  ARROW_CHECK(object_entry->second->count >= 0);
+  // Check if the client is no longer using this object.
+  if (object_entry->second->count == 0) {
+    // Decrement the count of the number of objects in this memory-mapped file
+    // that the client is using. The corresponding increment should have
+    // happened in plasma_get.
+    int fd = object_entry->second->object.handle.store_fd;
+    auto entry = mmap_table_.find(fd);
+    ARROW_CHECK(entry != mmap_table_.end());
+    entry->second.count -= 1;
+    ARROW_CHECK(entry->second.count >= 0);
+    // If none are being used then unmap the file.
+    if (entry->second.count == 0) {
+      munmap(entry->second.pointer, entry->second.length);
+      // Remove the corresponding entry from the hash table.
+      mmap_table_.erase(fd);
+    }
+    // Tell the store that the client no longer needs the object.
+    RETURN_NOT_OK(SendReleaseRequest(store_conn_, object_id));
+    // Update the in_use_object_bytes_.
+    in_use_object_bytes_ -= (object_entry->second->object.data_size +
+                             object_entry->second->object.metadata_size);
+    DCHECK_GE(in_use_object_bytes_, 0);
+    // Remove the entry from the hash table of objects currently in use.
+    objects_in_use_.erase(object_id);
+  }
+  return Status::OK();
+}
+
+Status PlasmaClient::Release(const ObjectID& object_id) {
+  // Add the new object to the release history.
+  release_history_.push_front(object_id);
+  // If there are too many bytes in use by the client or if there are too many
+  // pending release calls, and there are at least some pending release calls in
+  // the release_history list, then release some objects.
+  while ((in_use_object_bytes_ > std::min(kL3CacheSizeBytes, store_capacity_ / 100) ||
+             release_history_.size() > config_.release_delay) &&
+         release_history_.size() > 0) {
+    // Perform a release for the object ID for the first pending release.
+    RETURN_NOT_OK(PerformRelease(release_history_.back()));
+    // Remove the last entry from the release history.
+    release_history_.pop_back();
+  }
+  return Status::OK();
+}
+
+// This method is used to query whether the plasma store contains an object.
+Status PlasmaClient::Contains(const ObjectID& object_id, bool* has_object) {
+  // Check if we already have a reference to the object.
+  if (objects_in_use_.count(object_id) > 0) {
+    *has_object = true;
+  } else {
+    // If we don't already have a reference to the object, check with the store
+    // to see if we have the object.
+    RETURN_NOT_OK(SendContainsRequest(store_conn_, object_id));
+    std::vector<uint8_t> buffer;
+    RETURN_NOT_OK(PlasmaReceive(store_conn_, MessageType_PlasmaContainsReply, &buffer));
+    ObjectID object_id2;
+    RETURN_NOT_OK(ReadContainsReply(buffer.data(), &object_id2, has_object));
+  }
+  return Status::OK();
+}
+
+static void ComputeBlockHash(const unsigned char* data, int64_t nbytes, uint64_t* hash) {
+  XXH64_state_t hash_state;
+  XXH64_reset(&hash_state, XXH64_DEFAULT_SEED);
+  XXH64_update(&hash_state, data, nbytes);
+  *hash = XXH64_digest(&hash_state);
+}
+
+static inline bool compute_object_hash_parallel(
+    XXH64_state_t* hash_state, const unsigned char* data, int64_t nbytes) {
+  // Note that this function will likely be faster if the address of data is
+  // aligned on a 64-byte boundary.
+  const int num_threads = kThreadPoolSize;
+  uint64_t threadhash[num_threads + 1];
+  const uint64_t data_address = reinterpret_cast<uint64_t>(data);
+  const uint64_t num_blocks = nbytes / BLOCK_SIZE;
+  const uint64_t chunk_size = (num_blocks / num_threads) * BLOCK_SIZE;
+  const uint64_t right_address = data_address + chunk_size * num_threads;
+  const uint64_t suffix = (data_address + nbytes) - right_address;
+  // Now the data layout is | k * num_threads * block_size | suffix | ==
+  // | num_threads * chunk_size | suffix |, where chunk_size = k * block_size.
+  // Each thread gets a "chunk" of k blocks, except the suffix thread.
+
+  for (int i = 0; i < num_threads; i++) {
+    threadpool_[i] = std::thread(ComputeBlockHash,
+        reinterpret_cast<uint8_t*>(data_address) + i * chunk_size, chunk_size,
+        &threadhash[i]);
+  }
+  ComputeBlockHash(
+      reinterpret_cast<uint8_t*>(right_address), suffix, &threadhash[num_threads]);
+
+  // Join the threads.
+  for (auto& t : threadpool_) {
+    if (t.joinable()) { t.join(); }
+  }
+
+  XXH64_update(hash_state, (unsigned char*)threadhash, sizeof(threadhash));
+  return true;
+}
+
+static uint64_t compute_object_hash(const ObjectBuffer& obj_buffer) {
+  XXH64_state_t hash_state;
+  XXH64_reset(&hash_state, XXH64_DEFAULT_SEED);
+  if (obj_buffer.data_size >= kBytesInMB) {
+    compute_object_hash_parallel(
+        &hash_state, (unsigned char*)obj_buffer.data, obj_buffer.data_size);
+  } else {
+    XXH64_update(&hash_state, (unsigned char*)obj_buffer.data, obj_buffer.data_size);
+  }
+  XXH64_update(
+      &hash_state, (unsigned char*)obj_buffer.metadata, obj_buffer.metadata_size);
+  return XXH64_digest(&hash_state);
+}
+
+bool plasma_compute_object_hash(
+    PlasmaClient* conn, ObjectID object_id, unsigned char* digest) {
+  // Get the plasma object data. We pass in a timeout of 0 to indicate that
+  // the operation should timeout immediately.
+  ObjectBuffer object_buffer;
+  ARROW_CHECK_OK(conn->Get(&object_id, 1, 0, &object_buffer));
+  // If the object was not retrieved, return false.
+  if (object_buffer.data_size == -1) { return false; }
+  // Compute the hash.
+  uint64_t hash = compute_object_hash(object_buffer);
+  memcpy(digest, &hash, sizeof(hash));
+  // Release the plasma object.
+  ARROW_CHECK_OK(conn->Release(object_id));
+  return true;
+}
+
+Status PlasmaClient::Seal(const ObjectID& object_id) {
+  // Make sure this client has a reference to the object before sending the
+  // request to Plasma.
+  auto object_entry = objects_in_use_.find(object_id);
+  ARROW_CHECK(object_entry != objects_in_use_.end())
+      << "Plasma client called seal an object without a reference to it";
+  ARROW_CHECK(!object_entry->second->is_sealed)
+      << "Plasma client called seal an already sealed object";
+  object_entry->second->is_sealed = true;
+  /// Send the seal request to Plasma.
+  static unsigned char digest[kDigestSize];
+  ARROW_CHECK(plasma_compute_object_hash(this, object_id, &digest[0]));
+  RETURN_NOT_OK(SendSealRequest(store_conn_, object_id, &digest[0]));
+  // We call PlasmaClient::Release to decrement the number of instances of this
+  // object that are currently being used by this client. The corresponding
+  // increment happened in plasma_create and was used to ensure that the object
+  // was not released before the call to PlasmaClient::Seal.
+  return Release(object_id);
+}
+
+Status PlasmaClient::Delete(const ObjectID& object_id) {
+  // TODO(rkn): In the future, we can use this method to give hints to the
+  // eviction policy about when an object will no longer be needed.
+  return Status::NotImplemented("PlasmaClient::Delete is not implemented.");
+}
+
+Status PlasmaClient::Evict(int64_t num_bytes, int64_t& num_bytes_evicted) {
+  // Send a request to the store to evict objects.
+  RETURN_NOT_OK(SendEvictRequest(store_conn_, num_bytes));
+  // Wait for a response with the number of bytes actually evicted.
+  std::vector<uint8_t> buffer;
+  int64_t type;
+  RETURN_NOT_OK(ReadMessage(store_conn_, &type, &buffer));
+  return ReadEvictReply(buffer.data(), num_bytes_evicted);
+}
+
+Status PlasmaClient::Subscribe(int* fd) {
+  int sock[2];
+  // Create a non-blocking socket pair. This will only be used to send
+  // notifications from the Plasma store to the client.
+  ARROW_CHECK(socketpair(AF_UNIX, SOCK_STREAM, 0, sock) == 0);
+  // Make the socket non-blocking.
+  int flags = fcntl(sock[1], F_GETFL, 0);
+  ARROW_CHECK(fcntl(sock[1], F_SETFL, flags | O_NONBLOCK) == 0);
+  // Tell the Plasma store about the subscription.
+  RETURN_NOT_OK(SendSubscribeRequest(store_conn_));
+  // Send the file descriptor that the Plasma store should use to push
+  // notifications about sealed objects to this client.
+  ARROW_CHECK(send_fd(store_conn_, sock[1]) >= 0);
+  close(sock[1]);
+  // Return the file descriptor that the client should use to read notifications
+  // about sealed objects.
+  *fd = sock[0];
+  return Status::OK();
+}
+
+Status PlasmaClient::Connect(const std::string& store_socket_name,
+    const std::string& manager_socket_name, int release_delay) {
+  store_conn_ = connect_ipc_sock_retry(store_socket_name, -1, -1);
+  if (manager_socket_name != "") {
+    manager_conn_ = connect_ipc_sock_retry(manager_socket_name, -1, -1);
+  } else {
+    manager_conn_ = -1;
+  }
+  config_.release_delay = release_delay;
+  in_use_object_bytes_ = 0;
+  // Send a ConnectRequest to the store to get its memory capacity.
+  RETURN_NOT_OK(SendConnectRequest(store_conn_));
+  std::vector<uint8_t> buffer;
+  RETURN_NOT_OK(PlasmaReceive(store_conn_, MessageType_PlasmaConnectReply, &buffer));
+  RETURN_NOT_OK(ReadConnectReply(buffer.data(), &store_capacity_));
+  return Status::OK();
+}
+
+Status PlasmaClient::Disconnect() {
+  // NOTE: We purposefully do not finish sending release calls for objects in
+  // use, so that we don't duplicate PlasmaClient::Release calls (when handling
+  // a SIGTERM, for example).
+
+  // Close the connections to Plasma. The Plasma store will release the objects
+  // that were in use by us when handling the SIGPIPE.
+  close(store_conn_);
+  if (manager_conn_ >= 0) { close(manager_conn_); }
+  return Status::OK();
+}
+
+#define h_addr h_addr_list[0]
+
+Status PlasmaClient::Transfer(const char* address, int port, const ObjectID& object_id) {
+  return SendDataRequest(manager_conn_, object_id, address, port);
+}
+
+Status PlasmaClient::Fetch(int num_object_ids, const ObjectID* object_ids) {
+  ARROW_CHECK(manager_conn_ >= 0);
+  return SendFetchRequest(manager_conn_, object_ids, num_object_ids);
+}
+
+int PlasmaClient::get_manager_fd() {
+  return manager_conn_;
+}
+
+Status PlasmaClient::Info(const ObjectID& object_id, int* object_status) {
+  ARROW_CHECK(manager_conn_ >= 0);
+
+  RETURN_NOT_OK(SendStatusRequest(manager_conn_, &object_id, 1));
+  std::vector<uint8_t> buffer;
+  RETURN_NOT_OK(PlasmaReceive(manager_conn_, MessageType_PlasmaStatusReply, &buffer));
+  ObjectID id;
+  RETURN_NOT_OK(ReadStatusReply(buffer.data(), &id, object_status, 1));
+  ARROW_CHECK(object_id == id);
+  return Status::OK();
+}
+
+Status PlasmaClient::Wait(int64_t num_object_requests, ObjectRequest* object_requests,
+    int num_ready_objects, int64_t timeout_ms, int* num_objects_ready) {
+  ARROW_CHECK(manager_conn_ >= 0);
+  ARROW_CHECK(num_object_requests > 0);
+  ARROW_CHECK(num_ready_objects > 0);
+  ARROW_CHECK(num_ready_objects <= num_object_requests);
+
+  for (int i = 0; i < num_object_requests; ++i) {
+    ARROW_CHECK(object_requests[i].type == PLASMA_QUERY_LOCAL ||
+                object_requests[i].type == PLASMA_QUERY_ANYWHERE);
+  }
+
+  RETURN_NOT_OK(SendWaitRequest(manager_conn_, object_requests, num_object_requests,
+      num_ready_objects, timeout_ms));
+  std::vector<uint8_t> buffer;
+  RETURN_NOT_OK(PlasmaReceive(manager_conn_, MessageType_PlasmaWaitReply, &buffer));
+  RETURN_NOT_OK(ReadWaitReply(buffer.data(), object_requests, &num_ready_objects));
+
+  *num_objects_ready = 0;
+  for (int i = 0; i < num_object_requests; ++i) {
+    int type = object_requests[i].type;
+    int status = object_requests[i].status;
+    switch (type) {
+      case PLASMA_QUERY_LOCAL:
+        if (status == ObjectStatus_Local) { *num_objects_ready += 1; }
+        break;
+      case PLASMA_QUERY_ANYWHERE:
+        if (status == ObjectStatus_Local || status == ObjectStatus_Remote) {
+          *num_objects_ready += 1;
+        } else {
+          ARROW_CHECK(status == ObjectStatus_Nonexistent);
+        }
+        break;
+      default:
+        ARROW_LOG(FATAL) << "This code should be unreachable.";
+    }
+  }
+  return Status::OK();
+}

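Taken together, the methods above implement a create/seal/get/release lifecycle.
A minimal sketch of one round trip against a running store; the socket path is
an assumption, and the ObjectID construction is left abstract since common.h is
not shown here:

    #include <cstring>
    #include "plasma/client.h"

    int main() {
      PlasmaClient client;
      // Empty manager socket name: connect to the store only.
      ARROW_CHECK_OK(client.Connect("/tmp/plasma_socket", "",
                                    PLASMA_DEFAULT_RELEASE_DELAY));

      ObjectID id;  // assume this is filled with a unique 20-byte ID
      uint8_t metadata[] = {42};
      uint8_t* data;
      ARROW_CHECK_OK(client.Create(id, /*data_size=*/100, metadata,
                                   sizeof(metadata), &data));
      memset(data, 0, 100);             // write the payload into shared memory
      ARROW_CHECK_OK(client.Seal(id));  // now visible and immutable

      ObjectBuffer buffer;
      ARROW_CHECK_OK(client.Get(&id, 1, /*timeout_ms=*/-1, &buffer));
      ARROW_CHECK_OK(client.Release(id));  // pair every Get with a Release
      return client.Disconnect().ok() ? 0 : 1;
    }
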
http://git-wip-us.apache.org/repos/asf/arrow/blob/5e343098/cpp/src/plasma/client.h
----------------------------------------------------------------------
diff --git a/cpp/src/plasma/client.h b/cpp/src/plasma/client.h
new file mode 100644
index 0000000..fb3a161
--- /dev/null
+++ b/cpp/src/plasma/client.h
@@ -0,0 +1,343 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#ifndef PLASMA_CLIENT_H
+#define PLASMA_CLIENT_H
+
+#include <stdbool.h>
+#include <time.h>
+
+#include <deque>
+#include <string>
+
+#include "plasma/plasma.h"
+
+using arrow::Status;
+
+#define PLASMA_DEFAULT_RELEASE_DELAY 64
+
+// Use 100MB as an overestimate of the L3 cache size.
+constexpr int64_t kL3CacheSizeBytes = 100000000;
+
+/// Object buffer data structure.
+struct ObjectBuffer {
+  /// The size in bytes of the data object.
+  int64_t data_size;
+  /// The address of the data object.
+  uint8_t* data;
+  /// The metadata size in bytes.
+  int64_t metadata_size;
+  /// The address of the metadata.
+  uint8_t* metadata;
+};
+
+/// Configuration options for the plasma client.
+struct PlasmaClientConfig {
+  /// Number of release calls to buffer before the object is actually released.
+  /// This allows us to avoid invalidating the cpu cache on workers if objects
+  /// are reused across tasks.
+  size_t release_delay;
+};
+
+struct ClientMmapTableEntry {
+  /// The result of mmap for this file descriptor.
+  uint8_t* pointer;
+  /// The length of the memory-mapped file.
+  size_t length;
+  /// The number of objects in this memory-mapped file that are currently being
+  /// used by the client. When this count reaches zero, we unmap the file.
+  int count;
+};
+
+struct ObjectInUseEntry {
+  /// A count of the number of times this client has called PlasmaClient::Create
+  /// or PlasmaClient::Get on this object ID minus the number of calls to
+  /// PlasmaClient::Release. When this count reaches zero, we remove the entry
+  /// from the ObjectsInUse and decrement a count in the relevant
+  /// ClientMmapTableEntry.
+  int count;
+  /// Cached information to read the object.
+  PlasmaObject object;
+  /// A flag representing whether the object has been sealed.
+  bool is_sealed;
+};
+
+class PlasmaClient {
+ public:
+  /// Connect to the local plasma store and plasma manager. Return
+  /// the resulting connection.
+  ///
+  /// @param store_socket_name The name of the UNIX domain socket to use to
+  ///        connect to the Plasma store.
+  /// @param manager_socket_name The name of the UNIX domain socket to use to
+  ///        connect to the local Plasma manager. If this is "", then this
+  ///        function will not connect to a manager.
+  /// @param release_delay Number of released objects that are kept around
+  ///        and not evicted to avoid too many munmaps.
+  /// @return The return status.
+  Status Connect(const std::string& store_socket_name,
+      const std::string& manager_socket_name, int release_delay);
+
+  /// Create an object in the Plasma Store. Any metadata for this object must
+  /// be passed in when the object is created.
+  ///
+  /// @param object_id The ID to use for the newly created object.
+  /// @param data_size The size in bytes of the space to be allocated for this
+  ///        object's data (this does not include space used for metadata).
+  /// @param metadata The object's metadata. If there is no metadata, this
+  ///        pointer should be NULL.
+  /// @param metadata_size The size in bytes of the metadata. If there is no
+  ///        metadata, this should be 0.
+  /// @param data The address of the newly created object will be written here.
+  /// @return The return status.
+  Status Create(const ObjectID& object_id, int64_t data_size, uint8_t* metadata,
+      int64_t metadata_size, uint8_t** data);
+
+  /// Get some objects from the Plasma Store. This function will block until
+  /// the objects have all been created and sealed in the Plasma Store or the
+  /// timeout expires. The caller is responsible for releasing any retrieved
+  /// objects, but the caller should not release objects that were not
+  /// retrieved.
+  ///
+  /// @param object_ids The IDs of the objects to get.
+  /// @param num_objects The number of object IDs to get.
+  /// @param timeout_ms The amount of time in milliseconds to wait before this
+  ///        request times out. If this value is -1, then no timeout is set.
+  /// @param object_buffers An array where the results will be stored. If the
+  ///        data size field is -1, then the object was not retrieved.
+  /// @return The return status.
+  Status Get(const ObjectID* object_ids, int64_t num_objects, int64_t timeout_ms,
+      ObjectBuffer* object_buffers);
+
+  /// Tell Plasma that the client no longer needs the object. This should be
+  /// called after Get when the client is done with the object. After this call,
+  /// the address returned by Get is no longer valid. This should be called
+  /// once for each call to Get (with the same object ID).
+  ///
+  /// @param object_id The ID of the object that is no longer needed.
+  /// @return The return status.
+  Status Release(const ObjectID& object_id);
+
+  /// Check if the object store contains a particular object and the object has
+  /// been sealed. The result will be stored in has_object.
+  ///
+  /// @todo: We may want to indicate if the object has been created but not
+  /// sealed.
+  ///
+  /// @param object_id The ID of the object whose presence we are checking.
+  /// @param has_object The function will write true at this address if
+  ///        the object is present and false if it is not present.
+  /// @return The return status.
+  Status Contains(const ObjectID& object_id, bool* has_object);
+
+  /// Seal an object in the object store. The object will be immutable after
+  /// this call.
+  ///
+  /// @param object_id The ID of the object to seal.
+  /// @return The return status.
+  Status Seal(const ObjectID& object_id);
+
+  /// Delete an object from the object store. This currently assumes that the
+  /// object is present and has been sealed.
+  ///
+  /// @todo We may want to allow the deletion of objects that are not present or
+  ///       haven't been sealed.
+  ///
+  /// @param object_id The ID of the object to delete.
+  /// @return The return status.
+  Status Delete(const ObjectID& object_id);
+
+  /// Delete objects until we have freed up num_bytes bytes or there are no more
+  /// released objects that can be deleted.
+  ///
+  /// @param num_bytes The number of bytes to try to free up.
+  /// @param num_bytes_evicted Out parameter for the total number of bytes of
+  ///        space freed.
+  /// @return The return status.
+  Status Evict(int64_t num_bytes, int64_t& num_bytes_evicted);
+
+  /// Subscribe to notifications when objects are sealed in the object store.
+  /// Whenever an object is sealed, a message will be written to the client
+  /// socket that is returned by this method.
+  ///
+  /// @param fd Out parameter for the file descriptor the client should use to
+  ///        read notifications from the object store about sealed objects.
+  /// @return The return status.
+  Status Subscribe(int* fd);
+
+  /// Disconnect from the local plasma instance, including the local store and
+  /// manager.
+  ///
+  /// @return The return status.
+  Status Disconnect();
+
+  /// Attempt to initiate the transfer of some objects from remote Plasma
+  /// Stores. This method does not guarantee that the fetched objects will
+  /// arrive locally.
+  ///
+  /// For an object that is available in the local Plasma Store, this method
+  /// will not do anything. For an object that is not available locally, it
+  /// will check if the object is already being fetched. If so, it will not do
+  /// anything. If not, it will query the object table for a list of Plasma
+  /// Managers that have the object. The object table will return a non-empty
+  /// list, and this Plasma Manager will attempt to initiate transfers from one
+  /// of those Plasma Managers.
+  /// This function is non-blocking.
+  ///
+  /// This method is idempotent in the sense that it is ok to call it multiple
+  /// times.
+  ///
+  /// @param num_object_ids The number of object IDs fetch is being called on.
+  /// @param object_ids The IDs of the objects that fetch is being called on.
+  /// @return The return status.
+  Status Fetch(int num_object_ids, const ObjectID* object_ids);
+
+  /// Wait for (1) a specified number of objects to be available (sealed) in the
+  /// local Plasma Store or in a remote Plasma Store, or (2) for a timeout to
+  /// expire. This is a blocking call.
+  ///
+  /// @param num_object_requests Size of the object_requests array.
+  /// @param object_requests Object event array. Each element contains a request
+  ///        for a particular object_id. The type of request is specified in the
+  ///        "type" field.
+  ///        - A PLASMA_QUERY_LOCAL request is satisfied when object_id becomes
+  ///          available in the local Plasma Store. In this case, this function
+  ///          sets the "status" field to ObjectStatus_Local. Note, if the
+  ///          status is not ObjectStatus_Local, it will be
+  ///          ObjectStatus_Nonexistent, but the object may exist elsewhere in
+  ///          the system.
+  ///        - A PLASMA_QUERY_ANYWHERE request is satisfied when object_id
+  ///          becomes available either at the local Plasma Store or on a remote
+  ///          Plasma Store. In this case, the function sets the "status" field
+  ///          to ObjectStatus_Local or ObjectStatus_Remote.
+  /// @param num_ready_objects The number of requests in the object_requests
+  ///        array that must be satisfied before the function returns, unless
+  ///        it times out. The num_ready_objects should be no larger than
+  ///        num_object_requests.
+  /// @param timeout_ms Timeout value in milliseconds. If this timeout expires
+  ///        before num_ready_objects of the requests are satisfied, the
+  ///        function returns.
+  /// @param num_objects_ready Out parameter for the number of satisfied
+  ///        requests in the object_requests list. If the returned number is
+  ///        less than num_ready_objects, the timeout expired.
+  /// @return The return status.
+  Status Wait(int64_t num_object_requests, ObjectRequest* object_requests,
+      int num_ready_objects, int64_t timeout_ms, int* num_objects_ready);
+
+  /// Transfer local object to a different plasma manager.
+  ///
+  /// @param addr IP address of the plasma manager we are transferring to.
+  /// @param port Port of the plasma manager we are transferring to.
+  /// @param object_id ObjectID of the object we are transferring.
+  /// @return The return status.
+  Status Transfer(const char* addr, int port, const ObjectID& object_id);
+
+  /// Return the status of a given object. This method may query the object
+  /// table.
+  ///
+  /// @param object_id The ID of the object whose status we query.
+  /// @param object_status Out parameter for object status. Can take the
+  ///         following values.
+  ///         - PLASMA_CLIENT_LOCAL, if object is stored in the local Plasma
+  ///           Store.
+  ///         - PLASMA_CLIENT_TRANSFER, if the object is either currently being
+  ///           transferred or just scheduled.
+  ///         - PLASMA_CLIENT_REMOTE, if the object is stored at a remote
+  ///           Plasma Store.
+  ///         - PLASMA_CLIENT_DOES_NOT_EXIST, if the object doesn’t exist in the
+  ///           system.
+  /// @return The return status.
+  Status Info(const ObjectID& object_id, int* object_status);
+
+  /// Get the file descriptor for the socket connection to the plasma manager.
+  ///
+  /// @return The file descriptor for the manager connection. If there is no
+  ///         connection to the manager, this is -1.
+  int get_manager_fd();
+
+ private:
+  Status PerformRelease(const ObjectID& object_id);
+
+  uint8_t* lookup_or_mmap(int fd, int store_fd_val, int64_t map_size);
+
+  uint8_t* lookup_mmapped_file(int store_fd_val);
+
+  void increment_object_count(
+      const ObjectID& object_id, PlasmaObject* object, bool is_sealed);
+
+  /// File descriptor of the Unix domain socket that connects to the store.
+  int store_conn_;
+  /// File descriptor of the Unix domain socket that connects to the manager.
+  int manager_conn_;
+  /// Table of dlmalloc buffer files that have been memory mapped so far. This
+  /// is a hash table mapping a file descriptor to a struct containing the
+  /// address of the corresponding memory-mapped file.
+  std::unordered_map<int, ClientMmapTableEntry> mmap_table_;
+  /// A hash table of the object IDs that are currently being used by this
+  /// client.
+  std::unordered_map<ObjectID, std::unique_ptr<ObjectInUseEntry>, UniqueIDHasher>
+      objects_in_use_;
+  /// Object IDs of the last few release calls. This is a deque and is used
+  /// to delay releasing objects to see if they can be reused by subsequent
+  /// tasks, so we do not unnecessarily invalidate CPU caches.
+  /// TODO(pcm): Replace this with a proper LRU cache using the size of the
+  /// L3 cache.
+  std::deque<ObjectID> release_history_;
+  /// The number of bytes in the combined objects that are held in the
+  /// release history deque. If this is too large then the client starts
+  /// releasing objects.
+  int64_t in_use_object_bytes_;
+  /// Configuration options for the plasma client.
+  PlasmaClientConfig config_;
+  /// The amount of memory available to the Plasma store. The client needs this
+  /// information to make sure that it does not delay in releasing so much
+  /// memory that the store is unable to evict enough objects to free up space.
+  int64_t store_capacity_;
+};
+
+/// Compute the hash of an object in the object store.
+///
+/// @param conn The object containing the connection state.
+/// @param object_id The ID of the object we want to hash.
+/// @param digest A pointer at which to return the hash digest of the object.
+///        The pointer must have at least DIGEST_SIZE bytes allocated.
+/// @return A boolean representing whether the hash operation succeeded.
+bool plasma_compute_object_hash(
+    PlasmaClient* conn, ObjectID object_id, unsigned char* digest);
+
+#endif  // PLASMA_CLIENT_H
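
To make the Wait() semantics concrete, here is a minimal usage sketch (not
part of the patch itself); it assumes a connected PlasmaClient `client` and
two ObjectIDs `id1` and `id2` obtained elsewhere:

    ObjectRequest requests[2];
    requests[0].object_id = id1;
    requests[0].type = PLASMA_QUERY_LOCAL;     // satisfied only by a local copy
    requests[1].object_id = id2;
    requests[1].type = PLASMA_QUERY_ANYWHERE;  // a remote copy also counts
    int num_ready = 0;
    // Block until at least one of the two requests is satisfied or 1000 ms pass.
    arrow::Status s = client.Wait(2, requests, 1, 1000, &num_ready);
    ARROW_CHECK(s.ok());
    // Each requests[i].status now holds ObjectStatus_Local, ObjectStatus_Remote
    // or ObjectStatus_Nonexistent.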

http://git-wip-us.apache.org/repos/asf/arrow/blob/5e343098/cpp/src/plasma/common.cc
----------------------------------------------------------------------
diff --git a/cpp/src/plasma/common.cc b/cpp/src/plasma/common.cc
new file mode 100644
index 0000000..a09a963
--- /dev/null
+++ b/cpp/src/plasma/common.cc
@@ -0,0 +1,83 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include "plasma/common.h"
+
+#include <random>
+
+#include "format/plasma_generated.h"
+
+using arrow::Status;
+
+UniqueID UniqueID::from_random() {
+  UniqueID id;
+  uint8_t* data = id.mutable_data();
+  std::random_device engine;
+  for (int i = 0; i < kUniqueIDSize; i++) {
+    data[i] = static_cast<uint8_t>(engine());
+  }
+  return id;
+}
+
+UniqueID UniqueID::from_binary(const std::string& binary) {
+  UniqueID id;
+  std::memcpy(&id, binary.data(), sizeof(id));
+  return id;
+}
+
+const uint8_t* UniqueID::data() const {
+  return id_;
+}
+
+uint8_t* UniqueID::mutable_data() {
+  return id_;
+}
+
+std::string UniqueID::binary() const {
+  return std::string(reinterpret_cast<const char*>(id_), kUniqueIDSize);
+}
+
+std::string UniqueID::hex() const {
+  constexpr char hex[] = "0123456789abcdef";
+  std::string result;
+  for (int i = 0; i < kUniqueIDSize; i++) {
+    unsigned int val = id_[i];
+    result.push_back(hex[val >> 4]);
+    result.push_back(hex[val & 0xf]);
+  }
+  return result;
+}
+
+bool UniqueID::operator==(const UniqueID& rhs) const {
+  return std::memcmp(data(), rhs.data(), kUniqueIDSize) == 0;
+}
+
+Status plasma_error_status(int plasma_error) {
+  switch (plasma_error) {
+    case PlasmaError_OK:
+      return Status::OK();
+    case PlasmaError_ObjectExists:
+      return Status::PlasmaObjectExists("object already exists in the plasma store");
+    case PlasmaError_ObjectNonexistent:
+      return Status::PlasmaObjectNonexistent("object does not exist in the plasma store");
+    case PlasmaError_OutOfMemory:
+      return Status::PlasmaStoreFull("object does not fit in the plasma store");
+    default:
+      ARROW_LOG(FATAL) << "unknown plasma error code " << plasma_error;
+  }
+  return Status::OK();
+}
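
As a quick illustration of the UniqueID helpers above, the binary and hex
accessors round-trip as one would expect (a usage sketch, not part of the
patch):

    UniqueID id = UniqueID::from_random();
    std::string raw = id.binary();               // kUniqueIDSize (20) raw bytes
    UniqueID copy = UniqueID::from_binary(raw);  // memcpys the bytes back
    ARROW_CHECK(copy == id);
    std::string readable = id.hex();             // 40 lowercase hex characters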

http://git-wip-us.apache.org/repos/asf/arrow/blob/5e343098/cpp/src/plasma/common.h
----------------------------------------------------------------------
diff --git a/cpp/src/plasma/common.h b/cpp/src/plasma/common.h
new file mode 100644
index 0000000..85dc74b
--- /dev/null
+++ b/cpp/src/plasma/common.h
@@ -0,0 +1,63 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#ifndef PLASMA_COMMON_H
+#define PLASMA_COMMON_H
+
+#include <cstring>
+#include <string>
+// TODO(pcm): Convert getopt and sscanf in the store to use more idiomatic C++
+// and get rid of the next three lines:
+#ifndef __STDC_FORMAT_MACROS
+#define __STDC_FORMAT_MACROS
+#endif
+
+#include "arrow/status.h"
+#include "arrow/util/logging.h"
+
+constexpr int64_t kUniqueIDSize = 20;
+
+class UniqueID {
+ public:
+  static UniqueID from_random();
+  static UniqueID from_binary(const std::string& binary);
+  bool operator==(const UniqueID& rhs) const;
+  const uint8_t* data() const;
+  uint8_t* mutable_data();
+  std::string binary() const;
+  std::string hex() const;
+
+ private:
+  uint8_t id_[kUniqueIDSize];
+};
+
+static_assert(std::is_pod<UniqueID>::value, "UniqueID must be plain old data");
+
+struct UniqueIDHasher {
+  // ObjectID hashing function.
+  size_t operator()(const UniqueID& id) const {
+    size_t result;
+    std::memcpy(&result, id.data(), sizeof(size_t));
+    return result;
+  }
+};
+
+typedef UniqueID ObjectID;
+
+arrow::Status plasma_error_status(int plasma_error);
+
+#endif  // PLASMA_COMMON_H
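
The UniqueIDHasher above exists so that ObjectID can be used as a key in
standard hash containers; because the IDs are random (or cryptographic
hashes), their first sizeof(size_t) bytes are already well distributed. A
minimal sketch (the value type here is hypothetical):

    std::unordered_map<ObjectID, int64_t, UniqueIDHasher> object_sizes;
    object_sizes[UniqueID::from_random()] = 1024;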

http://git-wip-us.apache.org/repos/asf/arrow/blob/5e343098/cpp/src/plasma/events.cc
----------------------------------------------------------------------
diff --git a/cpp/src/plasma/events.cc b/cpp/src/plasma/events.cc
new file mode 100644
index 0000000..a9f7356
--- /dev/null
+++ b/cpp/src/plasma/events.cc
@@ -0,0 +1,81 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include "plasma/events.h"
+
+#include <errno.h>
+
+void EventLoop::file_event_callback(
+    aeEventLoop* loop, int fd, void* context, int events) {
+  FileCallback* callback = reinterpret_cast<FileCallback*>(context);
+  (*callback)(events);
+}
+
+int EventLoop::timer_event_callback(aeEventLoop* loop, TimerID timer_id, void* context) {
+  TimerCallback* callback = reinterpret_cast<TimerCallback*>(context);
+  return (*callback)(timer_id);
+}
+
+constexpr int kInitialEventLoopSize = 1024;
+
+EventLoop::EventLoop() {
+  loop_ = aeCreateEventLoop(kInitialEventLoopSize);
+}
+
+bool EventLoop::add_file_event(int fd, int events, const FileCallback& callback) {
+  if (file_callbacks_.find(fd) != file_callbacks_.end()) { return false; }
+  auto data = std::unique_ptr<FileCallback>(new FileCallback(callback));
+  void* context = reinterpret_cast<void*>(data.get());
+  // Try to add the file descriptor.
+  int err = aeCreateFileEvent(loop_, fd, events, EventLoop::file_event_callback, context);
+  // If it cannot be added, increase the size of the event loop.
+  if (err == AE_ERR && errno == ERANGE) {
+    err = aeResizeSetSize(loop_, 3 * aeGetSetSize(loop_) / 2);
+    if (err != AE_OK) { return false; }
+    err = aeCreateFileEvent(loop_, fd, events, EventLoop::file_event_callback, context);
+  }
+  // In any case, test if there were errors.
+  if (err == AE_OK) {
+    file_callbacks_.emplace(fd, std::move(data));
+    return true;
+  }
+  return false;
+}
+
+void EventLoop::remove_file_event(int fd) {
+  aeDeleteFileEvent(loop_, fd, AE_READABLE | AE_WRITABLE);
+  file_callbacks_.erase(fd);
+}
+
+void EventLoop::run() {
+  aeMain(loop_);
+}
+
+int64_t EventLoop::add_timer(int64_t timeout, const TimerCallback& callback) {
+  auto data = std::unique_ptr<TimerCallback>(new TimerCallback(callback));
+  void* context = reinterpret_cast<void*>(data.get());
+  int64_t timer_id =
+      aeCreateTimeEvent(loop_, timeout, EventLoop::timer_event_callback, context, NULL);
+  timer_callbacks_.emplace(timer_id, std::move(data));
+  return timer_id;
+}
+
+int EventLoop::remove_timer(int64_t timer_id) {
+  int err = aeDeleteTimeEvent(loop_, timer_id);
+  timer_callbacks_.erase(timer_id);
+  return err;
+}
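
Note that timer_event_callback forwards the callback's return value to ae,
which interprets it as the next timeout in milliseconds, or
kEventLoopTimerDone (AE_NOMORE) to cancel the timer. A sketch of a periodic
timer built on this:

    EventLoop loop;
    int64_t timer_id = loop.add_timer(100, [](int64_t id) {
      // Do periodic work here. Returning 100 re-arms the timer for another
      // 100 ms; returning kEventLoopTimerDone would remove it instead.
      return 100;
    });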

http://git-wip-us.apache.org/repos/asf/arrow/blob/5e343098/cpp/src/plasma/events.h
----------------------------------------------------------------------
diff --git a/cpp/src/plasma/events.h b/cpp/src/plasma/events.h
new file mode 100644
index 0000000..bd93d6b
--- /dev/null
+++ b/cpp/src/plasma/events.h
@@ -0,0 +1,99 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#ifndef PLASMA_EVENTS
+#define PLASMA_EVENTS
+
+#include <functional>
+#include <memory>
+#include <unordered_map>
+
+extern "C" {
+#include "ae/ae.h"
+}
+
+/// Constant specifying that the timer is done and it will be removed.
+constexpr int kEventLoopTimerDone = AE_NOMORE;
+
+/// Read event on the file descriptor.
+constexpr int kEventLoopRead = AE_READABLE;
+
+/// Write event on the file descriptor.
+constexpr int kEventLoopWrite = AE_WRITABLE;
+
+typedef long long TimerID;  // NOLINT
+
+class EventLoop {
+ public:
+  // Signature of the handler that will be called when there is a new event
+  // on the file descriptor that this handler has been registered for.
+  //
+  // The argument is the event flags (read and/or write) that fired.
+  using FileCallback = std::function<void(int)>;
+
+  // This handler will be called when a timer times out. The timer id is
+  // passed as an argument. The return is the number of milliseconds the timer
+  // shall be reset to or kEventLoopTimerDone if the timer shall not be
+  // triggered again.
+  using TimerCallback = std::function<int(int64_t)>;
+
+  EventLoop();
+
+  /// Add a new file event handler to the event loop.
+  ///
+  /// @param fd The file descriptor we are listening to.
+  /// @param events The flags for events we are listening to (read or write).
+  /// @param callback The callback that will be called when the event happens.
+  /// @return Returns true if the event handler was added successfully.
+  bool add_file_event(int fd, int events, const FileCallback& callback);
+
+  /// Remove a file event handler from the event loop.
+  ///
+  /// @param fd The file descriptor of the event handler.
+  /// @return Void.
+  void remove_file_event(int fd);
+
+  /// Register a handler that will be called after a time slice of "timeout"
+  /// milliseconds.
+  ///
+  /// @param timeout The timeout in milliseconds.
+  /// @param callback The callback for the timeout.
+  /// @return The ID of the newly created timer.
+  int64_t add_timer(int64_t timeout, const TimerCallback& callback);
+
+  /// Remove a timer handler from the event loop.
+  ///
+  /// @param timer_id The ID of the timer that is to be removed.
+  /// @return The ae.c error code. TODO(pcm): needs to be standardized
+  int remove_timer(int64_t timer_id);
+
+  /// Run the event loop.
+  ///
+  /// @return Void.
+  void run();
+
+ private:
+  static void file_event_callback(aeEventLoop* loop, int fd, void* context, int events);
+
+  static int timer_event_callback(aeEventLoop* loop, TimerID timer_id, void* context);
+
+  aeEventLoop* loop_;
+  std::unordered_map<int, std::unique_ptr<FileCallback>> file_callbacks_;
+  std::unordered_map<int64_t, std::unique_ptr<TimerCallback>> timer_callbacks_;
+};
+
+#endif  // PLASMA_EVENTS
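
Putting the pieces together, the intended usage of the event loop looks
roughly like this (a sketch; `fd` is assumed to be a readable socket obtained
elsewhere):

    EventLoop loop;
    bool added = loop.add_file_event(fd, kEventLoopRead, [fd](int events) {
      // Invoked by the loop whenever fd becomes readable; `events` carries
      // the kEventLoopRead/kEventLoopWrite flags that actually fired.
    });
    ARROW_CHECK(added);
    loop.run();  // enters aeMain and dispatches events until the loop stops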

http://git-wip-us.apache.org/repos/asf/arrow/blob/5e343098/cpp/src/plasma/eviction_policy.cc
----------------------------------------------------------------------
diff --git a/cpp/src/plasma/eviction_policy.cc b/cpp/src/plasma/eviction_policy.cc
new file mode 100644
index 0000000..4ae6384
--- /dev/null
+++ b/cpp/src/plasma/eviction_policy.cc
@@ -0,0 +1,107 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include "plasma/eviction_policy.h"
+
+#include <algorithm>
+
+void LRUCache::add(const ObjectID& key, int64_t size) {
+  auto it = item_map_.find(key);
+  ARROW_CHECK(it == item_map_.end());
+  /* Note that it is important to use a list so the iterators stay valid. */
+  item_list_.emplace_front(key, size);
+  item_map_.emplace(key, item_list_.begin());
+}
+
+void LRUCache::remove(const ObjectID& key) {
+  auto it = item_map_.find(key);
+  ARROW_CHECK(it != item_map_.end());
+  item_list_.erase(it->second);
+  item_map_.erase(it);
+}
+
+int64_t LRUCache::choose_objects_to_evict(
+    int64_t num_bytes_required, std::vector<ObjectID>* objects_to_evict) {
+  int64_t bytes_evicted = 0;
+  auto it = item_list_.end();
+  while (bytes_evicted < num_bytes_required && it != item_list_.begin()) {
+    it--;
+    objects_to_evict->push_back(it->first);
+    bytes_evicted += it->second;
+  }
+  return bytes_evicted;
+}
+
+EvictionPolicy::EvictionPolicy(PlasmaStoreInfo* store_info)
+    : memory_used_(0), store_info_(store_info) {}
+
+int64_t EvictionPolicy::choose_objects_to_evict(
+    int64_t num_bytes_required, std::vector<ObjectID>* objects_to_evict) {
+  int64_t bytes_evicted =
+      cache_.choose_objects_to_evict(num_bytes_required, objects_to_evict);
+  /* Update the LRU cache. */
+  for (auto& object_id : *objects_to_evict) {
+    cache_.remove(object_id);
+  }
+  /* Update the number of bytes used. */
+  memory_used_ -= bytes_evicted;
+  return bytes_evicted;
+}
+
+void EvictionPolicy::object_created(const ObjectID& object_id) {
+  auto entry = store_info_->objects[object_id].get();
+  cache_.add(object_id, entry->info.data_size + entry->info.metadata_size);
+}
+
+bool EvictionPolicy::require_space(
+    int64_t size, std::vector<ObjectID>* objects_to_evict) {
+  /* Check if there is enough space to create the object. */
+  int64_t required_space = memory_used_ + size - store_info_->memory_capacity;
+  int64_t num_bytes_evicted;
+  if (required_space > 0) {
+    /* Try to free up at least as much space as we need right now but ideally
+     * up to 20% of the total capacity. */
+    int64_t space_to_free = std::max(size, store_info_->memory_capacity / 5);
+    ARROW_LOG(DEBUG) << "not enough space to create this object, so evicting objects";
+    /* Choose some objects to evict, and update the return pointers. */
+    num_bytes_evicted = choose_objects_to_evict(space_to_free, objects_to_evict);
+    ARROW_LOG(INFO) << "There is not enough space to create this object, so evicting "
+                    << objects_to_evict->size() << " objects to free up "
+                    << num_bytes_evicted << " bytes.";
+  } else {
+    num_bytes_evicted = 0;
+  }
+  if (num_bytes_evicted >= required_space) {
+    /* We only increment the space used if there is enough space to create the
+     * object. */
+    memory_used_ += size;
+  }
+  return num_bytes_evicted >= required_space;
+}
+
+void EvictionPolicy::begin_object_access(
+    const ObjectID& object_id, std::vector<ObjectID>* objects_to_evict) {
+  /* If the object is in the LRU cache, remove it. */
+  cache_.remove(object_id);
+}
+
+void EvictionPolicy::end_object_access(
+    const ObjectID& object_id, std::vector<ObjectID>* objects_to_evict) {
+  auto entry = store_info_->objects[object_id].get();
+  /* Add the object to the LRU cache. */
+  cache_.add(object_id, entry->info.data_size + entry->info.metadata_size);
+}
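
To make the require_space arithmetic concrete: with memory_capacity = 1000 MB
and memory_used_ = 950 MB, creating a 100 MB object yields required_space =
950 + 100 - 1000 = 50 MB, while space_to_free = max(100 MB, 1000 MB / 5) =
200 MB. The policy therefore asks the LRU cache for up to 200 MB of victims,
and the call succeeds as long as at least 50 MB is actually chosen. A caller
sketch (the `policy` instance is hypothetical):

    std::vector<ObjectID> evict;
    bool fits = policy.require_space(100 * 1000 * 1000, &evict);
    // fits == true iff enough bytes were chosen; the caller must then
    // actually evict every ID in `evict` from the store.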

http://git-wip-us.apache.org/repos/asf/arrow/blob/5e343098/cpp/src/plasma/eviction_policy.h
----------------------------------------------------------------------
diff --git a/cpp/src/plasma/eviction_policy.h b/cpp/src/plasma/eviction_policy.h
new file mode 100644
index 0000000..3815fc6
--- /dev/null
+++ b/cpp/src/plasma/eviction_policy.h
@@ -0,0 +1,134 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#ifndef PLASMA_EVICTION_POLICY_H
+#define PLASMA_EVICTION_POLICY_H
+
+#include <list>
+#include <unordered_map>
+#include <utility>
+#include <vector>
+
+#include "plasma/common.h"
+#include "plasma/plasma.h"
+
+// ==== The eviction policy ====
+//
+// This file contains declarations for all functions and data structures that
+// need to be provided if you want to implement a new eviction algorithm for the
+// Plasma store.
+
+class LRUCache {
+ public:
+  LRUCache() {}
+
+  void add(const ObjectID& key, int64_t size);
+
+  void remove(const ObjectID& key);
+
+  int64_t choose_objects_to_evict(
+      int64_t num_bytes_required, std::vector<ObjectID>* objects_to_evict);
+
+ private:
+  /// A doubly-linked list containing the items in the cache and
+  /// their sizes in LRU order.
+  typedef std::list<std::pair<ObjectID, int64_t>> ItemList;
+  ItemList item_list_;
+  /// A hash table mapping the object ID of an object in the cache to its
+  /// location in the doubly linked list item_list_.
+  std::unordered_map<ObjectID, ItemList::iterator, UniqueIDHasher> item_map_;
+};
+
+/// The eviction policy.
+class EvictionPolicy {
+ public:
+  /// Construct an eviction policy.
+  ///
+  /// @param store_info Information about the Plasma store that is exposed
+  ///        to the eviction policy.
+  explicit EvictionPolicy(PlasmaStoreInfo* store_info);
+
+  /// This method will be called whenever an object is first created, in
+  /// order to add it to the LRU cache. This is done so that the first time
+  /// the Plasma store calls begin_object_access, we can remove the object
+  /// from the LRU cache.
+  ///
+  /// @param object_id The object ID of the object that was created.
+  /// @return Void.
+  void object_created(const ObjectID& object_id);
+
+  /// This method will be called when the Plasma store needs more space,
+  /// perhaps to create a new object. If the required amount of space cannot
+  /// be freed up, the method returns false and the caller is expected to
+  /// treat this as a fatal error. When this method is called, the eviction
+  /// policy will assume that the objects chosen to be evicted will in fact
+  /// be evicted from the Plasma store by the caller.
+  ///
+  /// @param size The size in bytes of the new object, including both data and
+  ///        metadata.
+  /// @param objects_to_evict The object IDs that were chosen for eviction will
+  ///        be stored into this vector.
+  /// @return True if enough space can be freed and false otherwise.
+  bool require_space(int64_t size, std::vector<ObjectID>* objects_to_evict);
+
+  /// This method will be called whenever an unused object in the Plasma store
+  /// starts to be used. When this method is called, the eviction policy will
+  /// assume that the objects chosen to be evicted will in fact be evicted from
+  /// the Plasma store by the caller.
+  ///
+  /// @param object_id The ID of the object that is now being used.
+  /// @param objects_to_evict The object IDs that were chosen for eviction will
+  ///        be stored into this vector.
+  /// @return Void.
+  void begin_object_access(
+      const ObjectID& object_id, std::vector<ObjectID>* objects_to_evict);
+
+  /// This method will be called whenever an object in the Plasma store that was
+  /// being used is no longer being used. When this method is called, the
+  /// eviction policy will assume that the objects chosen to be evicted will in
+  /// fact be evicted from the Plasma store by the caller.
+  ///
+  /// @param object_id The ID of the object that is no longer being used.
+  /// @param objects_to_evict The object IDs that were chosen for eviction will
+  ///        be stored into this vector.
+  /// @return Void.
+  void end_object_access(
+      const ObjectID& object_id, std::vector<ObjectID>* objects_to_evict);
+
+  /// Choose some objects to evict from the Plasma store. When this method is
+  /// called, the eviction policy will assume that the objects chosen to be
+  /// evicted will in fact be evicted from the Plasma store by the caller.
+  ///
+  /// @note This method is not part of the API. It is exposed in the header file
+  /// only for testing.
+  ///
+  /// @param num_bytes_required The number of bytes of space to try to free up.
+  /// @param objects_to_evict The object IDs that were chosen for eviction will
+  ///        be stored into this vector.
+  /// @return The total number of bytes of space chosen to be evicted.
+  int64_t choose_objects_to_evict(
+      int64_t num_bytes_required, std::vector<ObjectID>* objects_to_evict);
+
+ private:
+  /// The amount of memory (in bytes) currently being used.
+  int64_t memory_used_;
+  /// Pointer to the plasma store info.
+  PlasmaStoreInfo* store_info_;
+  /// Datastructure for the LRU cache.
+  LRUCache cache_;
+};
+
+#endif  // PLASMA_EVICTION_POLICY_H
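
The expected lifecycle of an object with respect to this policy, as a sketch
(assuming `store_info` is a populated PlasmaStoreInfo and `oid` is a created
object's ID): the object is evictable right after creation, pinned while any
client uses it, and evictable again afterwards:

    EvictionPolicy policy(&store_info);
    policy.object_created(oid);                // enters the LRU cache
    std::vector<ObjectID> evict;
    policy.begin_object_access(oid, &evict);   // pinned: removed from the LRU
    policy.end_object_access(oid, &evict);     // evictable: back in the LRU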