Posted to commits@trafficserver.apache.org by so...@apache.org on 2014/08/01 22:44:25 UTC

[12/20] TS-2950: Initial commit of libck.

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/f098175e/lib/ck/include/ck_pr.h
----------------------------------------------------------------------
diff --git a/lib/ck/include/ck_pr.h b/lib/ck/include/ck_pr.h
new file mode 100644
index 0000000..eb198f5
--- /dev/null
+++ b/lib/ck/include/ck_pr.h
@@ -0,0 +1,1152 @@
+/*
+ * Copyright 2009-2014 Samy Al Bahra.
+ * Copyright 2011 David Joseph.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _CK_PR_H
+#define _CK_PR_H
+
+#include <ck_cc.h>
+#include <ck_limits.h>
+#include <ck_md.h>
+#include <ck_stdint.h>
+#include <stdbool.h>
+
+#if defined(__x86_64__)
+#include "gcc/x86_64/ck_pr.h"
+#elif defined(__x86__)
+#include "gcc/x86/ck_pr.h"
+#elif defined(__sparcv9__)
+#include "gcc/sparcv9/ck_pr.h"
+#elif defined(__ppc64__)
+#include "gcc/ppc64/ck_pr.h"
+#elif defined(__ppc__)
+#include "gcc/ppc/ck_pr.h"
+#elif defined(__arm__)
+#include "gcc/arm/ck_pr.h"
+#elif !defined(__GNUC__)
+#error Your platform is unsupported
+#endif
+
+#if defined(__GNUC__)
+#include "gcc/ck_pr.h"
+#endif
+
+#define CK_PR_FENCE_EMIT(T)			\
+	CK_CC_INLINE static void		\
+	ck_pr_fence_##T(void)			\
+	{					\
+		ck_pr_fence_strict_##T();	\
+		return;				\
+	}
+#define CK_PR_FENCE_NOOP(T)			\
+	CK_CC_INLINE static void		\
+	ck_pr_fence_##T(void)			\
+	{					\
+		ck_pr_barrier();		\
+		return;				\
+	}
+
+/*
+ * None of the currently supported platforms allow for data-dependent
+ * load ordering.
+ */
+CK_PR_FENCE_NOOP(load_depends)
+#define ck_pr_fence_strict_load_depends ck_pr_fence_load_depends
+
+/*
+ * In memory models where atomic operations do not have serializing
+ * effects, atomic read-modify-write operations are modeled as stores.
+ */
+#if defined(CK_MD_RMO)
+/*
+ * Only stores to the same location have a global
+ * ordering.
+ */
+CK_PR_FENCE_EMIT(atomic)
+CK_PR_FENCE_EMIT(atomic_load)
+CK_PR_FENCE_EMIT(atomic_store)
+CK_PR_FENCE_EMIT(store_atomic)
+CK_PR_FENCE_EMIT(load_atomic)
+CK_PR_FENCE_EMIT(load_store)
+CK_PR_FENCE_EMIT(store_load)
+CK_PR_FENCE_EMIT(load)
+CK_PR_FENCE_EMIT(store)
+CK_PR_FENCE_EMIT(memory)
+CK_PR_FENCE_EMIT(acquire)
+CK_PR_FENCE_EMIT(release)
+#elif defined(CK_MD_PSO)
+/*
+ * Anything can be re-ordered with respect to stores.
+ * Otherwise, loads are executed in-order.
+ */
+CK_PR_FENCE_EMIT(atomic)
+CK_PR_FENCE_NOOP(atomic_load)
+CK_PR_FENCE_EMIT(atomic_store)
+CK_PR_FENCE_EMIT(store_atomic)
+CK_PR_FENCE_NOOP(load_atomic)
+CK_PR_FENCE_EMIT(load_store)
+CK_PR_FENCE_EMIT(store_load)
+CK_PR_FENCE_NOOP(load)
+CK_PR_FENCE_EMIT(store)
+CK_PR_FENCE_EMIT(memory)
+CK_PR_FENCE_EMIT(acquire)
+CK_PR_FENCE_EMIT(release)
+#elif defined(CK_MD_TSO)
+/*
+ * Only loads are re-ordered and only with respect to
+ * prior stores. Atomic operations are serializing.
+ */
+CK_PR_FENCE_NOOP(atomic)
+CK_PR_FENCE_NOOP(atomic_load)
+CK_PR_FENCE_NOOP(atomic_store)
+CK_PR_FENCE_NOOP(store_atomic)
+CK_PR_FENCE_NOOP(load_atomic)
+CK_PR_FENCE_NOOP(load_store)
+CK_PR_FENCE_EMIT(store_load)
+CK_PR_FENCE_NOOP(load)
+CK_PR_FENCE_NOOP(store)
+CK_PR_FENCE_NOOP(memory)
+CK_PR_FENCE_NOOP(acquire)
+CK_PR_FENCE_NOOP(release)
+#else
+#error "No memory model has been defined."
+#endif /* CK_MD_TSO */
+
+#undef CK_PR_FENCE_EMIT
+#undef CK_PR_FENCE_NOOP
+
+#define CK_PR_BIN(K, S, M, T, P, C)					\
+	CK_CC_INLINE static void					\
+	ck_pr_##K##_##S(M *target, T value)				\
+	{								\
+		T previous;						\
+		C punt;							\
+		punt = ck_pr_load_##S(target);				\
+		previous = (T)punt;					\
+		while (ck_pr_cas_##S##_value(target,			\
+					     (C)previous,		\
+					     (C)(previous P value),	\
+					     &previous) == false)	\
+			ck_pr_stall();					\
+									\
+		return;							\
+	}
+
+#define CK_PR_BIN_S(K, S, T, P) CK_PR_BIN(K, S, T, T, P, T)
+
+#if defined(CK_F_PR_LOAD_CHAR) && defined(CK_F_PR_CAS_CHAR_VALUE)
+
+#ifndef CK_F_PR_ADD_CHAR
+#define CK_F_PR_ADD_CHAR
+CK_PR_BIN_S(add, char, char, +)
+#endif /* CK_F_PR_ADD_CHAR */
+
+#ifndef CK_F_PR_SUB_CHAR
+#define CK_F_PR_SUB_CHAR
+CK_PR_BIN_S(sub, char, char, -)
+#endif /* CK_F_PR_SUB_CHAR */
+
+#ifndef CK_F_PR_AND_CHAR
+#define CK_F_PR_AND_CHAR
+CK_PR_BIN_S(and, char, char, &)
+#endif /* CK_F_PR_AND_CHAR */
+
+#ifndef CK_F_PR_XOR_CHAR
+#define CK_F_PR_XOR_CHAR
+CK_PR_BIN_S(xor, char, char, ^)
+#endif /* CK_F_PR_XOR_CHAR */
+
+#ifndef CK_F_PR_OR_CHAR
+#define CK_F_PR_OR_CHAR
+CK_PR_BIN_S(or, char, char, |)
+#endif /* CK_F_PR_OR_CHAR */
+
+#endif /* CK_F_PR_LOAD_CHAR && CK_F_PR_CAS_CHAR_VALUE */
+
+#if defined(CK_F_PR_LOAD_INT) && defined(CK_F_PR_CAS_INT_VALUE)
+
+#ifndef CK_F_PR_ADD_INT
+#define CK_F_PR_ADD_INT
+CK_PR_BIN_S(add, int, int, +)
+#endif /* CK_F_PR_ADD_INT */
+
+#ifndef CK_F_PR_SUB_INT
+#define CK_F_PR_SUB_INT
+CK_PR_BIN_S(sub, int, int, -)
+#endif /* CK_F_PR_SUB_INT */
+
+#ifndef CK_F_PR_AND_INT
+#define CK_F_PR_AND_INT
+CK_PR_BIN_S(and, int, int, &)
+#endif /* CK_F_PR_AND_INT */
+
+#ifndef CK_F_PR_XOR_INT
+#define CK_F_PR_XOR_INT
+CK_PR_BIN_S(xor, int, int, ^)
+#endif /* CK_F_PR_XOR_INT */
+
+#ifndef CK_F_PR_OR_INT
+#define CK_F_PR_OR_INT
+CK_PR_BIN_S(or, int, int, |)
+#endif /* CK_F_PR_OR_INT */
+
+#endif /* CK_F_PR_LOAD_INT && CK_F_PR_CAS_INT_VALUE */
+
+#if defined(CK_F_PR_LOAD_DOUBLE) && defined(CK_F_PR_CAS_DOUBLE_VALUE)
+
+#ifndef CK_F_PR_ADD_DOUBLE
+#define CK_F_PR_ADD_DOUBLE
+CK_PR_BIN_S(add, double, double, +)
+#endif /* CK_F_PR_ADD_DOUBLE */
+
+#ifndef CK_F_PR_SUB_DOUBLE
+#define CK_F_PR_SUB_DOUBLE
+CK_PR_BIN_S(sub, double, double, -)
+#endif /* CK_F_PR_SUB_DOUBLE */
+
+#endif /* CK_F_PR_LOAD_DOUBLE && CK_F_PR_CAS_DOUBLE_VALUE */
+
+#if defined(CK_F_PR_LOAD_UINT) && defined(CK_F_PR_CAS_UINT_VALUE)
+
+#ifndef CK_F_PR_ADD_UINT
+#define CK_F_PR_ADD_UINT
+CK_PR_BIN_S(add, uint, unsigned int, +)
+#endif /* CK_F_PR_ADD_UINT */
+
+#ifndef CK_F_PR_SUB_UINT
+#define CK_F_PR_SUB_UINT
+CK_PR_BIN_S(sub, uint, unsigned int, -)
+#endif /* CK_F_PR_SUB_UINT */
+
+#ifndef CK_F_PR_AND_UINT
+#define CK_F_PR_AND_UINT
+CK_PR_BIN_S(and, uint, unsigned int, &)
+#endif /* CK_F_PR_AND_UINT */
+
+#ifndef CK_F_PR_XOR_UINT
+#define CK_F_PR_XOR_UINT
+CK_PR_BIN_S(xor, uint, unsigned int, ^)
+#endif /* CK_F_PR_XOR_UINT */
+
+#ifndef CK_F_PR_OR_UINT
+#define CK_F_PR_OR_UINT
+CK_PR_BIN_S(or, uint, unsigned int, |)
+#endif /* CK_F_PR_OR_UINT */
+
+#endif /* CK_F_PR_LOAD_UINT && CK_F_PR_CAS_UINT_VALUE */
+
+#if defined(CK_F_PR_LOAD_PTR) && defined(CK_F_PR_CAS_PTR_VALUE)
+
+#ifndef CK_F_PR_ADD_PTR
+#define CK_F_PR_ADD_PTR
+CK_PR_BIN(add, ptr, void, uintptr_t, +, void *)
+#endif /* CK_F_PR_ADD_PTR */
+
+#ifndef CK_F_PR_SUB_PTR
+#define CK_F_PR_SUB_PTR
+CK_PR_BIN(sub, ptr, void, uintptr_t, -, void *)
+#endif /* CK_F_PR_SUB_PTR */
+
+#ifndef CK_F_PR_AND_PTR
+#define CK_F_PR_AND_PTR
+CK_PR_BIN(and, ptr, void, uintptr_t, &, void *)
+#endif /* CK_F_PR_AND_PTR */
+
+#ifndef CK_F_PR_XOR_PTR
+#define CK_F_PR_XOR_PTR
+CK_PR_BIN(xor, ptr, void, uintptr_t, ^, void *)
+#endif /* CK_F_PR_XOR_PTR */
+
+#ifndef CK_F_PR_OR_PTR
+#define CK_F_PR_OR_PTR
+CK_PR_BIN(or, ptr, void, uintptr_t, |, void *)
+#endif /* CK_F_PR_OR_PTR */
+
+#endif /* CK_F_PR_LOAD_PTR && CK_F_PR_CAS_PTR_VALUE */
+
+#if defined(CK_F_PR_LOAD_64) && defined(CK_F_PR_CAS_64_VALUE)
+
+#ifndef CK_F_PR_ADD_64
+#define CK_F_PR_ADD_64
+CK_PR_BIN_S(add, 64, uint64_t, +)
+#endif /* CK_F_PR_ADD_64 */
+
+#ifndef CK_F_PR_SUB_64
+#define CK_F_PR_SUB_64
+CK_PR_BIN_S(sub, 64, uint64_t, -)
+#endif /* CK_F_PR_SUB_64 */
+
+#ifndef CK_F_PR_AND_64
+#define CK_F_PR_AND_64
+CK_PR_BIN_S(and, 64, uint64_t, &)
+#endif /* CK_F_PR_AND_64 */
+
+#ifndef CK_F_PR_XOR_64
+#define CK_F_PR_XOR_64
+CK_PR_BIN_S(xor, 64, uint64_t, ^)
+#endif /* CK_F_PR_XOR_64 */
+
+#ifndef CK_F_PR_OR_64
+#define CK_F_PR_OR_64
+CK_PR_BIN_S(or, 64, uint64_t, |)
+#endif /* CK_F_PR_OR_64 */
+
+#endif /* CK_F_PR_LOAD_64 && CK_F_PR_CAS_64_VALUE */
+
+#if defined(CK_F_PR_LOAD_32) && defined(CK_F_PR_CAS_32_VALUE)
+
+#ifndef CK_F_PR_ADD_32
+#define CK_F_PR_ADD_32
+CK_PR_BIN_S(add, 32, uint32_t, +)
+#endif /* CK_F_PR_ADD_32 */
+
+#ifndef CK_F_PR_SUB_32
+#define CK_F_PR_SUB_32
+CK_PR_BIN_S(sub, 32, uint32_t, -)
+#endif /* CK_F_PR_SUB_32 */
+
+#ifndef CK_F_PR_AND_32
+#define CK_F_PR_AND_32
+CK_PR_BIN_S(and, 32, uint32_t, &)
+#endif /* CK_F_PR_AND_32 */
+
+#ifndef CK_F_PR_XOR_32
+#define CK_F_PR_XOR_32
+CK_PR_BIN_S(xor, 32, uint32_t, ^)
+#endif /* CK_F_PR_XOR_32 */
+
+#ifndef CK_F_PR_OR_32
+#define CK_F_PR_OR_32
+CK_PR_BIN_S(or, 32, uint32_t, |)
+#endif /* CK_F_PR_OR_32 */
+
+#endif /* CK_F_PR_LOAD_32 && CK_F_PR_CAS_32_VALUE */
+
+#if defined(CK_F_PR_LOAD_16) && defined(CK_F_PR_CAS_16_VALUE)
+
+#ifndef CK_F_PR_ADD_16
+#define CK_F_PR_ADD_16
+CK_PR_BIN_S(add, 16, uint16_t, +)
+#endif /* CK_F_PR_ADD_16 */
+
+#ifndef CK_F_PR_SUB_16
+#define CK_F_PR_SUB_16
+CK_PR_BIN_S(sub, 16, uint16_t, -)
+#endif /* CK_F_PR_SUB_16 */
+
+#ifndef CK_F_PR_AND_16
+#define CK_F_PR_AND_16
+CK_PR_BIN_S(and, 16, uint16_t, &)
+#endif /* CK_F_PR_AND_16 */
+
+#ifndef CK_F_PR_XOR_16
+#define CK_F_PR_XOR_16
+CK_PR_BIN_S(xor, 16, uint16_t, ^)
+#endif /* CK_F_PR_XOR_16 */
+
+#ifndef CK_F_PR_OR_16
+#define CK_F_PR_OR_16
+CK_PR_BIN_S(or, 16, uint16_t, |)
+#endif /* CK_F_PR_OR_16 */
+
+#endif /* CK_F_PR_LOAD_16 && CK_F_PR_CAS_16_VALUE */
+
+#if defined(CK_F_PR_LOAD_8) && defined(CK_F_PR_CAS_8_VALUE)
+
+#ifndef CK_F_PR_ADD_8
+#define CK_F_PR_ADD_8
+CK_PR_BIN_S(add, 8, uint8_t, +)
+#endif /* CK_F_PR_ADD_8 */
+
+#ifndef CK_F_PR_SUB_8
+#define CK_F_PR_SUB_8
+CK_PR_BIN_S(sub, 8, uint8_t, -)
+#endif /* CK_F_PR_SUB_8 */
+
+#ifndef CK_F_PR_AND_8
+#define CK_F_PR_AND_8
+CK_PR_BIN_S(and, 8, uint8_t, &)
+#endif /* CK_F_PR_AND_8 */
+
+#ifndef CK_F_PR_XOR_8
+#define CK_F_PR_XOR_8
+CK_PR_BIN_S(xor, 8, uint8_t, ^)
+#endif /* CK_F_PR_XOR_8 */
+
+#ifndef CK_F_PR_OR_8
+#define CK_F_PR_OR_8
+CK_PR_BIN_S(or, 8, uint8_t, |)
+#endif /* CK_F_PR_OR_8 */
+
+#endif /* CK_F_PR_LOAD_8 && CK_F_PR_CAS_8_VALUE */
+
+#undef CK_PR_BIN_S
+#undef CK_PR_BIN
+
+#define CK_PR_BTX(K, S, M, T, P, C, R)						   \
+	CK_CC_INLINE static bool						   \
+	ck_pr_##K##_##S(M *target, unsigned int offset)				   \
+	{									   \
+		T previous;							   \
+		C punt;								   \
+		punt = ck_pr_load_##S(target);					   \
+		previous = (T)punt;						   \
+		while (ck_pr_cas_##S##_value(target, (C)previous,		   \
+			(C)(previous P (R ((T)1 << offset))), &previous) == false) \
+				ck_pr_stall();					   \
+		return ((previous >> offset) & 1);				   \
+	}
+
+#define CK_PR_BTX_S(K, S, T, P, R) CK_PR_BTX(K, S, T, T, P, T, R)
+
+#if defined(CK_F_PR_LOAD_INT) && defined(CK_F_PR_CAS_INT_VALUE)
+
+#ifndef CK_F_PR_BTC_INT
+#define CK_F_PR_BTC_INT
+CK_PR_BTX_S(btc, int, int, ^,)
+#endif /* CK_F_PR_BTC_INT */
+
+#ifndef CK_F_PR_BTR_INT
+#define CK_F_PR_BTR_INT
+CK_PR_BTX_S(btr, int, int, &, ~)
+#endif /* CK_F_PR_BTR_INT */
+
+#ifndef CK_F_PR_BTS_INT
+#define CK_F_PR_BTS_INT
+CK_PR_BTX_S(bts, int, int, |,)
+#endif /* CK_F_PR_BTS_INT */
+
+#endif /* CK_F_PR_LOAD_INT && CK_F_PR_CAS_INT_VALUE */
+
+#if defined(CK_F_PR_LOAD_UINT) && defined(CK_F_PR_CAS_UINT_VALUE)
+
+#ifndef CK_F_PR_BTC_UINT
+#define CK_F_PR_BTC_UINT
+CK_PR_BTX_S(btc, uint, unsigned int, ^,)
+#endif /* CK_F_PR_BTC_UINT */
+
+#ifndef CK_F_PR_BTR_UINT
+#define CK_F_PR_BTR_UINT
+CK_PR_BTX_S(btr, uint, unsigned int, &, ~)
+#endif /* CK_F_PR_BTR_UINT */
+
+#ifndef CK_F_PR_BTS_UINT
+#define CK_F_PR_BTS_UINT
+CK_PR_BTX_S(bts, uint, unsigned int, |,)
+#endif /* CK_F_PR_BTS_UINT */
+
+#endif /* CK_F_PR_LOAD_UINT && CK_F_PR_CAS_UINT_VALUE */
+
+#if defined(CK_F_PR_LOAD_PTR) && defined(CK_F_PR_CAS_PTR_VALUE)
+
+#ifndef CK_F_PR_BTC_PTR
+#define CK_F_PR_BTC_PTR
+CK_PR_BTX(btc, ptr, void, uintptr_t, ^, void *,)
+#endif /* CK_F_PR_BTC_PTR */
+
+#ifndef CK_F_PR_BTR_PTR
+#define CK_F_PR_BTR_PTR
+CK_PR_BTX(btr, ptr, void, uintptr_t, &, void *, ~)
+#endif /* CK_F_PR_BTR_PTR */
+
+#ifndef CK_F_PR_BTS_PTR
+#define CK_F_PR_BTS_PTR
+CK_PR_BTX(bts, ptr, void, uintptr_t, |, void *,)
+#endif /* CK_F_PR_BTS_PTR */
+
+#endif /* CK_F_PR_LOAD_PTR && CK_F_PR_CAS_PTR_VALUE */
+
+#if defined(CK_F_PR_LOAD_64) && defined(CK_F_PR_CAS_64_VALUE)
+
+#ifndef CK_F_PR_BTC_64
+#define CK_F_PR_BTC_64
+CK_PR_BTX_S(btc, 64, uint64_t, ^,)
+#endif /* CK_F_PR_BTC_64 */
+
+#ifndef CK_F_PR_BTR_64
+#define CK_F_PR_BTR_64
+CK_PR_BTX_S(btr, 64, uint64_t, &, ~)
+#endif /* CK_F_PR_BTR_64 */
+
+#ifndef CK_F_PR_BTS_64
+#define CK_F_PR_BTS_64
+CK_PR_BTX_S(bts, 64, uint64_t, |,)
+#endif /* CK_F_PR_BTS_64 */
+
+#endif /* CK_F_PR_LOAD_64 && CK_F_PR_CAS_64_VALUE */
+
+#if defined(CK_F_PR_LOAD_32) && defined(CK_F_PR_CAS_32_VALUE)
+
+#ifndef CK_F_PR_BTC_32
+#define CK_F_PR_BTC_32
+CK_PR_BTX_S(btc, 32, uint32_t, ^,)
+#endif /* CK_F_PR_BTC_32 */
+
+#ifndef CK_F_PR_BTR_32
+#define CK_F_PR_BTR_32
+CK_PR_BTX_S(btr, 32, uint32_t, &, ~)
+#endif /* CK_F_PR_BTR_32 */
+
+#ifndef CK_F_PR_BTS_32
+#define CK_F_PR_BTS_32
+CK_PR_BTX_S(bts, 32, uint32_t, |,)
+#endif /* CK_F_PR_BTS_32 */
+
+#endif /* CK_F_PR_LOAD_32 && CK_F_PR_CAS_32_VALUE */
+
+#if defined(CK_F_PR_LOAD_16) && defined(CK_F_PR_CAS_16_VALUE)
+
+#ifndef CK_F_PR_BTC_16
+#define CK_F_PR_BTC_16
+CK_PR_BTX_S(btc, 16, uint16_t, ^,)
+#endif /* CK_F_PR_BTC_16 */
+
+#ifndef CK_F_PR_BTR_16
+#define CK_F_PR_BTR_16
+CK_PR_BTX_S(btr, 16, uint16_t, &, ~)
+#endif /* CK_F_PR_BTR_16 */
+
+#ifndef CK_F_PR_BTS_16
+#define CK_F_PR_BTS_16
+CK_PR_BTX_S(bts, 16, uint16_t, |,)
+#endif /* CK_F_PR_BTS_16 */
+
+#endif /* CK_F_PR_LOAD_16 && CK_F_PR_CAS_16_VALUE */
+
+#undef CK_PR_BTX_S
+#undef CK_PR_BTX
+
+#define CK_PR_UNARY(K, X, S, M, T)					\
+	CK_CC_INLINE static void					\
+	ck_pr_##K##_##S(M *target)					\
+	{								\
+		ck_pr_##X##_##S(target, (T)1);				\
+		return;							\
+	}
+
+#define CK_PR_UNARY_Z(K, S, M, T, P, C, Z)				\
+	CK_CC_INLINE static void					\
+	ck_pr_##K##_##S##_zero(M *target, bool *zero)			\
+	{								\
+		T previous;						\
+		C punt;							\
+		punt = (C)ck_pr_load_##S(target);			\
+		previous = (T)punt;					\
+		while (ck_pr_cas_##S##_value(target,			\
+					     (C)previous,		\
+					     (C)(previous P 1),		\
+					     &previous) == false)	\
+			ck_pr_stall();					\
+		*zero = previous == (T)Z;				\
+		return;							\
+	}
+
+#define CK_PR_UNARY_S(K, X, S, M) CK_PR_UNARY(K, X, S, M, M)
+#define CK_PR_UNARY_Z_S(K, S, M, P, Z) CK_PR_UNARY_Z(K, S, M, M, P, M, Z)
+
+#if defined(CK_F_PR_LOAD_CHAR) && defined(CK_F_PR_CAS_CHAR_VALUE)
+
+#ifndef CK_F_PR_INC_CHAR
+#define CK_F_PR_INC_CHAR
+CK_PR_UNARY_S(inc, add, char, char)
+#endif /* CK_F_PR_INC_CHAR */
+
+#ifndef CK_F_PR_INC_CHAR_ZERO
+#define CK_F_PR_INC_CHAR_ZERO
+CK_PR_UNARY_Z_S(inc, char, char, +, -1)
+#endif /* CK_F_PR_INC_CHAR_ZERO */
+
+#ifndef CK_F_PR_DEC_CHAR
+#define CK_F_PR_DEC_CHAR
+CK_PR_UNARY_S(dec, sub, char, char)
+#endif /* CK_F_PR_DEC_CHAR */
+
+#ifndef CK_F_PR_DEC_CHAR_ZERO
+#define CK_F_PR_DEC_CHAR_ZERO
+CK_PR_UNARY_Z_S(dec, char, char, -, 1)
+#endif /* CK_F_PR_DEC_CHAR_ZERO */
+
+#endif /* CK_F_PR_LOAD_CHAR && CK_F_PR_CAS_CHAR_VALUE */
+
+#if defined(CK_F_PR_LOAD_INT) && defined(CK_F_PR_CAS_INT_VALUE)
+
+#ifndef CK_F_PR_INC_INT
+#define CK_F_PR_INC_INT
+CK_PR_UNARY_S(inc, add, int, int)
+#endif /* CK_F_PR_INC_INT */
+
+#ifndef CK_F_PR_INC_INT_ZERO
+#define CK_F_PR_INC_INT_ZERO
+CK_PR_UNARY_Z_S(inc, int, int, +, -1)
+#endif /* CK_F_PR_INC_INT_ZERO */
+
+#ifndef CK_F_PR_DEC_INT
+#define CK_F_PR_DEC_INT
+CK_PR_UNARY_S(dec, sub, int, int)
+#endif /* CK_F_PR_DEC_INT */
+
+#ifndef CK_F_PR_DEC_INT_ZERO
+#define CK_F_PR_DEC_INT_ZERO
+CK_PR_UNARY_Z_S(dec, int, int, -, 1)
+#endif /* CK_F_PR_DEC_INT_ZERO */
+
+#endif /* CK_F_PR_LOAD_INT && CK_F_PR_CAS_INT_VALUE */
+
+#if defined(CK_F_PR_LOAD_DOUBLE) && defined(CK_F_PR_CAS_DOUBLE_VALUE)
+
+#ifndef CK_F_PR_INC_DOUBLE
+#define CK_F_PR_INC_DOUBLE
+CK_PR_UNARY_S(inc, add, double, double)
+#endif /* CK_F_PR_INC_DOUBLE */
+
+#ifndef CK_F_PR_DEC_DOUBLE
+#define CK_F_PR_DEC_DOUBLE
+CK_PR_UNARY_S(dec, sub, double, double)
+#endif /* CK_F_PR_DEC_DOUBLE */
+
+#endif /* CK_F_PR_LOAD_DOUBLE && CK_F_PR_CAS_DOUBLE_VALUE */
+
+#if defined(CK_F_PR_LOAD_UINT) && defined(CK_F_PR_CAS_UINT_VALUE)
+
+#ifndef CK_F_PR_INC_UINT
+#define CK_F_PR_INC_UINT
+CK_PR_UNARY_S(inc, add, uint, unsigned int)
+#endif /* CK_F_PR_INC_UINT */
+
+#ifndef CK_F_PR_INC_UINT_ZERO
+#define CK_F_PR_INC_UINT_ZERO
+CK_PR_UNARY_Z_S(inc, uint, unsigned int, +, UINT_MAX)
+#endif /* CK_F_PR_INC_UINT_ZERO */
+
+#ifndef CK_F_PR_DEC_UINT
+#define CK_F_PR_DEC_UINT
+CK_PR_UNARY_S(dec, sub, uint, unsigned int)
+#endif /* CK_F_PR_DEC_UINT */
+
+#ifndef CK_F_PR_DEC_UINT_ZERO
+#define CK_F_PR_DEC_UINT_ZERO
+CK_PR_UNARY_Z_S(dec, uint, unsigned int, -, 1)
+#endif /* CK_F_PR_DEC_UINT_ZERO */
+
+#endif /* CK_F_PR_LOAD_UINT && CK_F_PR_CAS_UINT_VALUE */
+
+#if defined(CK_F_PR_LOAD_PTR) && defined(CK_F_PR_CAS_PTR_VALUE)
+
+#ifndef CK_F_PR_INC_PTR
+#define CK_F_PR_INC_PTR
+CK_PR_UNARY(inc, add, ptr, void, uintptr_t)
+#endif /* CK_F_PR_INC_PTR */
+
+#ifndef CK_F_PR_INC_PTR_ZERO
+#define CK_F_PR_INC_PTR_ZERO
+CK_PR_UNARY_Z(inc, ptr, void, uintptr_t, +, void *, UINT_MAX)
+#endif /* CK_F_PR_INC_PTR_ZERO */
+
+#ifndef CK_F_PR_DEC_PTR
+#define CK_F_PR_DEC_PTR
+CK_PR_UNARY(dec, sub, ptr, void, uintptr_t)
+#endif /* CK_F_PR_DEC_PTR */
+
+#ifndef CK_F_PR_DEC_PTR_ZERO
+#define CK_F_PR_DEC_PTR_ZERO
+CK_PR_UNARY_Z(dec, ptr, void, uintptr_t, -, void *, 1)
+#endif /* CK_F_PR_DEC_PTR_ZERO */
+
+#endif /* CK_F_PR_LOAD_PTR && CK_F_PR_CAS_PTR_VALUE */
+
+#if defined(CK_F_PR_LOAD_64) && defined(CK_F_PR_CAS_64_VALUE)
+
+#ifndef CK_F_PR_INC_64
+#define CK_F_PR_INC_64
+CK_PR_UNARY_S(inc, add, 64, uint64_t)
+#endif /* CK_F_PR_INC_64 */
+
+#ifndef CK_F_PR_INC_64_ZERO
+#define CK_F_PR_INC_64_ZERO
+CK_PR_UNARY_Z_S(inc, 64, uint64_t, +, UINT64_MAX)
+#endif /* CK_F_PR_INC_64_ZERO */
+
+#ifndef CK_F_PR_DEC_64
+#define CK_F_PR_DEC_64
+CK_PR_UNARY_S(dec, sub, 64, uint64_t)
+#endif /* CK_F_PR_DEC_64 */
+
+#ifndef CK_F_PR_DEC_64_ZERO
+#define CK_F_PR_DEC_64_ZERO
+CK_PR_UNARY_Z_S(dec, 64, uint64_t, -, 1)
+#endif /* CK_F_PR_DEC_64_ZERO */
+
+#endif /* CK_F_PR_LOAD_64 && CK_F_PR_CAS_64_VALUE */
+
+#if defined(CK_F_PR_LOAD_32) && defined(CK_F_PR_CAS_32_VALUE)
+
+#ifndef CK_F_PR_INC_32
+#define CK_F_PR_INC_32
+CK_PR_UNARY_S(inc, add, 32, uint32_t)
+#endif /* CK_F_PR_INC_32 */
+
+#ifndef CK_F_PR_INC_32_ZERO
+#define CK_F_PR_INC_32_ZERO
+CK_PR_UNARY_Z_S(inc, 32, uint32_t, +, UINT32_MAX)
+#endif /* CK_F_PR_INC_32_ZERO */
+
+#ifndef CK_F_PR_DEC_32
+#define CK_F_PR_DEC_32
+CK_PR_UNARY_S(dec, sub, 32, uint32_t)
+#endif /* CK_F_PR_DEC_32 */
+
+#ifndef CK_F_PR_DEC_32_ZERO
+#define CK_F_PR_DEC_32_ZERO
+CK_PR_UNARY_Z_S(dec, 32, uint32_t, -, 1)
+#endif /* CK_F_PR_DEC_32_ZERO */
+
+#endif /* CK_F_PR_LOAD_32 && CK_F_PR_CAS_32_VALUE */
+
+#if defined(CK_F_PR_LOAD_16) && defined(CK_F_PR_CAS_16_VALUE)
+
+#ifndef CK_F_PR_INC_16
+#define CK_F_PR_INC_16
+CK_PR_UNARY_S(inc, add, 16, uint16_t)
+#endif /* CK_F_PR_INC_16 */
+
+#ifndef CK_F_PR_INC_16_ZERO
+#define CK_F_PR_INC_16_ZERO
+CK_PR_UNARY_Z_S(inc, 16, uint16_t, +, UINT16_MAX)
+#endif /* CK_F_PR_INC_16_ZERO */
+
+#ifndef CK_F_PR_DEC_16
+#define CK_F_PR_DEC_16
+CK_PR_UNARY_S(dec, sub, 16, uint16_t)
+#endif /* CK_F_PR_DEC_16 */
+
+#ifndef CK_F_PR_DEC_16_ZERO
+#define CK_F_PR_DEC_16_ZERO
+CK_PR_UNARY_Z_S(dec, 16, uint16_t, -, 1)
+#endif /* CK_F_PR_DEC_16_ZERO */
+
+#endif /* CK_F_PR_LOAD_16 && CK_F_PR_CAS_16_VALUE */
+
+#if defined(CK_F_PR_LOAD_8) && defined(CK_F_PR_CAS_8_VALUE)
+
+#ifndef CK_F_PR_INC_8
+#define CK_F_PR_INC_8
+CK_PR_UNARY_S(inc, add, 8, uint8_t)
+#endif /* CK_F_PR_INC_8 */
+
+#ifndef CK_F_PR_INC_8_ZERO
+#define CK_F_PR_INC_8_ZERO
+CK_PR_UNARY_Z_S(inc, 8, uint8_t, +, UINT8_MAX)
+#endif /* CK_F_PR_INC_8_ZERO */
+
+#ifndef CK_F_PR_DEC_8
+#define CK_F_PR_DEC_8
+CK_PR_UNARY_S(dec, sub, 8, uint8_t)
+#endif /* CK_F_PR_DEC_8 */
+
+#ifndef CK_F_PR_DEC_8_ZERO
+#define CK_F_PR_DEC_8_ZERO
+CK_PR_UNARY_Z_S(dec, 8, uint8_t, -, 1)
+#endif /* CK_F_PR_DEC_8_ZERO */
+
+#endif /* CK_F_PR_LOAD_8 && CK_F_PR_CAS_8_VALUE */
+
+#undef CK_PR_UNARY_Z_S
+#undef CK_PR_UNARY_S
+#undef CK_PR_UNARY_Z
+#undef CK_PR_UNARY
+
+#define CK_PR_N(K, S, M, T, P, C)					\
+	CK_CC_INLINE static void					\
+	ck_pr_##K##_##S(M *target)					\
+	{								\
+		T previous;						\
+		C punt;							\
+		punt = (C)ck_pr_load_##S(target);			\
+		previous = (T)punt;					\
+		while (ck_pr_cas_##S##_value(target,			\
+					     (C)previous,		\
+					     (C)(P previous),		\
+					     &previous) == false)	\
+			ck_pr_stall();					\
+									\
+		return;							\
+	}
+
+#define CK_PR_N_Z(S, M, T, C)						\
+	CK_CC_INLINE static void					\
+	ck_pr_neg_##S##_zero(M *target, bool *zero)			\
+	{								\
+		T previous;						\
+		C punt;							\
+		punt = (C)ck_pr_load_##S(target);			\
+		previous = (T)punt;					\
+		while (ck_pr_cas_##S##_value(target,			\
+					     (C)previous,		\
+					     (C)(-previous),		\
+					     &previous) == false)	\
+			ck_pr_stall();					\
+									\
+		*zero = previous == 0;					\
+		return;							\
+	}
+
+#define CK_PR_N_S(K, S, M, P)	CK_PR_N(K, S, M, M, P, M)
+#define CK_PR_N_Z_S(S, M) 	CK_PR_N_Z(S, M, M, M)
+
+#if defined(CK_F_PR_LOAD_CHAR) && defined(CK_F_PR_CAS_CHAR_VALUE)
+
+#ifndef CK_F_PR_NOT_CHAR
+#define CK_F_PR_NOT_CHAR
+CK_PR_N_S(not, char, char, ~)
+#endif /* CK_F_PR_NOT_CHAR */
+
+#ifndef CK_F_PR_NEG_CHAR
+#define CK_F_PR_NEG_CHAR
+CK_PR_N_S(neg, char, char, -)
+#endif /* CK_F_PR_NEG_CHAR */
+
+#ifndef CK_F_PR_NEG_CHAR_ZERO
+#define CK_F_PR_NEG_CHAR_ZERO
+CK_PR_N_Z_S(char, char)
+#endif /* CK_F_PR_NEG_CHAR_ZERO */
+
+#endif /* CK_F_PR_LOAD_CHAR && CK_F_PR_CAS_CHAR_VALUE */
+
+#if defined(CK_F_PR_LOAD_INT) && defined(CK_F_PR_CAS_INT_VALUE)
+
+#ifndef CK_F_PR_NOT_INT
+#define CK_F_PR_NOT_INT
+CK_PR_N_S(not, int, int, ~)
+#endif /* CK_F_PR_NOT_INT */
+
+#ifndef CK_F_PR_NEG_INT
+#define CK_F_PR_NEG_INT
+CK_PR_N_S(neg, int, int, -)
+#endif /* CK_F_PR_NEG_INT */
+
+#ifndef CK_F_PR_NEG_INT_ZERO
+#define CK_F_PR_NEG_INT_ZERO
+CK_PR_N_Z_S(int, int)
+#endif /* CK_F_PR_NEG_INT_ZERO */
+
+#endif /* CK_F_PR_LOAD_INT && CK_F_PR_CAS_INT_VALUE */
+
+#if defined(CK_F_PR_LOAD_DOUBLE) && defined(CK_F_PR_CAS_DOUBLE_VALUE)
+
+#ifndef CK_F_PR_NEG_DOUBLE
+#define CK_F_PR_NEG_DOUBLE
+CK_PR_N_S(neg, double, double, -)
+#endif /* CK_F_PR_NEG_DOUBLE */
+
+#endif /* CK_F_PR_LOAD_DOUBLE && CK_F_PR_CAS_DOUBLE_VALUE */
+
+#if defined(CK_F_PR_LOAD_UINT) && defined(CK_F_PR_CAS_UINT_VALUE)
+
+#ifndef CK_F_PR_NOT_UINT
+#define CK_F_PR_NOT_UINT
+CK_PR_N_S(not, uint, unsigned int, ~)
+#endif /* CK_F_PR_NOT_UINT */
+
+#ifndef CK_F_PR_NEG_UINT
+#define CK_F_PR_NEG_UINT
+CK_PR_N_S(neg, uint, unsigned int, -)
+#endif /* CK_F_PR_NEG_UINT */
+
+#ifndef CK_F_PR_NEG_UINT_ZERO
+#define CK_F_PR_NEG_UINT_ZERO
+CK_PR_N_Z_S(uint, unsigned int)
+#endif /* CK_F_PR_NEG_UINT_ZERO */
+
+#endif /* CK_F_PR_LOAD_UINT && CK_F_PR_CAS_UINT_VALUE */
+
+#if defined(CK_F_PR_LOAD_PTR) && defined(CK_F_PR_CAS_PTR_VALUE)
+
+#ifndef CK_F_PR_NOT_PTR
+#define CK_F_PR_NOT_PTR
+CK_PR_N(not, ptr, void, uintptr_t, ~, void *)
+#endif /* CK_F_PR_NOT_PTR */
+
+#ifndef CK_F_PR_NEG_PTR
+#define CK_F_PR_NEG_PTR
+CK_PR_N(neg, ptr, void, uintptr_t, -, void *)
+#endif /* CK_F_PR_NEG_PTR */
+
+#ifndef CK_F_PR_NEG_PTR_ZERO
+#define CK_F_PR_NEG_PTR_ZERO
+CK_PR_N_Z(ptr, void, uintptr_t, void *)
+#endif /* CK_F_PR_NEG_PTR_ZERO */
+
+#endif /* CK_F_PR_LOAD_PTR && CK_F_PR_CAS_PTR_VALUE */
+
+#if defined(CK_F_PR_LOAD_64) && defined(CK_F_PR_CAS_64_VALUE)
+
+#ifndef CK_F_PR_NOT_64
+#define CK_F_PR_NOT_64
+CK_PR_N_S(not, 64, uint64_t, ~)
+#endif /* CK_F_PR_NOT_64 */
+
+#ifndef CK_F_PR_NEG_64
+#define CK_F_PR_NEG_64
+CK_PR_N_S(neg, 64, uint64_t, -)
+#endif /* CK_F_PR_NEG_64 */
+
+#ifndef CK_F_PR_NEG_64_ZERO
+#define CK_F_PR_NEG_64_ZERO
+CK_PR_N_Z_S(64, uint64_t)
+#endif /* CK_F_PR_NEG_64_ZERO */
+
+#endif /* CK_F_PR_LOAD_64 && CK_F_PR_CAS_64_VALUE */
+
+#if defined(CK_F_PR_LOAD_32) && defined(CK_F_PR_CAS_32_VALUE)
+
+#ifndef CK_F_PR_NOT_32
+#define CK_F_PR_NOT_32
+CK_PR_N_S(not, 32, uint32_t, ~)
+#endif /* CK_F_PR_NOT_32 */
+
+#ifndef CK_F_PR_NEG_32
+#define CK_F_PR_NEG_32
+CK_PR_N_S(neg, 32, uint32_t, -)
+#endif /* CK_F_PR_NEG_32 */
+
+#ifndef CK_F_PR_NEG_32_ZERO
+#define CK_F_PR_NEG_32_ZERO
+CK_PR_N_Z_S(32, uint32_t)
+#endif /* CK_F_PR_NEG_32_ZERO */
+
+#endif /* CK_F_PR_LOAD_32 && CK_F_PR_CAS_32_VALUE */
+
+#if defined(CK_F_PR_LOAD_16) && defined(CK_F_PR_CAS_16_VALUE)
+
+#ifndef CK_F_PR_NOT_16
+#define CK_F_PR_NOT_16
+CK_PR_N_S(not, 16, uint16_t, ~)
+#endif /* CK_F_PR_NOT_16 */
+
+#ifndef CK_F_PR_NEG_16
+#define CK_F_PR_NEG_16
+CK_PR_N_S(neg, 16, uint16_t, -)
+#endif /* CK_F_PR_NEG_16 */
+
+#ifndef CK_F_PR_NEG_16_ZERO
+#define CK_F_PR_NEG_16_ZERO
+CK_PR_N_Z_S(16, uint16_t)
+#endif /* CK_F_PR_NEG_16_ZERO */
+
+#endif /* CK_F_PR_LOAD_16 && CK_F_PR_CAS_16_VALUE */
+
+#if defined(CK_F_PR_LOAD_8) && defined(CK_F_PR_CAS_8_VALUE)
+
+#ifndef CK_F_PR_NOT_8
+#define CK_F_PR_NOT_8
+CK_PR_N_S(not, 8, uint8_t, ~)
+#endif /* CK_F_PR_NOT_8 */
+
+#ifndef CK_F_PR_NEG_8
+#define CK_F_PR_NEG_8
+CK_PR_N_S(neg, 8, uint8_t, -)
+#endif /* CK_F_PR_NEG_8 */
+
+#ifndef CK_F_PR_NEG_8_ZERO
+#define CK_F_PR_NEG_8_ZERO
+CK_PR_N_Z_S(8, uint8_t)
+#endif /* CK_F_PR_NEG_8_ZERO */
+
+#endif /* CK_F_PR_LOAD_8 && CK_F_PR_CAS_8_VALUE */
+
+#undef CK_PR_N_Z_S
+#undef CK_PR_N_S
+#undef CK_PR_N_Z
+#undef CK_PR_N
+
+#define CK_PR_FAA(S, M, T, C)						\
+	CK_CC_INLINE static C						\
+	ck_pr_faa_##S(M *target, T delta)				\
+	{								\
+		T previous;						\
+		C punt;							\
+		punt = (C)ck_pr_load_##S(target);			\
+		previous = (T)punt;					\
+		while (ck_pr_cas_##S##_value(target,			\
+					     (C)previous,		\
+					     (C)(previous + delta),	\
+					     &previous) == false)	\
+			ck_pr_stall();					\
+									\
+		return ((C)previous);					\
+	}
+
+#define CK_PR_FAS(S, M, C)						\
+	CK_CC_INLINE static C						\
+	ck_pr_fas_##S(M *target, C update)				\
+	{								\
+		C previous;						\
+		previous = ck_pr_load_##S(target);			\
+		while (ck_pr_cas_##S##_value(target,			\
+					     previous,			\
+					     update,			\
+					     &previous) == false)	\
+			ck_pr_stall();					\
+									\
+		return (previous);					\
+	}
+
+#define CK_PR_FAA_S(S, M) CK_PR_FAA(S, M, M, M)
+#define CK_PR_FAS_S(S, M) CK_PR_FAS(S, M, M)
+
+#if defined(CK_F_PR_LOAD_CHAR) && defined(CK_F_PR_CAS_CHAR_VALUE)
+
+#ifndef CK_F_PR_FAA_CHAR
+#define CK_F_PR_FAA_CHAR
+CK_PR_FAA_S(char, char)
+#endif /* CK_F_PR_FAA_CHAR */
+
+#ifndef CK_F_PR_FAS_CHAR
+#define CK_F_PR_FAS_CHAR
+CK_PR_FAS_S(char, char)
+#endif /* CK_F_PR_FAS_CHAR */
+
+#endif /* CK_F_PR_LOAD_CHAR && CK_F_PR_CAS_CHAR_VALUE */
+
+#if defined(CK_F_PR_LOAD_INT) && defined(CK_F_PR_CAS_INT_VALUE)
+
+#ifndef CK_F_PR_FAA_INT
+#define CK_F_PR_FAA_INT
+CK_PR_FAA_S(int, int)
+#endif /* CK_F_PR_FAA_INT */
+
+#ifndef CK_F_PR_FAS_INT
+#define CK_F_PR_FAS_INT
+CK_PR_FAS_S(int, int)
+#endif /* CK_F_PR_FAS_INT */
+
+#endif /* CK_F_PR_LOAD_INT && CK_F_PR_CAS_INT_VALUE */
+
+#if defined(CK_F_PR_LOAD_DOUBLE) && defined(CK_F_PR_CAS_DOUBLE_VALUE)
+
+#ifndef CK_F_PR_FAA_DOUBLE
+#define CK_F_PR_FAA_DOUBLE
+CK_PR_FAA_S(double, double)
+#endif /* CK_F_PR_FAA_DOUBLE */
+
+#ifndef CK_F_PR_FAS_DOUBLE
+#define CK_F_PR_FAS_DOUBLE
+CK_PR_FAS_S(double, double)
+#endif /* CK_F_PR_FAS_DOUBLE */
+
+#endif /* CK_F_PR_LOAD_DOUBLE && CK_F_PR_CAS_DOUBLE_VALUE */
+
+#if defined(CK_F_PR_LOAD_UINT) && defined(CK_F_PR_CAS_UINT_VALUE)
+
+#ifndef CK_F_PR_FAA_UINT
+#define CK_F_PR_FAA_UINT
+CK_PR_FAA_S(uint, unsigned int)
+#endif /* CK_F_PR_FAA_UINT */
+
+#ifndef CK_F_PR_FAS_UINT
+#define CK_F_PR_FAS_UINT
+CK_PR_FAS_S(uint, unsigned int)
+#endif /* CK_F_PR_FAS_UINT */
+
+#endif /* CK_F_PR_LOAD_UINT && CK_F_PR_CAS_UINT_VALUE */
+
+#if defined(CK_F_PR_LOAD_PTR) && defined(CK_F_PR_CAS_PTR_VALUE)
+
+#ifndef CK_F_PR_FAA_PTR
+#define CK_F_PR_FAA_PTR
+CK_PR_FAA(ptr, void, uintptr_t, void *)
+#endif /* CK_F_PR_FAA_PTR */
+
+#ifndef CK_F_PR_FAS_PTR
+#define CK_F_PR_FAS_PTR
+CK_PR_FAS(ptr, void, void *)
+#endif /* CK_F_PR_FAS_PTR */
+
+#endif /* CK_F_PR_LOAD_PTR && CK_F_PR_CAS_PTR_VALUE */
+
+#if defined(CK_F_PR_LOAD_64) && defined(CK_F_PR_CAS_64_VALUE)
+
+#ifndef CK_F_PR_FAA_64
+#define CK_F_PR_FAA_64
+CK_PR_FAA_S(64, uint64_t)
+#endif /* CK_F_PR_FAA_64 */
+
+#ifndef CK_F_PR_FAS_64
+#define CK_F_PR_FAS_64
+CK_PR_FAS_S(64, uint64_t)
+#endif /* CK_F_PR_FAS_64 */
+
+#endif /* CK_F_PR_LOAD_64 && CK_F_PR_CAS_64_VALUE */
+
+#if defined(CK_F_PR_LOAD_32) && defined(CK_F_PR_CAS_32_VALUE)
+
+#ifndef CK_F_PR_FAA_32
+#define CK_F_PR_FAA_32
+CK_PR_FAA_S(32, uint32_t)
+#endif /* CK_F_PR_FAA_32 */
+
+#ifndef CK_F_PR_FAS_32
+#define CK_F_PR_FAS_32
+CK_PR_FAS_S(32, uint32_t)
+#endif /* CK_F_PR_FAS_32 */
+
+#endif /* CK_F_PR_LOAD_32 && CK_F_PR_CAS_32_VALUE */
+
+#if defined(CK_F_PR_LOAD_16) && defined(CK_F_PR_CAS_16_VALUE)
+
+#ifndef CK_F_PR_FAA_16
+#define CK_F_PR_FAA_16
+CK_PR_FAA_S(16, uint16_t)
+#endif /* CK_F_PR_FAA_16 */
+
+#ifndef CK_F_PR_FAS_16
+#define CK_F_PR_FAS_16
+CK_PR_FAS_S(16, uint16_t)
+#endif /* CK_F_PR_FAS_16 */
+
+#endif /* CK_F_PR_LOAD_16 && CK_F_PR_CAS_16_VALUE */
+
+#if defined(CK_F_PR_LOAD_8) && defined(CK_F_PR_CAS_8_VALUE)
+
+#ifndef CK_F_PR_FAA_8
+#define CK_F_PR_FAA_8
+CK_PR_FAA_S(8, uint8_t)
+#endif /* CK_F_PR_FAA_8 */
+
+#ifndef CK_F_PR_FAS_8
+#define CK_F_PR_FAS_8
+CK_PR_FAS_S(8, uint8_t)
+#endif /* CK_F_PR_FAS_8 */
+
+#endif /* CK_F_PR_LOAD_8 && CK_F_PR_CAS_8_VALUE */
+
+#undef CK_PR_FAA_S
+#undef CK_PR_FAS_S
+#undef CK_PR_FAA
+#undef CK_PR_FAS
+
+#endif /* _CK_PR_H */
+
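
For readers new to the ck_pr interface, the following is a minimal sketch of
how the primitives generated by this header fit together. The names
(producer, consumer, hits, ready) are illustrative only and not part of
libck. On a TSO target such as x86_64 the two fences shown reduce to compiler
barriers, per the CK_MD_TSO branch above.

    #include <ck_pr.h>
    #include <stdbool.h>

    static unsigned int hits;
    static unsigned int ready;

    void
    producer(void)
    {
        /* Atomic fetch-and-add, generated from the CK_PR_FAA CAS loop. */
        ck_pr_faa_uint(&hits, 1);

        /* Order the data store before publication of the flag. */
        ck_pr_fence_store();
        ck_pr_store_uint(&ready, 1);
    }

    bool
    consumer(unsigned int *out)
    {
        if (ck_pr_load_uint(&ready) == 0)
            return false;

        /* Order the flag load before the data load. */
        ck_pr_fence_load();
        *out = ck_pr_load_uint(&hits);
        return true;
    }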

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/f098175e/lib/ck/include/ck_queue.h
----------------------------------------------------------------------
diff --git a/lib/ck/include/ck_queue.h b/lib/ck/include/ck_queue.h
new file mode 100644
index 0000000..3d8824f
--- /dev/null
+++ b/lib/ck/include/ck_queue.h
@@ -0,0 +1,418 @@
+/*
+ * Copyright 2012-2014 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*-
+ * Copyright (c) 1991, 1993
+ *	The Regents of the University of California.  All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *	@(#)queue.h	8.5 (Berkeley) 8/20/94
+ * $FreeBSD: release/9.0.0/sys/sys/queue.h 221843 2011-05-13 15:49:23Z mdf $
+ */
+
+#ifndef _CK_QUEUE_H_
+#define	_CK_QUEUE_H_
+
+#include <ck_pr.h>
+
+/*
+ * This file defines three types of data structures: singly-linked lists,
+ * singly-linked tail queues and lists.
+ *
+ * A singly-linked list is headed by a single forward pointer. The elements
+ * are singly linked for minimum space and pointer manipulation overhead at
+ * the expense of O(n) removal for arbitrary elements. New elements can be
+ * added to the list after an existing element or at the head of the list.
+ * Elements being removed from the head of the list should use the explicit
+ * macro for this purpose for optimum efficiency. A singly-linked list may
+ * only be traversed in the forward direction.  Singly-linked lists are ideal
+ * for applications with large datasets and few or no removals or for
+ * implementing a LIFO queue.
+ *
+ * A list is headed by a single forward pointer (or an array of forward
+ * pointers for a hash table header). The elements are doubly linked
+ * so that an arbitrary element can be removed without a need to
+ * traverse the list. New elements can be added to the list before
+ * or after an existing element or at the head of the list. A list
+ * may only be traversed in the forward direction.
+ *
+ * It is safe to use _FOREACH/_FOREACH_SAFE in the presence of concurrent
+ * modifications to the list. Writers to these lists must, on the other hand,
+ * implement writer-side synchronization. The _SWAP operations are not atomic.
+ * This facility is currently unsupported on architectures such as the Alpha
+ * which require load-depend memory fences.
+ *
+ *				CK_SLIST	CK_LIST	CK_STAILQ
+ * _HEAD			+		+	+
+ * _HEAD_INITIALIZER		+		+	+
+ * _ENTRY			+		+	+
+ * _INIT			+		+	+
+ * _EMPTY			+		+	+
+ * _FIRST			+		+	+
+ * _NEXT			+		+	+
+ * _FOREACH			+		+	+
+ * _FOREACH_SAFE		+		+	+
+ * _INSERT_HEAD			+		+	+
+ * _INSERT_BEFORE		-		+	-
+ * _INSERT_AFTER		+		+	+
+ * _INSERT_TAIL			-		-	+
+ * _REMOVE_AFTER		+		-	+
+ * _REMOVE_HEAD			+		-	+
+ * _REMOVE			+		+	+
+ * _SWAP			+		+	+
+ * _MOVE			+		+	+
+ */
+
+/*
+ * Singly-linked List declarations.
+ */
+#define	CK_SLIST_HEAD(name, type)						\
+struct name {									\
+	struct type *slh_first;	/* first element */				\
+}
+
+#define	CK_SLIST_HEAD_INITIALIZER(head)						\
+	{ NULL }
+
+#define	CK_SLIST_ENTRY(type)							\
+struct {									\
+	struct type *sle_next;	/* next element */				\
+}
+
+/*
+ * Singly-linked List functions.
+ */
+#define	CK_SLIST_EMPTY(head)							\
+	(ck_pr_load_ptr(&(head)->slh_first) == NULL)
+
+#define	CK_SLIST_FIRST(head)							\
+	(ck_pr_load_ptr(&(head)->slh_first))
+
+#define	CK_SLIST_NEXT(elm, field)						\
+	ck_pr_load_ptr(&((elm)->field.sle_next))
+
+#define	CK_SLIST_FOREACH(var, head, field)					\
+	for ((var) = CK_SLIST_FIRST((head));					\
+	    (var) && (ck_pr_fence_load(), 1);					\
+	    (var) = CK_SLIST_NEXT((var), field))
+
+#define	CK_SLIST_FOREACH_SAFE(var, head, field, tvar)				 \
+	for ((var) = CK_SLIST_FIRST(head);					 \
+	    (var) && (ck_pr_fence_load(), (tvar) = CK_SLIST_NEXT(var, field), 1);\
+	    (var) = (tvar))
+
+#define	CK_SLIST_FOREACH_PREVPTR(var, varp, head, field)			\
+	for ((varp) = &(head)->slh_first;					\
+	    ((var) = ck_pr_load_ptr(varp)) != NULL && (ck_pr_fence_load(), 1);	\
+	    (varp) = &(var)->field.sle_next)
+
+#define	CK_SLIST_INIT(head) do {						\
+	ck_pr_store_ptr(&(head)->slh_first, NULL);				\
+	ck_pr_fence_store();							\
+} while (0)
+
+#define	CK_SLIST_INSERT_AFTER(a, b, field) do {					\
+	(b)->field.sle_next = (a)->field.sle_next;				\
+	ck_pr_fence_store();							\
+	ck_pr_store_ptr(&(a)->field.sle_next, b);				\
+} while (0)
+
+#define	CK_SLIST_INSERT_HEAD(head, elm, field) do {				\
+	(elm)->field.sle_next = (head)->slh_first;				\
+	ck_pr_fence_store();							\
+	ck_pr_store_ptr(&(head)->slh_first, elm);				\
+} while (0)
+
+#define CK_SLIST_REMOVE_AFTER(elm, field) do {					\
+	ck_pr_store_ptr(&(elm)->field.sle_next,					\
+	    (elm)->field.sle_next->field.sle_next);				\
+} while (0)
+
+#define	CK_SLIST_REMOVE(head, elm, type, field) do {				\
+	if ((head)->slh_first == (elm)) {					\
+		CK_SLIST_REMOVE_HEAD((head), field);				\
+	} else {								\
+		struct type *curelm = (head)->slh_first;			\
+		while (curelm->field.sle_next != (elm))				\
+			curelm = curelm->field.sle_next;			\
+		CK_SLIST_REMOVE_AFTER(curelm, field);				\
+	}									\
+} while (0)
+
+#define	CK_SLIST_REMOVE_HEAD(head, field) do {					\
+	ck_pr_store_ptr(&(head)->slh_first,					\
+		(head)->slh_first->field.sle_next);				\
+} while (0)
+
+#define CK_SLIST_MOVE(head1, head2, field) do {					\
+	ck_pr_store_ptr(&(head1)->slh_first, (head2)->slh_first);		\
+} while (0)
+
+/*
+ * This operation is not applied atomically.
+ */
+#define CK_SLIST_SWAP(a, b, type) do {						\
+	struct type *swap_first = (a)->slh_first;				\
+	(a)->slh_first = (b)->slh_first;					\
+	(b)->slh_first = swap_first;						\
+} while (0)
+
+/*
+ * Singly-linked Tail queue declarations.
+ */
+#define	CK_STAILQ_HEAD(name, type)					\
+struct name {								\
+	struct type *stqh_first;/* first element */			\
+	struct type **stqh_last;/* addr of last next element */		\
+}
+
+#define	CK_STAILQ_HEAD_INITIALIZER(head)				\
+	{ NULL, &(head).stqh_first }
+
+#define	CK_STAILQ_ENTRY(type)						\
+struct {								\
+	struct type *stqe_next;	/* next element */			\
+}
+
+/*
+ * Singly-linked Tail queue functions.
+ */
+#define	CK_STAILQ_CONCAT(head1, head2) do {					\
+	if ((head2)->stqh_first != NULL) {					\
+		ck_pr_store_ptr((head1)->stqh_last, (head2)->stqh_first);	\
+		ck_pr_fence_store();						\
+		(head1)->stqh_last = (head2)->stqh_last;			\
+		CK_STAILQ_INIT((head2));					\
+	}									\
+} while (0)
+
+#define	CK_STAILQ_EMPTY(head)	(ck_pr_load_ptr(&(head)->stqh_first) == NULL)
+
+#define	CK_STAILQ_FIRST(head)	(ck_pr_load_ptr(&(head)->stqh_first))
+
+#define	CK_STAILQ_FOREACH(var, head, field)				\
+	for((var) = CK_STAILQ_FIRST((head));				\
+	   (var) && (ck_pr_fence_load(), 1);				\
+	   (var) = CK_STAILQ_NEXT((var), field))
+
+#define	CK_STAILQ_FOREACH_SAFE(var, head, field, tvar)			\
+	for ((var) = CK_STAILQ_FIRST((head));				\
+	    (var) && (ck_pr_fence_load(), (tvar) =			\
+		CK_STAILQ_NEXT((var), field), 1);			\
+	    (var) = (tvar))
+
+#define	CK_STAILQ_INIT(head) do {					\
+	ck_pr_store_ptr(&(head)->stqh_first, NULL);			\
+	ck_pr_fence_store();						\
+	(head)->stqh_last = &(head)->stqh_first;			\
+} while (0)
+
+#define	CK_STAILQ_INSERT_AFTER(head, tqelm, elm, field) do {			\
+	(elm)->field.stqe_next = (tqelm)->field.stqe_next;			\
+	ck_pr_fence_store();							\
+	ck_pr_store_ptr(&(tqelm)->field.stqe_next, elm);			\
+	if ((elm)->field.stqe_next == NULL)					\
+		(head)->stqh_last = &(elm)->field.stqe_next;			\
+} while (0)
+
+#define	CK_STAILQ_INSERT_HEAD(head, elm, field) do {				\
+	(elm)->field.stqe_next = (head)->stqh_first;				\
+	ck_pr_fence_store();							\
+	ck_pr_store_ptr(&(head)->stqh_first, elm);				\
+	if ((elm)->field.stqe_next == NULL)					\
+		(head)->stqh_last = &(elm)->field.stqe_next;			\
+} while (0)
+
+#define	CK_STAILQ_INSERT_TAIL(head, elm, field) do {				\
+	(elm)->field.stqe_next = NULL;						\
+	ck_pr_fence_store();							\
+	ck_pr_store_ptr((head)->stqh_last, (elm));				\
+	(head)->stqh_last = &(elm)->field.stqe_next;				\
+} while (0)
+
+#define	CK_STAILQ_NEXT(elm, field)						\
+	(ck_pr_load_ptr(&(elm)->field.stqe_next))
+
+#define	CK_STAILQ_REMOVE(head, elm, type, field) do {				\
+	if ((head)->stqh_first == (elm)) {					\
+		CK_STAILQ_REMOVE_HEAD((head), field);				\
+	} else {								\
+		struct type *curelm = (head)->stqh_first;			\
+		while (curelm->field.stqe_next != (elm))			\
+			curelm = curelm->field.stqe_next;			\
+		CK_STAILQ_REMOVE_AFTER(head, curelm, field);			\
+	}									\
+} while (0)
+
+#define CK_STAILQ_REMOVE_AFTER(head, elm, field) do {				\
+	ck_pr_store_ptr(&(elm)->field.stqe_next,				\
+	    (elm)->field.stqe_next->field.stqe_next);				\
+	if ((elm)->field.stqe_next == NULL)					\
+		(head)->stqh_last = &(elm)->field.stqe_next;			\
+} while (0)
+
+#define	CK_STAILQ_REMOVE_HEAD(head, field) do {					\
+	ck_pr_store_ptr(&(head)->stqh_first,					\
+	    (head)->stqh_first->field.stqe_next);				\
+	if ((head)->stqh_first == NULL)						\
+		(head)->stqh_last = &(head)->stqh_first;			\
+} while (0)
+
+#define CK_STAILQ_MOVE(head1, head2, field) do {				\
+	ck_pr_store_ptr(&(head1)->stqh_first, (head2)->stqh_first);		\
+	(head1)->stqh_last = (head2)->stqh_last;				\
+	if ((head2)->stqh_last == &(head2)->stqh_first)				\
+		(head1)->stqh_last = &(head1)->stqh_first;			\
+} while (0)
+
+/*
+ * This operation is not applied atomically.
+ */
+#define CK_STAILQ_SWAP(head1, head2, type) do {				\
+	struct type *swap_first = CK_STAILQ_FIRST(head1);		\
+	struct type **swap_last = (head1)->stqh_last;			\
+	CK_STAILQ_FIRST(head1) = CK_STAILQ_FIRST(head2);		\
+	(head1)->stqh_last = (head2)->stqh_last;			\
+	CK_STAILQ_FIRST(head2) = swap_first;				\
+	(head2)->stqh_last = swap_last;					\
+	if (CK_STAILQ_EMPTY(head1))					\
+		(head1)->stqh_last = &(head1)->stqh_first;		\
+	if (CK_STAILQ_EMPTY(head2))					\
+		(head2)->stqh_last = &(head2)->stqh_first;		\
+} while (0)
+
+/*
+ * List declarations.
+ */
+#define	CK_LIST_HEAD(name, type)						\
+struct name {									\
+	struct type *lh_first;	/* first element */				\
+}
+
+#define	CK_LIST_HEAD_INITIALIZER(head)						\
+	{ NULL }
+
+#define	CK_LIST_ENTRY(type)							\
+struct {									\
+	struct type *le_next;	/* next element */				\
+	struct type **le_prev;	/* address of previous next element */		\
+}
+
+#define	CK_LIST_FIRST(head)		ck_pr_load_ptr(&(head)->lh_first)
+#define	CK_LIST_EMPTY(head)		(CK_LIST_FIRST(head) == NULL)
+#define	CK_LIST_NEXT(elm, field)	ck_pr_load_ptr(&(elm)->field.le_next)
+
+#define	CK_LIST_FOREACH(var, head, field)					\
+	for ((var) = CK_LIST_FIRST((head));					\
+	    (var) && (ck_pr_fence_load(), 1);					\
+	    (var) = CK_LIST_NEXT((var), field))
+
+#define	CK_LIST_FOREACH_SAFE(var, head, field, tvar)				  \
+	for ((var) = CK_LIST_FIRST((head));					  \
+	    (var) && (ck_pr_fence_load(), (tvar) = CK_LIST_NEXT((var), field), 1);\
+	    (var) = (tvar))
+
+#define	CK_LIST_INIT(head) do {							\
+	ck_pr_store_ptr(&(head)->lh_first, NULL);				\
+	ck_pr_fence_store();							\
+} while (0)
+
+#define	CK_LIST_INSERT_AFTER(listelm, elm, field) do {				\
+	(elm)->field.le_next = (listelm)->field.le_next;			\
+	(elm)->field.le_prev = &(listelm)->field.le_next;			\
+	ck_pr_fence_store();							\
+	if ((listelm)->field.le_next != NULL)					\
+		(listelm)->field.le_next->field.le_prev = &(elm)->field.le_next;\
+	ck_pr_store_ptr(&(listelm)->field.le_next, elm);			\
+} while (0)
+
+#define	CK_LIST_INSERT_BEFORE(listelm, elm, field) do {				\
+	(elm)->field.le_prev = (listelm)->field.le_prev;			\
+	(elm)->field.le_next = (listelm);					\
+	ck_pr_fence_store();							\
+	ck_pr_store_ptr((listelm)->field.le_prev, (elm));			\
+	(listelm)->field.le_prev = &(elm)->field.le_next;			\
+} while (0)
+
+#define	CK_LIST_INSERT_HEAD(head, elm, field) do {				\
+	(elm)->field.le_next = (head)->lh_first;				\
+	ck_pr_fence_store();							\
+	if ((elm)->field.le_next != NULL)					\
+		(head)->lh_first->field.le_prev =  &(elm)->field.le_next;	\
+	ck_pr_store_ptr(&(head)->lh_first, elm);				\
+	(elm)->field.le_prev = &(head)->lh_first;				\
+} while (0)
+
+#define	CK_LIST_REMOVE(elm, field) do {						\
+	ck_pr_store_ptr((elm)->field.le_prev, (elm)->field.le_next);		\
+	if ((elm)->field.le_next != NULL)					\
+		(elm)->field.le_next->field.le_prev = (elm)->field.le_prev;	\
+} while (0)
+
+#define CK_LIST_MOVE(head1, head2, field) do {				\
+	ck_pr_store_ptr(&(head1)->lh_first, (head2)->lh_first);		\
+	if ((head1)->lh_first != NULL)					\
+		(head1)->lh_first->field.le_prev = &(head1)->lh_first;	\
+} while (0)
+
+/*
+ * This operation is not applied atomically.
+ */
+#define CK_LIST_SWAP(head1, head2, type, field) do {			\
+	struct type *swap_tmp = (head1)->lh_first;			\
+	(head1)->lh_first = (head2)->lh_first;				\
+	(head2)->lh_first = swap_tmp;					\
+	if ((swap_tmp = (head1)->lh_first) != NULL)			\
+		swap_tmp->field.le_prev = &(head1)->lh_first;		\
+	if ((swap_tmp = (head2)->lh_first) != NULL)			\
+		swap_tmp->field.le_prev = &(head2)->lh_first;		\
+} while (0)
+
+#endif /* _CK_QUEUE_H_ */
+
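
As a usage illustration, here is a minimal sketch of the single-writer,
many-reader pattern the comment block above describes: readers traverse with
CK_SLIST_FOREACH and no lock, while writers serialize among themselves (a
pthread mutex here). The struct and function names are illustrative, not part
of libck, and elements removed from such a list must still pass through a
safe memory reclamation scheme (e.g. epoch reclamation) before being freed.

    #include <ck_queue.h>
    #include <pthread.h>
    #include <stdlib.h>
    #include <string.h>

    struct entry {
        char name[32];
        CK_SLIST_ENTRY(entry) link;
    };

    static CK_SLIST_HEAD(entry_list, entry) head =
        CK_SLIST_HEAD_INITIALIZER(head);
    static pthread_mutex_t writer_lock = PTHREAD_MUTEX_INITIALIZER;

    void
    entry_add(const char *name)
    {
        struct entry *e = malloc(sizeof(*e));

        if (e == NULL)
            return;

        strncpy(e->name, name, sizeof(e->name) - 1);
        e->name[sizeof(e->name) - 1] = '\0';

        /* Writers must serialize among themselves. */
        pthread_mutex_lock(&writer_lock);
        CK_SLIST_INSERT_HEAD(&head, e, link);
        pthread_mutex_unlock(&writer_lock);
    }

    struct entry *
    entry_find(const char *name)
    {
        struct entry *e;

        /* Readers take no lock; traversal is safe against concurrent
         * inserts and removals by the writer. */
        CK_SLIST_FOREACH(e, &head, link) {
            if (strcmp(e->name, name) == 0)
                return e;
        }

        return NULL;
    }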

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/f098175e/lib/ck/include/ck_rhs.h
----------------------------------------------------------------------
diff --git a/lib/ck/include/ck_rhs.h b/lib/ck/include/ck_rhs.h
new file mode 100644
index 0000000..7a28f7d
--- /dev/null
+++ b/lib/ck/include/ck_rhs.h
@@ -0,0 +1,130 @@
+/*
+ * Copyright 2012-2014 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _CK_RHS_H
+#define _CK_RHS_H
+
+#include <ck_cc.h>
+#include <ck_malloc.h>
+#include <ck_md.h>
+#include <ck_pr.h>
+#include <ck_stdint.h>
+#include <stdbool.h>
+#include <stddef.h>
+
+/*
+ * Indicates a single-writer many-reader workload. Mutually
+ * exclusive with CK_RHS_MODE_MPMC.
+ */
+#define CK_RHS_MODE_SPMC		1
+
+/*
+ * Indicates that values to be stored are not pointers but
+ * values. Allows for full precision. Mutually exclusive
+ * with CK_RHS_MODE_OBJECT.
+ */
+#define CK_RHS_MODE_DIRECT	2
+
+/* 
+ * Indicates that the values to be stored are pointers.
+ * Allows for space optimizations in the presence of pointer
+ * packing. Mutually exclusive with CK_RHS_MODE_DIRECT.
+ */
+#define CK_RHS_MODE_OBJECT	8
+
+/*
+ * Indicates that the workload is read-mostly, so get operations should be
+ * optimized over put and delete.
+ */
+#define CK_RHS_MODE_READ_MOSTLY	16
+
+/* Currently unsupported. */
+#define CK_RHS_MODE_MPMC    (void)
+
+/*
+ * Hash callback function.
+ */
+typedef unsigned long ck_rhs_hash_cb_t(const void *, unsigned long);
+
+/*
+ * Returns pointer to object if objects are equivalent.
+ */
+typedef bool ck_rhs_compare_cb_t(const void *, const void *);
+
+#if defined(CK_MD_POINTER_PACK_ENABLE) && defined(CK_MD_VMA_BITS)
+#define CK_RHS_PP
+#define CK_RHS_KEY_MASK ((1U << ((sizeof(void *) * 8) - CK_MD_VMA_BITS)) - 1)
+#endif
+
+struct ck_rhs_map;
+struct ck_rhs {
+	struct ck_malloc *m;
+	struct ck_rhs_map *map;
+	unsigned int mode;
+	unsigned long seed;
+	ck_rhs_hash_cb_t *hf;
+	ck_rhs_compare_cb_t *compare;
+};
+typedef struct ck_rhs ck_rhs_t;
+
+struct ck_rhs_stat {
+	unsigned long n_entries;
+	unsigned int probe_maximum;
+};
+
+struct ck_rhs_iterator {
+	void **cursor;
+	unsigned long offset;
+};
+typedef struct ck_rhs_iterator ck_rhs_iterator_t;
+
+#define CK_RHS_ITERATOR_INITIALIZER { NULL, 0 }
+
+/* Convenience wrapper to table hash function. */
+#define CK_RHS_HASH(T, F, K) F((K), (T)->seed)
+
+void ck_rhs_iterator_init(ck_rhs_iterator_t *);
+bool ck_rhs_next(ck_rhs_t *, ck_rhs_iterator_t *, void **);
+bool ck_rhs_move(ck_rhs_t *, ck_rhs_t *, ck_rhs_hash_cb_t *,
+    ck_rhs_compare_cb_t *, struct ck_malloc *);
+bool ck_rhs_init(ck_rhs_t *, unsigned int, ck_rhs_hash_cb_t *,
+    ck_rhs_compare_cb_t *, struct ck_malloc *, unsigned long, unsigned long);
+void ck_rhs_destroy(ck_rhs_t *);
+void *ck_rhs_get(ck_rhs_t *, unsigned long, const void *);
+bool ck_rhs_put(ck_rhs_t *, unsigned long, const void *);
+bool ck_rhs_put_unique(ck_rhs_t *, unsigned long, const void *);
+bool ck_rhs_set(ck_rhs_t *, unsigned long, const void *, void **);
+bool ck_rhs_fas(ck_rhs_t *, unsigned long, const void *, void **);
+void *ck_rhs_remove(ck_rhs_t *, unsigned long, const void *);
+bool ck_rhs_grow(ck_rhs_t *, unsigned long);
+bool ck_rhs_rebuild(ck_rhs_t *);
+bool ck_rhs_gc(ck_rhs_t *);
+unsigned long ck_rhs_count(ck_rhs_t *);
+bool ck_rhs_reset(ck_rhs_t *);
+bool ck_rhs_reset_size(ck_rhs_t *, unsigned long);
+void ck_rhs_stat(ck_rhs_t *, struct ck_rhs_stat *);
+
+#endif /* _CK_RHS_H */
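
As a usage illustration, here is a minimal sketch of initializing and probing
a ck_rhs set holding pointer keys (CK_RHS_MODE_OBJECT) under the
single-writer mode. The allocator wrappers, hash and comparison callbacks are
illustrative stand-ins (a real application would plug in a stronger hash),
struct ck_malloc comes from ck_malloc.h, and the two trailing ck_rhs_init
arguments are taken to be the initial capacity and hash seed, as in ck_hs.

    #include <ck_rhs.h>
    #include <stdlib.h>
    #include <string.h>

    static void *
    hs_malloc(size_t r)
    {
        return malloc(r);
    }

    static void
    hs_free(void *p, size_t b, bool r)
    {
        (void)b;
        (void)r;
        free(p);
    }

    static struct ck_malloc allocator = {
        .malloc = hs_malloc,
        .free = hs_free
    };

    /* Toy string hash for the sketch only. */
    static unsigned long
    hs_hash(const void *object, unsigned long seed)
    {
        const unsigned char *c = object;
        unsigned long h = seed;

        while (*c != '\0')
            h = h * 131 + *c++;

        return h;
    }

    static bool
    hs_compare(const void *previous, const void *compare)
    {
        return strcmp(previous, compare) == 0;
    }

    static ck_rhs_t set;

    bool
    set_setup(void)
    {
        return ck_rhs_init(&set, CK_RHS_MODE_OBJECT | CK_RHS_MODE_SPMC,
            hs_hash, hs_compare, &allocator, 64, 6602834);
    }

    bool
    set_insert(const char *key)
    {
        return ck_rhs_put(&set, CK_RHS_HASH(&set, hs_hash, key), key);
    }

    const char *
    set_lookup(const char *key)
    {
        return ck_rhs_get(&set, CK_RHS_HASH(&set, hs_hash, key), key);
    }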

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/f098175e/lib/ck/include/ck_ring.h
----------------------------------------------------------------------
diff --git a/lib/ck/include/ck_ring.h b/lib/ck/include/ck_ring.h
new file mode 100644
index 0000000..88bb837
--- /dev/null
+++ b/lib/ck/include/ck_ring.h
@@ -0,0 +1,435 @@
+/*
+ * Copyright 2009-2014 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _CK_RING_H
+#define _CK_RING_H
+
+#include <ck_cc.h>
+#include <ck_md.h>
+#include <ck_pr.h>
+#include <stdbool.h>
+#include <string.h>
+
+/*
+ * Concurrent ring buffer.
+ */
+
+struct ck_ring {
+	unsigned int c_head;
+	char pad[CK_MD_CACHELINE - sizeof(unsigned int)];
+	unsigned int p_tail;
+	char _pad[CK_MD_CACHELINE - sizeof(unsigned int)];
+	unsigned int size;
+	unsigned int mask;
+};
+typedef struct ck_ring ck_ring_t;
+
+struct ck_ring_buffer {
+	void *value;
+};
+typedef struct ck_ring_buffer ck_ring_buffer_t;
+
+CK_CC_INLINE static unsigned int
+ck_ring_size(struct ck_ring *ring)
+{
+	unsigned int c, p;
+
+	c = ck_pr_load_uint(&ring->c_head);
+	p = ck_pr_load_uint(&ring->p_tail);
+	return (p - c) & ring->mask;
+}
+
+CK_CC_INLINE static unsigned int
+ck_ring_capacity(struct ck_ring *ring)
+{
+
+	return ring->size;
+}
+
+/*
+ * Atomically enqueues the specified entry. Returns true on success, returns
+ * false if the ck_ring is full. This operation only supports one active
+ * invocation at a time and works in the presence of a concurrent invocation
+ * of ck_ring_dequeue_spsc.
+ *
+ * This variant of ck_ring_enqueue_spsc returns the snapshot of queue length
+ * with respect to the linearization point. This can be used to extract ring
+ * size without incurring additional cacheline invalidation overhead from the
+ * writer.
+ */
+CK_CC_INLINE static bool
+_ck_ring_enqueue_spsc_size(struct ck_ring *ring,
+    void *restrict buffer,
+    const void *restrict entry,
+    unsigned int type_size,
+    unsigned int *size)
+{
+	unsigned int consumer, producer, delta;
+	unsigned int mask = ring->mask;
+
+	consumer = ck_pr_load_uint(&ring->c_head);
+	producer = ring->p_tail;
+	delta = producer + 1;
+	*size = (producer - consumer) & mask;
+
+	if ((delta & mask) == (consumer & mask))
+		return false;
+
+	buffer = (char *)buffer + type_size * (producer & mask);
+	memcpy(buffer, entry, type_size);
+
+	/*
+	 * Make sure to update slot value before indicating
+	 * that the slot is available for consumption.
+	 */
+	ck_pr_fence_store();
+	ck_pr_store_uint(&ring->p_tail, delta);
+	return true;
+}
+
+CK_CC_INLINE static bool
+ck_ring_enqueue_spsc_size(struct ck_ring *ring,
+    struct ck_ring_buffer *buffer,
+    void *entry,
+    unsigned int *size)
+{
+
+	return _ck_ring_enqueue_spsc_size(ring, buffer, &entry,
+	    sizeof(void *), size);
+}
+
+/*
+ * Atomically enqueues the specified entry. Returns true on success, returns
+ * false if the ck_ring is full. This operation only supports one active
+ * invocation at a time and works in the presence of a concurrent invocation
+ * of ck_ring_dequeue_spsc.
+ */
+CK_CC_INLINE static bool
+_ck_ring_enqueue_spsc(struct ck_ring *ring,
+    void *restrict destination,
+    const void *restrict source,
+    unsigned int size)
+{
+	unsigned int consumer, producer, delta;
+	unsigned int mask = ring->mask;
+
+	consumer = ck_pr_load_uint(&ring->c_head);
+	producer = ring->p_tail;
+	delta = producer + 1;
+
+	if ((delta & mask) == (consumer & mask))
+		return false;
+
+	destination = (char *)destination + size * (producer & mask);
+	memcpy(destination, source, size);
+
+	/*
+	 * Make sure to update slot value before indicating
+	 * that the slot is available for consumption.
+	 */
+	ck_pr_fence_store();
+	ck_pr_store_uint(&ring->p_tail, delta);
+	return true;
+}
+
+CK_CC_INLINE static bool
+ck_ring_enqueue_spsc(struct ck_ring *ring,
+    struct ck_ring_buffer *buffer,
+    const void *entry)
+{
+
+	return _ck_ring_enqueue_spsc(ring, buffer,
+	    &entry, sizeof(entry));
+}
+
+/*
+ * Single consumer and single producer ring buffer dequeue (consumer).
+ */
+CK_CC_INLINE static bool
+_ck_ring_dequeue_spsc(struct ck_ring *ring,
+    void *restrict buffer,
+    void *restrict target,
+    unsigned int size)
+{
+	unsigned int consumer, producer;
+	unsigned int mask = ring->mask;
+
+	consumer = ring->c_head;
+	producer = ck_pr_load_uint(&ring->p_tail);
+
+	if (consumer == producer)
+		return false;
+
+	/*
+	 * Make sure to serialize with respect to our snapshot
+	 * of the producer counter.
+	 */
+	ck_pr_fence_load();
+
+	buffer = (char *)buffer + size * (consumer & mask);
+	memcpy(target, buffer, size);
+
+	/*
+	 * Make sure copy is completed with respect to consumer
+	 * update.
+	 */
+	ck_pr_fence_store();
+	ck_pr_store_uint(&ring->c_head, consumer + 1);
+	return true;
+}
+
+CK_CC_INLINE static bool
+ck_ring_dequeue_spsc(struct ck_ring *ring,
+    struct ck_ring_buffer *buffer,
+    void *data)
+{
+
+	return _ck_ring_dequeue_spsc(ring, buffer,
+	    data, sizeof(void *));
+}
+
+/*
+ * Atomically enqueues the specified entry. Returns true on success and
+ * false if the ck_ring is full. This operation only supports one active
+ * invocation at a time and works in the presence of up to UINT_MAX concurrent
+ * invocations of ck_ring_dequeue_spmc.
+ *
+ * This variant of ck_ring_enqueue_spmc returns a snapshot of the queue length
+ * with respect to the linearization point. This can be used to obtain the ring
+ * size without incurring additional cacheline invalidation overhead from the
+ * writer.
+ */
+CK_CC_INLINE static bool
+ck_ring_enqueue_spmc_size(struct ck_ring *ring,
+    struct ck_ring_buffer *buffer,
+    void *entry,
+    unsigned int *size)
+{
+
+	return ck_ring_enqueue_spsc_size(ring, buffer,
+	    entry, size);
+}
+
+/*
+ * Atomically enqueues the specified entry. Returns true on success and
+ * false if the ck_ring is full. This operation only supports one active
+ * invocation at a time and works in the presence of up to UINT_MAX concurrent
+ * invocations of ck_ring_dequeue_spmc.
+ */
+CK_CC_INLINE static bool
+ck_ring_enqueue_spmc(struct ck_ring *ring,
+    struct ck_ring_buffer *buffer,
+    void *entry)
+{
+
+	return ck_ring_enqueue_spsc(ring, buffer, entry);
+}
+
+CK_CC_INLINE static bool
+_ck_ring_trydequeue_spmc(struct ck_ring *ring,
+    void *restrict buffer,
+    void *data,
+    unsigned int size)
+{
+	unsigned int consumer, producer;
+	unsigned int mask = ring->mask;
+
+	consumer = ck_pr_load_uint(&ring->c_head);
+	ck_pr_fence_load();
+	producer = ck_pr_load_uint(&ring->p_tail);
+
+	if (consumer == producer)
+		return false;
+
+	ck_pr_fence_load();
+
+	buffer = (char *)buffer + size * (consumer & mask);
+	memcpy(data, buffer, size);
+
+	ck_pr_fence_store_atomic();
+	return ck_pr_cas_uint(&ring->c_head, consumer, consumer + 1);
+}
+
+CK_CC_INLINE static bool
+ck_ring_trydequeue_spmc(struct ck_ring *ring,
+    struct ck_ring_buffer *buffer,
+    void *data)
+{
+
+	return _ck_ring_trydequeue_spmc(ring,
+	    buffer, data, sizeof(void *));
+}
+
+CK_CC_INLINE static bool
+_ck_ring_dequeue_spmc(struct ck_ring *ring,
+    void *buffer,
+    void *data,
+    unsigned int size)
+{
+	unsigned int consumer, producer;
+	unsigned int mask = ring->mask;
+	char *target;
+
+	consumer = ck_pr_load_uint(&ring->c_head);
+
+	do {
+		/*
+		 * Producer counter must represent state relative to
+		 * our latest consumer snapshot.
+		 */
+		ck_pr_fence_load();
+		producer = ck_pr_load_uint(&ring->p_tail);
+
+		if (consumer == producer)
+			return false;
+
+		ck_pr_fence_load();
+
+		target = (char *)buffer + size * (consumer & mask);
+		memcpy(data, target, size);
+
+		/* Serialize load with respect to head update. */
+		ck_pr_fence_store_atomic();
+	} while (ck_pr_cas_uint_value(&ring->c_head,
+				      consumer,
+				      consumer + 1,
+				      &consumer) == false);
+
+	return true;
+}
+
+CK_CC_INLINE static bool
+ck_ring_dequeue_spmc(struct ck_ring *ring,
+    struct ck_ring_buffer *buffer,
+    void *data)
+{
+
+	return _ck_ring_dequeue_spmc(ring, buffer, data,
+	    sizeof(void *));
+}
+
+CK_CC_INLINE static void
+ck_ring_init(struct ck_ring *ring, unsigned int size)
+{
+
+	ring->size = size;
+	ring->mask = size - 1;
+	ring->p_tail = 0;
+	ring->c_head = 0;
+	return;
+}
+
+#define CK_RING_PROTOTYPE(name, type)			\
+CK_CC_INLINE static bool				\
+ck_ring_enqueue_spsc_size_##name(struct ck_ring *a,	\
+    struct type *b,					\
+    struct type *c,					\
+    unsigned int *d)					\
+{							\
+							\
+	return _ck_ring_enqueue_spsc_size(a, b, c,	\
+	    sizeof(struct type), d);			\
+}							\
+							\
+CK_CC_INLINE static bool				\
+ck_ring_enqueue_spsc_##name(struct ck_ring *a,		\
+    struct type *b,					\
+    struct type *c)					\
+{							\
+							\
+	return _ck_ring_enqueue_spsc(a, b, c,		\
+	    sizeof(struct type));			\
+}							\
+							\
+CK_CC_INLINE static bool				\
+ck_ring_dequeue_spsc_##name(struct ck_ring *a,		\
+    struct type *b,					\
+    struct type *c)					\
+{							\
+							\
+	return _ck_ring_dequeue_spsc(a, b, c,		\
+	    sizeof(struct type));			\
+}							\
+							\
+CK_CC_INLINE static bool				\
+ck_ring_enqueue_spmc_size_##name(struct ck_ring *a,	\
+    struct type *b,					\
+    struct type *c,					\
+    unsigned int *d)					\
+{							\
+							\
+	return _ck_ring_enqueue_spsc_size(a, b, c,	\
+	    sizeof(struct type), d);			\
+}							\
+							\
+							\
+CK_CC_INLINE static bool				\
+ck_ring_enqueue_spmc_##name(struct ck_ring *a,		\
+    struct type *b,					\
+    struct type *c)					\
+{							\
+							\
+	return _ck_ring_enqueue_spsc(a, b, c,		\
+	    sizeof(struct type));			\
+}							\
+							\
+CK_CC_INLINE static bool				\
+ck_ring_trydequeue_spmc_##name(struct ck_ring *a,	\
+    struct type *b,					\
+    struct type *c)					\
+{							\
+							\
+	return _ck_ring_trydequeue_spmc(a,		\
+	    b, c, sizeof(struct type));			\
+}							\
+							\
+CK_CC_INLINE static bool				\
+ck_ring_dequeue_spmc_##name(struct ck_ring *a,		\
+    struct type *b,					\
+    struct type *c)					\
+{							\
+							\
+	return _ck_ring_dequeue_spmc(a, b, c,		\
+	    sizeof(struct type));			\
+}
+
+#define CK_RING_ENQUEUE_SPSC(name, a, b, c)		\
+	ck_ring_enqueue_spsc_##name(a, b, c)
+#define CK_RING_ENQUEUE_SPSC_SIZE(name, a, b, c, d)	\
+	ck_ring_enqueue_spsc_size_##name(a, b, c, d)
+#define CK_RING_DEQUEUE_SPSC(name, a, b, c)		\
+	ck_ring_dequeue_spsc_##name(a, b, c)
+#define CK_RING_ENQUEUE_SPMC(name, a, b, c)		\
+	ck_ring_enqueue_spmc_##name(a, b, c)
+#define CK_RING_ENQUEUE_SPMC_SIZE(name, a, b, c, d)	\
+	ck_ring_enqueue_spmc_size_##name(a, b, c, d)
+#define CK_RING_TRYDEQUEUE_SPMC(name, a, b, c)		\
+	ck_ring_trydequeue_spmc_##name(a, b, c)
+#define CK_RING_DEQUEUE_SPMC(name, a, b, c)		\
+	ck_ring_dequeue_spmc_##name(a, b, c)
+
+#endif /* _CK_RING_H */
+
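
Editor's note: the following usage sketch is not part of the commit; it is
included only to make the SPSC ring API above easier to review. It assumes
ck_ring.h is on the include path and that the capacity is a power of two
(ck_ring_init derives the index mask from size - 1).

#include <ck_ring.h>
#include <stdio.h>
#include <stdlib.h>

#define SLOTS 256	/* capacity must be a power of two */

static ck_ring_t ring;
static ck_ring_buffer_t slab[SLOTS];	/* one slot per ring entry */

int
main(void)
{
	unsigned int length;
	int value = 42;
	void *out;

	ck_ring_init(&ring, SLOTS);

	/* Producer side: enqueue a pointer and snapshot the queue length. */
	if (ck_ring_enqueue_spsc_size(&ring, slab, &value, &length) == false)
		abort();	/* ring was full */

	/* Consumer side: dequeue the pointer back out. */
	if (ck_ring_dequeue_spsc(&ring, slab, &out) == true)
		printf("dequeued %d (length at enqueue: %u)\n",
		    *(int *)out, length);

	return 0;
}

For value types, CK_RING_PROTOTYPE generates copy-in/copy-out variants for a
caller-defined struct, with an array of that struct standing in for the
ck_ring_buffer slab; the CK_RING_ENQUEUE_SPSC, CK_RING_DEQUEUE_SPSC and
related wrappers then dispatch to the generated functions by name.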

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/f098175e/lib/ck/include/ck_rwcohort.h
----------------------------------------------------------------------
diff --git a/lib/ck/include/ck_rwcohort.h b/lib/ck/include/ck_rwcohort.h
new file mode 100644
index 0000000..e8e014e
--- /dev/null
+++ b/lib/ck/include/ck_rwcohort.h
@@ -0,0 +1,318 @@
+/*
+ * Copyright 2013-2014 Samy Al Bahra.
+ * Copyright 2013 Brendon Scheinman.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _CK_RWCOHORT_H
+#define _CK_RWCOHORT_H
+
+/*
+ * This is an implementation of NUMA-aware reader-writer locks as described in:
+ *     Calciu, I.; Dice, D.; Lev, Y.; Luchangco, V.; Marathe, V.; and Shavit, N. 2014.
+ *     NUMA-Aware Reader-Writer Locks
+ */
+
+#include <ck_cc.h>
+#include <ck_pr.h>
+#include <stddef.h>
+#include <ck_cohort.h>
+
+#define CK_RWCOHORT_WP_NAME(N) ck_rwcohort_wp_##N
+#define CK_RWCOHORT_WP_INSTANCE(N) struct CK_RWCOHORT_WP_NAME(N)
+#define CK_RWCOHORT_WP_INIT(N, RW, WL) ck_rwcohort_wp_##N##_init(RW, WL)
+#define CK_RWCOHORT_WP_READ_LOCK(N, RW, C, GC, LC)	\
+	ck_rwcohort_wp_##N##_read_lock(RW, C, GC, LC)
+#define CK_RWCOHORT_WP_READ_UNLOCK(N, RW, C, GC, LC)	\
+	ck_rwcohort_wp_##N##_read_unlock(RW)
+#define CK_RWCOHORT_WP_WRITE_LOCK(N, RW, C, GC, LC)	\
+	ck_rwcohort_wp_##N##_write_lock(RW, C, GC, LC)
+#define CK_RWCOHORT_WP_WRITE_UNLOCK(N, RW, C, GC, LC)	\
+	ck_rwcohort_wp_##N##_write_unlock(RW, C, GC, LC)
+#define CK_RWCOHORT_WP_DEFAULT_WAIT_LIMIT 1000
+
+#define CK_RWCOHORT_WP_PROTOTYPE(N)							\
+	CK_RWCOHORT_WP_INSTANCE(N) {							\
+		unsigned int read_counter;						\
+		unsigned int write_barrier;						\
+		unsigned int wait_limit;						\
+	};										\
+	CK_CC_INLINE static void							\
+	ck_rwcohort_wp_##N##_init(CK_RWCOHORT_WP_INSTANCE(N) *rw_cohort,		\
+	    unsigned int wait_limit)							\
+	{										\
+											\
+		rw_cohort->read_counter = 0;						\
+		rw_cohort->write_barrier = 0;						\
+		rw_cohort->wait_limit = wait_limit;					\
+		ck_pr_barrier();							\
+		return;									\
+	}										\
+	CK_CC_INLINE static void							\
+	ck_rwcohort_wp_##N##_write_lock(CK_RWCOHORT_WP_INSTANCE(N) *rw_cohort,		\
+	    CK_COHORT_INSTANCE(N) *cohort, void *global_context,			\
+	    void *local_context)							\
+	{										\
+											\
+		while (ck_pr_load_uint(&rw_cohort->write_barrier) > 0)			\
+			ck_pr_stall();							\
+											\
+		CK_COHORT_LOCK(N, cohort, global_context, local_context);		\
+											\
+		while (ck_pr_load_uint(&rw_cohort->read_counter) > 0) 			\
+			ck_pr_stall();							\
+											\
+		return;									\
+	}										\
+	CK_CC_INLINE static void							\
+	ck_rwcohort_wp_##N##_write_unlock(CK_RWCOHORT_WP_INSTANCE(N) *rw_cohort,	\
+	    CK_COHORT_INSTANCE(N) *cohort, void *global_context,			\
+	    void *local_context)							\
+	{										\
+											\
+		(void)rw_cohort;							\
+		CK_COHORT_UNLOCK(N, cohort, global_context, local_context);		\
+		return;									\
+	}										\
+	CK_CC_INLINE static void							\
+	ck_rwcohort_wp_##N##_read_lock(CK_RWCOHORT_WP_INSTANCE(N) *rw_cohort,		\
+	    CK_COHORT_INSTANCE(N) *cohort, void *global_context,			\
+	    void *local_context)							\
+	{										\
+		unsigned int wait_count = 0;						\
+		bool raised = false;							\
+											\
+		for (;;) {								\
+			ck_pr_inc_uint(&rw_cohort->read_counter);			\
+			ck_pr_fence_atomic_load();					\
+			if (CK_COHORT_LOCKED(N, cohort, global_context,			\
+			    local_context) == false)					\
+				break;							\
+											\
+			ck_pr_dec_uint(&rw_cohort->read_counter);			\
+			while (CK_COHORT_LOCKED(N, cohort, global_context,		\
+			    local_context) == true) {					\
+				ck_pr_stall();						\
+				if (++wait_count > rw_cohort->wait_limit &&		\
+				    raised == false) {					\
+					ck_pr_inc_uint(&rw_cohort->write_barrier);	\
+					raised = true;					\
+				}							\
+			}								\
+		}									\
+											\
+		if (raised == true)							\
+			ck_pr_dec_uint(&rw_cohort->write_barrier);			\
+											\
+		ck_pr_fence_load();							\
+		return;									\
+	}										\
+	CK_CC_INLINE static void							\
+	ck_rwcohort_wp_##N##_read_unlock(CK_RWCOHORT_WP_INSTANCE(N) *cohort)		\
+	{										\
+											\
+		ck_pr_fence_load_atomic();						\
+		ck_pr_dec_uint(&cohort->read_counter);					\
+		return;									\
+	}
+
+#define CK_RWCOHORT_WP_INITIALIZER {							\
+	.read_counter = 0,								\
+	.write_barrier = 0,								\
+	.wait_limit = 0									\
+}
+
+#define CK_RWCOHORT_RP_NAME(N) ck_rwcohort_rp_##N
+#define CK_RWCOHORT_RP_INSTANCE(N) struct CK_RWCOHORT_RP_NAME(N)
+#define CK_RWCOHORT_RP_INIT(N, RW, WL) ck_rwcohort_rp_##N##_init(RW, WL)
+#define CK_RWCOHORT_RP_READ_LOCK(N, RW, C, GC, LC)	\
+	ck_rwcohort_rp_##N##_read_lock(RW, C, GC, LC)
+#define CK_RWCOHORT_RP_READ_UNLOCK(N, RW, C, GC, LC)	\
+	ck_rwcohort_rp_##N##_read_unlock(RW)
+#define CK_RWCOHORT_RP_WRITE_LOCK(N, RW, C, GC, LC)	\
+	ck_rwcohort_rp_##N##_write_lock(RW, C, GC, LC)
+#define CK_RWCOHORT_RP_WRITE_UNLOCK(N, RW, C, GC, LC)	\
+	ck_rwcohort_rp_##N##_write_unlock(RW, C, GC, LC)
+#define CK_RWCOHORT_RP_DEFAULT_WAIT_LIMIT 1000
+
+#define CK_RWCOHORT_RP_PROTOTYPE(N)							\
+	CK_RWCOHORT_RP_INSTANCE(N) {							\
+		unsigned int read_counter;						\
+		unsigned int read_barrier;						\
+		unsigned int wait_limit;						\
+	};										\
+	CK_CC_INLINE static void							\
+	ck_rwcohort_rp_##N##_init(CK_RWCOHORT_RP_INSTANCE(N) *rw_cohort,		\
+	    unsigned int wait_limit)							\
+	{										\
+											\
+		rw_cohort->read_counter = 0;						\
+		rw_cohort->read_barrier = 0;						\
+		rw_cohort->wait_limit = wait_limit;					\
+		ck_pr_barrier();							\
+		return;									\
+	}										\
+	CK_CC_INLINE static void							\
+	ck_rwcohort_rp_##N##_write_lock(CK_RWCOHORT_RP_INSTANCE(N) *rw_cohort,		\
+	    CK_COHORT_INSTANCE(N) *cohort, void *global_context,			\
+	    void *local_context)							\
+	{										\
+		unsigned int wait_count = 0;						\
+		bool raised = false;							\
+											\
+		for (;;) {								\
+			CK_COHORT_LOCK(N, cohort, global_context, local_context);	\
+			if (ck_pr_load_uint(&rw_cohort->read_counter) == 0)		\
+				break;							\
+											\
+			CK_COHORT_UNLOCK(N, cohort, global_context, local_context);	\
+			while (ck_pr_load_uint(&rw_cohort->read_counter) > 0) {		\
+				ck_pr_stall();						\
+				if (++wait_count > rw_cohort->wait_limit &&		\
+				    raised == false) {					\
+					ck_pr_inc_uint(&rw_cohort->read_barrier);	\
+					raised = true;					\
+				}							\
+			}								\
+		}									\
+											\
+		if (raised == true)							\
+			ck_pr_dec_uint(&rw_cohort->read_barrier);			\
+											\
+		return;									\
+	}										\
+	CK_CC_INLINE static void							\
+	ck_rwcohort_rp_##N##_write_unlock(CK_RWCOHORT_RP_INSTANCE(N) *rw_cohort,	\
+	    CK_COHORT_INSTANCE(N) *cohort, void *global_context, void *local_context)	\
+	{										\
+											\
+		(void)rw_cohort;							\
+		CK_COHORT_UNLOCK(N, cohort, global_context, local_context);		\
+		return;									\
+	}										\
+	CK_CC_INLINE static void							\
+	ck_rwcohort_rp_##N##_read_lock(CK_RWCOHORT_RP_INSTANCE(N) *rw_cohort,		\
+	    CK_COHORT_INSTANCE(N) *cohort, void *global_context,			\
+	    void *local_context)							\
+	{										\
+											\
+		while (ck_pr_load_uint(&rw_cohort->read_barrier) > 0)			\
+			ck_pr_stall();							\
+											\
+		ck_pr_inc_uint(&rw_cohort->read_counter);				\
+		ck_pr_fence_atomic_load();						\
+											\
+		while (CK_COHORT_LOCKED(N, cohort, global_context,			\
+		    local_context) == true)						\
+			ck_pr_stall();							\
+											\
+		return;									\
+	}										\
+	CK_CC_INLINE static void							\
+	ck_rwcohort_rp_##N##_read_unlock(CK_RWCOHORT_RP_INSTANCE(N) *cohort)		\
+	{										\
+											\
+		ck_pr_fence_load_atomic();						\
+		ck_pr_dec_uint(&cohort->read_counter);					\
+		return;									\
+	}
+
+#define CK_RWCOHORT_RP_INITIALIZER {							\
+	.read_counter = 0,								\
+	.read_barrier = 0,								\
+	.wait_limit = 0									\
+}
+
+#define CK_RWCOHORT_NEUTRAL_NAME(N) ck_rwcohort_neutral_##N
+#define CK_RWCOHORT_NEUTRAL_INSTANCE(N) struct CK_RWCOHORT_NEUTRAL_NAME(N)
+#define CK_RWCOHORT_NEUTRAL_INIT(N, RW) ck_rwcohort_neutral_##N##_init(RW)
+#define CK_RWCOHORT_NEUTRAL_READ_LOCK(N, RW, C, GC, LC)		\
+	ck_rwcohort_neutral_##N##_read_lock(RW, C, GC, LC)
+#define CK_RWCOHORT_NEUTRAL_READ_UNLOCK(N, RW, C, GC, LC)	\
+	ck_rwcohort_neutral_##N##_read_unlock(RW)
+#define CK_RWCOHORT_NEUTRAL_WRITE_LOCK(N, RW, C, GC, LC)	\
+	ck_rwcohort_neutral_##N##_write_lock(RW, C, GC, LC)
+#define CK_RWCOHORT_NEUTRAL_WRITE_UNLOCK(N, RW, C, GC, LC)	\
+	ck_rwcohort_neutral_##N##_write_unlock(RW, C, GC, LC)
+#define CK_RWCOHORT_NEUTRAL_DEFAULT_WAIT_LIMIT 1000
+
+#define CK_RWCOHORT_NEUTRAL_PROTOTYPE(N)						\
+	CK_RWCOHORT_NEUTRAL_INSTANCE(N) {						\
+		unsigned int read_counter;						\
+	};										\
+	CK_CC_INLINE static void							\
+	ck_rwcohort_neutral_##N##_init(CK_RWCOHORT_NEUTRAL_INSTANCE(N) *rw_cohort)	\
+	{										\
+											\
+		rw_cohort->read_counter = 0;						\
+		ck_pr_barrier();							\
+		return;									\
+	}										\
+	CK_CC_INLINE static void							\
+	ck_rwcohort_neutral_##N##_write_lock(CK_RWCOHORT_NEUTRAL_INSTANCE(N) *rw_cohort,\
+	    CK_COHORT_INSTANCE(N) *cohort, void *global_context,			\
+	    void *local_context)							\
+	{										\
+											\
+		CK_COHORT_LOCK(N, cohort, global_context, local_context);		\
+		while (ck_pr_load_uint(&rw_cohort->read_counter) > 0) {			\
+			ck_pr_stall();							\
+		}									\
+		return;									\
+	}										\
+	CK_CC_INLINE static void							\
+	ck_rwcohort_neutral_##N##_write_unlock(CK_RWCOHORT_NEUTRAL_INSTANCE(N) *rw_cohort,\
+	    CK_COHORT_INSTANCE(N) *cohort, void *global_context, void *local_context)	\
+	{										\
+											\
+		(void)rw_cohort;							\
+		CK_COHORT_UNLOCK(N, cohort, global_context, local_context);		\
+		return;									\
+	}										\
+	CK_CC_INLINE static void							\
+	ck_rwcohort_neutral_##N##_read_lock(CK_RWCOHORT_NEUTRAL_INSTANCE(N) *rw_cohort,	\
+	    CK_COHORT_INSTANCE(N) *cohort, void *global_context,			\
+	    void *local_context)							\
+	{										\
+											\
+		CK_COHORT_LOCK(N, cohort, global_context, local_context);		\
+		ck_pr_inc_uint(&rw_cohort->read_counter);				\
+		CK_COHORT_UNLOCK(N, cohort, global_context, local_context);		\
+		return;									\
+	}										\
+	CK_CC_INLINE static void							\
+	ck_rwcohort_neutral_##N##_read_unlock(CK_RWCOHORT_NEUTRAL_INSTANCE(N) *cohort)	\
+	{										\
+											\
+		ck_pr_fence_load_atomic();						\
+		ck_pr_dec_uint(&cohort->read_counter);					\
+		return;									\
+	}
+
+#define CK_RWCOHORT_NEUTRAL_INITIALIZER {						\
+	.read_counter = 0,								\
+}
+
+#endif /* _CK_RWCOHORT_H */
+
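
Editor's note: a minimal writer-preference (WP) usage sketch, not part of the
commit. It assumes a cohort type named mylock has already been instantiated
via CK_COHORT_PROTOTYPE(mylock, ...) from ck_cohort.h (arguments elided here,
since that header is outside this hunk) and that callers pass an initialized
per-NUMA-node cohort instance; NULL is passed for the unused context pointers.

/* Generates the ck_rwcohort_wp_mylock_* functions used by the macros below. */
CK_RWCOHORT_WP_PROTOTYPE(mylock)

static CK_RWCOHORT_WP_INSTANCE(mylock) rw;

static void
setup(void)
{

	CK_RWCOHORT_WP_INIT(mylock, &rw, CK_RWCOHORT_WP_DEFAULT_WAIT_LIMIT);
	return;
}

static void
reader(CK_COHORT_INSTANCE(mylock) *cohort)
{

	CK_RWCOHORT_WP_READ_LOCK(mylock, &rw, cohort, NULL, NULL);
	/* ... read-side critical section ... */
	CK_RWCOHORT_WP_READ_UNLOCK(mylock, &rw, cohort, NULL, NULL);
	return;
}

static void
writer(CK_COHORT_INSTANCE(mylock) *cohort)
{

	CK_RWCOHORT_WP_WRITE_LOCK(mylock, &rw, cohort, NULL, NULL);
	/* ... write-side critical section ... */
	CK_RWCOHORT_WP_WRITE_UNLOCK(mylock, &rw, cohort, NULL, NULL);
	return;
}

The RP (reader-preference) and NEUTRAL flavors expose the same shape of API,
differing only in which side may raise a barrier against the other.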

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/f098175e/lib/ck/include/ck_rwlock.h
----------------------------------------------------------------------
diff --git a/lib/ck/include/ck_rwlock.h b/lib/ck/include/ck_rwlock.h
new file mode 100644
index 0000000..63cb549
--- /dev/null
+++ b/lib/ck/include/ck_rwlock.h
@@ -0,0 +1,298 @@
+/*
+ * Copyright 2011-2014 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _CK_RWLOCK_H
+#define _CK_RWLOCK_H
+
+#include <ck_elide.h>
+#include <ck_pr.h>
+#include <stdbool.h>
+#include <stddef.h>
+
+struct ck_rwlock {
+	unsigned int writer;
+	unsigned int n_readers;
+};
+typedef struct ck_rwlock ck_rwlock_t;
+
+#define CK_RWLOCK_INITIALIZER {0, 0}
+
+CK_CC_INLINE static void
+ck_rwlock_init(struct ck_rwlock *rw)
+{
+
+	rw->writer = 0;
+	rw->n_readers = 0;
+	ck_pr_barrier();
+	return;
+}
+
+CK_CC_INLINE static void
+ck_rwlock_write_unlock(ck_rwlock_t *rw)
+{
+
+	ck_pr_fence_release();
+	ck_pr_store_uint(&rw->writer, 0);
+	return;
+}
+
+CK_CC_INLINE static bool
+ck_rwlock_locked_writer(ck_rwlock_t *rw)
+{
+
+	ck_pr_fence_load();
+	return ck_pr_load_uint(&rw->writer);
+}
+
+CK_CC_INLINE static void
+ck_rwlock_write_downgrade(ck_rwlock_t *rw)
+{
+
+	ck_pr_inc_uint(&rw->n_readers);
+	ck_rwlock_write_unlock(rw);
+	return;
+}
+
+CK_CC_INLINE static bool
+ck_rwlock_locked(ck_rwlock_t *rw)
+{
+	unsigned int r;
+
+	ck_pr_fence_load();
+	r = ck_pr_load_uint(&rw->writer);
+	ck_pr_fence_load();
+
+	return ck_pr_load_uint(&rw->n_readers) | r;
+}
+
+CK_CC_INLINE static bool
+ck_rwlock_write_trylock(ck_rwlock_t *rw)
+{
+
+	if (ck_pr_fas_uint(&rw->writer, 1) != 0)
+		return false;
+
+	ck_pr_fence_atomic_load();
+
+	if (ck_pr_load_uint(&rw->n_readers) != 0) {
+		ck_rwlock_write_unlock(rw);
+		return false;
+	}
+
+	return true;
+}
+
+CK_ELIDE_TRYLOCK_PROTOTYPE(ck_rwlock_write, ck_rwlock_t,
+    ck_rwlock_locked, ck_rwlock_write_trylock)
+
+CK_CC_INLINE static void
+ck_rwlock_write_lock(ck_rwlock_t *rw)
+{
+
+	while (ck_pr_fas_uint(&rw->writer, 1) != 0)
+		ck_pr_stall();
+
+	ck_pr_fence_atomic_load();
+
+	while (ck_pr_load_uint(&rw->n_readers) != 0)
+		ck_pr_stall();
+
+	return;
+}
+
+CK_ELIDE_PROTOTYPE(ck_rwlock_write, ck_rwlock_t,
+    ck_rwlock_locked, ck_rwlock_write_lock,
+    ck_rwlock_locked_writer, ck_rwlock_write_unlock)
+
+CK_CC_INLINE static bool
+ck_rwlock_read_trylock(ck_rwlock_t *rw)
+{
+
+	if (ck_pr_load_uint(&rw->writer) != 0)
+		return false;
+
+	ck_pr_inc_uint(&rw->n_readers);
+
+	/*
+	 * Serialize with respect to concurrent write
+	 * lock operation.
+	 */
+	ck_pr_fence_atomic_load();
+
+	if (ck_pr_load_uint(&rw->writer) == 0) {
+		ck_pr_fence_load();
+		return true;
+	}
+
+	ck_pr_dec_uint(&rw->n_readers);
+	return false;
+}
+
+CK_ELIDE_TRYLOCK_PROTOTYPE(ck_rwlock_read, ck_rwlock_t,
+    ck_rwlock_locked_writer, ck_rwlock_read_trylock)
+
+CK_CC_INLINE static void
+ck_rwlock_read_lock(ck_rwlock_t *rw)
+{
+
+	for (;;) {
+		while (ck_pr_load_uint(&rw->writer) != 0)
+			ck_pr_stall();
+
+		ck_pr_inc_uint(&rw->n_readers);
+
+		/*
+		 * Serialize with respect to concurrent write
+		 * lock operation.
+		 */
+		ck_pr_fence_atomic_load();
+
+		if (ck_pr_load_uint(&rw->writer) == 0)
+			break;
+
+		ck_pr_dec_uint(&rw->n_readers);
+	}
+
+	/* Acquire semantics are necessary. */
+	ck_pr_fence_load();
+	return;
+}
+
+CK_CC_INLINE static bool
+ck_rwlock_locked_reader(ck_rwlock_t *rw)
+{
+
+	ck_pr_fence_load();
+	return ck_pr_load_uint(&rw->n_readers);
+}
+
+CK_CC_INLINE static void
+ck_rwlock_read_unlock(ck_rwlock_t *rw)
+{
+
+	ck_pr_fence_load_atomic();
+	ck_pr_dec_uint(&rw->n_readers);
+	return;
+}
+
+CK_ELIDE_PROTOTYPE(ck_rwlock_read, ck_rwlock_t,
+    ck_rwlock_locked_writer, ck_rwlock_read_lock,
+    ck_rwlock_locked_reader, ck_rwlock_read_unlock)
+
+/*
+ * Reader-writer lock with a writer lock that can be acquired recursively.
+ */
+struct ck_rwlock_recursive {
+	struct ck_rwlock rw;
+	unsigned int wc;
+};
+typedef struct ck_rwlock_recursive ck_rwlock_recursive_t;
+
+#define CK_RWLOCK_RECURSIVE_INITIALIZER {CK_RWLOCK_INITIALIZER, 0}
+
+CK_CC_INLINE static void
+ck_rwlock_recursive_write_lock(ck_rwlock_recursive_t *rw, unsigned int tid)
+{
+	unsigned int o;
+
+	o = ck_pr_load_uint(&rw->rw.writer);
+	if (o == tid)
+		goto leave;
+
+	while (ck_pr_cas_uint(&rw->rw.writer, 0, tid) == false)
+		ck_pr_stall();
+
+	ck_pr_fence_atomic_load();
+
+	while (ck_pr_load_uint(&rw->rw.n_readers) != 0)
+		ck_pr_stall();
+
+leave:
+	rw->wc++;
+	return;
+}
+
+CK_CC_INLINE static bool
+ck_rwlock_recursive_write_trylock(ck_rwlock_recursive_t *rw, unsigned int tid)
+{
+	unsigned int o;
+
+	o = ck_pr_load_uint(&rw->rw.writer);
+	if (o == tid)
+		goto leave;
+
+	if (ck_pr_cas_uint(&rw->rw.writer, 0, tid) == false)
+		return false;
+
+	ck_pr_fence_atomic_load();
+
+	if (ck_pr_load_uint(&rw->rw.n_readers) != 0) {
+		ck_pr_store_uint(&rw->rw.writer, 0);
+		return false;
+	}
+
+leave:
+	rw->wc++;
+	return true;
+}
+
+CK_CC_INLINE static void
+ck_rwlock_recursive_write_unlock(ck_rwlock_recursive_t *rw)
+{
+
+	if (--rw->wc == 0) {
+		ck_pr_fence_release();
+		ck_pr_store_uint(&rw->rw.writer, 0);
+	}
+
+	return;
+}
+
+CK_CC_INLINE static void
+ck_rwlock_recursive_read_lock(ck_rwlock_recursive_t *rw)
+{
+
+	ck_rwlock_read_lock(&rw->rw);
+	return;
+}
+
+CK_CC_INLINE static bool
+ck_rwlock_recursive_read_trylock(ck_rwlock_recursive_t *rw)
+{
+
+	return ck_rwlock_read_trylock(&rw->rw);
+}
+
+CK_CC_INLINE static void
+ck_rwlock_recursive_read_unlock(ck_rwlock_recursive_t *rw)
+{
+
+	ck_rwlock_read_unlock(&rw->rw);
+	return;
+}
+
+#endif /* _CK_RWLOCK_H */
+
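
Editor's note: a minimal, non-authoritative sketch of the basic reader-writer
lock above, using only functions defined in this header.

#include <ck_rwlock.h>

static ck_rwlock_t lock = CK_RWLOCK_INITIALIZER;
static unsigned int shared_counter;

/* Read side: may run concurrently with other readers. */
static unsigned int
read_counter(void)
{
	unsigned int snapshot;

	ck_rwlock_read_lock(&lock);
	snapshot = shared_counter;
	ck_rwlock_read_unlock(&lock);
	return snapshot;
}

/* Write side: excludes readers and other writers. */
static void
bump_counter(void)
{

	ck_rwlock_write_lock(&lock);
	shared_counter++;
	ck_rwlock_write_unlock(&lock);
	return;
}

A writer that wants to keep reading after its update can call
ck_rwlock_write_downgrade, which registers it as a reader before releasing
the writer bit; the ck_rwlock_recursive variant additionally lets the same
thread (identified by the tid argument) re-acquire a writer lock it already
holds.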

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/f098175e/lib/ck/include/ck_sequence.h
----------------------------------------------------------------------
diff --git a/lib/ck/include/ck_sequence.h b/lib/ck/include/ck_sequence.h
new file mode 100644
index 0000000..14138ff
--- /dev/null
+++ b/lib/ck/include/ck_sequence.h
@@ -0,0 +1,126 @@
+/*
+ * Copyright 2010-2014 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _CK_SEQUENCE_H
+#define _CK_SEQUENCE_H
+
+#include <ck_cc.h>
+#include <ck_pr.h>
+#include <stdbool.h>
+
+struct ck_sequence {
+	unsigned int sequence;
+};
+typedef struct ck_sequence ck_sequence_t;
+
+#define CK_SEQUENCE_INITIALIZER { .sequence = 0 }
+
+CK_CC_INLINE static void
+ck_sequence_init(struct ck_sequence *sq)
+{
+
+	ck_pr_store_uint(&sq->sequence, 0);
+	return;
+}
+
+CK_CC_INLINE static unsigned int
+ck_sequence_read_begin(struct ck_sequence *sq)
+{
+	unsigned int version;
+
+	for (;;) {
+		version = ck_pr_load_uint(&sq->sequence);
+
+		/*
+		 * If a sequence is even then associated data may be in a
+		 * consistent state.
+		 */
+		if (CK_CC_LIKELY((version & 1) == 0))
+			break;
+
+		/*
+		 * If a sequence is odd then a thread is in the middle of an
+		 * update. Retry the read to avoid operating on inconsistent
+		 * data.
+		 */
+		ck_pr_stall();
+	}
+
+	ck_pr_fence_load();
+	return version;
+}
+
+CK_CC_INLINE static bool
+ck_sequence_read_retry(struct ck_sequence *sq, unsigned int version)
+{
+
+	/*
+	 * If the sequence number was updated then a read should be
+	 * re-attempted.
+	 */
+	ck_pr_fence_load();
+	return ck_pr_load_uint(&sq->sequence) != version;
+}
+
+#define CK_SEQUENCE_READ(seqlock, version) 						\
+	for (*(version) = 1;								\
+	    (*(version) != 0) && (*(version) = ck_sequence_read_begin(seqlock), 1);	\
+	    *(version) = ck_sequence_read_retry(seqlock, *(version)))
+
+/*
+ * This must be called after a successful mutex acquisition.
+ */
+CK_CC_INLINE static void
+ck_sequence_write_begin(struct ck_sequence *sq)
+{
+
+	/*
+	 * Increment the sequence to an odd number to indicate
+	 * the beginning of a write update.
+	 */
+	ck_pr_inc_uint(&sq->sequence);
+	ck_pr_fence_store();
+	return;
+}
+
+/*
+ * This must be called before mutex ownership is relinquished.
+ */
+CK_CC_INLINE static void
+ck_sequence_write_end(struct ck_sequence *sq)
+{
+
+	/*
+	 * Increment the sequence to an even number to indicate
+	 * completion of a write update.
+	 */
+	ck_pr_fence_store();
+	ck_pr_inc_uint(&sq->sequence);
+	return;
+}
+
+#endif /* _CK_SEQUENCE_H */
+
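
Editor's note: a usage sketch for the sequence counter, not part of the
commit. Writers must already be serialized with respect to each other; a
ck_spinlock is assumed here purely for illustration.

#include <ck_sequence.h>
#include <ck_spinlock.h>

static ck_sequence_t seq = CK_SEQUENCE_INITIALIZER;
static ck_spinlock_t writer_lock = CK_SPINLOCK_INITIALIZER;
static struct { unsigned int x, y; } point;

/* Writer: serialized externally, here by a spinlock. */
static void
move_point(unsigned int x, unsigned int y)
{

	ck_spinlock_lock(&writer_lock);
	ck_sequence_write_begin(&seq);
	point.x = x;
	point.y = y;
	ck_sequence_write_end(&seq);
	ck_spinlock_unlock(&writer_lock);
	return;
}

/* Reader: retries until it observes a consistent snapshot. */
static void
read_point(unsigned int *x, unsigned int *y)
{
	unsigned int version;

	do {
		version = ck_sequence_read_begin(&seq);
		*x = point.x;
		*y = point.y;
	} while (ck_sequence_read_retry(&seq, version));
	return;
}

The CK_SEQUENCE_READ convenience macro packages the same begin/retry loop as
a for-statement around the read-side critical section.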

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/f098175e/lib/ck/include/ck_spinlock.h
----------------------------------------------------------------------
diff --git a/lib/ck/include/ck_spinlock.h b/lib/ck/include/ck_spinlock.h
new file mode 100644
index 0000000..03f9900
--- /dev/null
+++ b/lib/ck/include/ck_spinlock.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2010-2014 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _CK_SPINLOCK_H
+#define _CK_SPINLOCK_H
+
+#include "spinlock/anderson.h"
+#include "spinlock/cas.h"
+#include "spinlock/clh.h"
+#include "spinlock/dec.h"
+#include "spinlock/fas.h"
+#include "spinlock/hclh.h"
+#include "spinlock/mcs.h"
+#include "spinlock/ticket.h"
+
+/*
+ * On the x86, x86_64, PPC64 and SPARC64 targets tested,
+ * ck_spinlock_fas had the lowest fast-path latency, or
+ * only negligible degradation relative to faster but
+ * less robust implementations.
+ */
+#define CK_SPINLOCK_INITIALIZER CK_SPINLOCK_FAS_INITIALIZER
+#define ck_spinlock_t		ck_spinlock_fas_t
+#define ck_spinlock_init(x)	ck_spinlock_fas_init(x)
+#define ck_spinlock_lock(x)	ck_spinlock_fas_lock(x)
+#define ck_spinlock_lock_eb(x)	ck_spinlock_fas_lock_eb(x)
+#define ck_spinlock_unlock(x)	ck_spinlock_fas_unlock(x)
+#define ck_spinlock_locked(x)	ck_spinlock_fas_locked(x)
+#define ck_spinlock_trylock(x)	ck_spinlock_fas_trylock(x)
+
+CK_ELIDE_PROTOTYPE(ck_spinlock, ck_spinlock_t,
+    ck_spinlock_locked, ck_spinlock_lock,
+    ck_spinlock_locked, ck_spinlock_unlock)
+
+CK_ELIDE_TRYLOCK_PROTOTYPE(ck_spinlock, ck_spinlock_t,
+    ck_spinlock_locked, ck_spinlock_trylock)
+
+#endif /* _CK_SPINLOCK_H */
+
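
Editor's note: a short sketch of the default spinlock alias defined above.
ck_spinlock_trylock and ck_spinlock_lock_eb map onto the FAS implementation's
try and exponential-backoff paths respectively; the fallback pattern shown is
illustrative rather than prescribed.

#include <ck_spinlock.h>

static ck_spinlock_t lock = CK_SPINLOCK_INITIALIZER;

static void
enter_critical_section(void)
{

	/* Try the fast path first; otherwise spin with exponential backoff. */
	if (ck_spinlock_trylock(&lock) == false)
		ck_spinlock_lock_eb(&lock);

	/* ... work under the lock ... */

	ck_spinlock_unlock(&lock);
	return;
}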