Posted to common-commits@hadoop.apache.org by cm...@apache.org on 2013/08/13 23:19:57 UTC

svn commit: r1513658 [4/4] - in /hadoop/common/branches/HDFS-4949/hadoop-common-project: hadoop-auth-examples/src/main/webapp/ hadoop-auth-examples/src/main/webapp/annonymous/ hadoop-auth-examples/src/main/webapp/kerberos/ hadoop-auth-examples/src/main...

Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4.c?rev=1513658&r1=1513657&r2=1513658&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4.c (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4.c Tue Aug 13 21:19:53 2013
@@ -1,19 +1,19 @@
 /*
    LZ4 - Fast LZ compression algorithm
-   Copyright (C) 2011, Yann Collet.
-   BSD License
+   Copyright (C) 2011-2013, Yann Collet.
+   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
 
    Redistribution and use in source and binary forms, with or without
    modification, are permitted provided that the following conditions are
    met:
-  
+
        * Redistributions of source code must retain the above copyright
    notice, this list of conditions and the following disclaimer.
        * Redistributions in binary form must reproduce the above
    copyright notice, this list of conditions and the following disclaimer
    in the documentation and/or other materials provided with the
    distribution.
-  
+
    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
    "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@@ -25,621 +25,672 @@
    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+   You can contact the author at :
+   - LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html
+   - LZ4 source repository : http://code.google.com/p/lz4/
+*/
+
+/*
+Note : this source file requires "lz4_encoder.h"
 */
 
 //**************************************
-//  Copy from:
-// URL: http://lz4.googlecode.com/svn/trunk/lz4.c
-// Repository Root: http://lz4.googlecode.com/svn
-// Repository UUID: 650e7d94-2a16-8b24-b05c-7c0b3f6821cd
-// Revision: 43
-// Node Kind: file
-// Last Changed Author: yann.collet.73@gmail.com
-// Last Changed Rev: 43
-// Last Changed Date: 2011-12-16 15:41:46 -0800 (Fri, 16 Dec 2011)
-// Sha1: 9db7b2c57698c528d79572e6bce2e7dc33fa5998
+// Tuning parameters
 //**************************************
+// MEMORY_USAGE :
+// Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
+// Increasing memory usage improves compression ratio
+// Reduced memory usage can improve speed, due to cache effect
+// Default value is 14, for 16KB, which nicely fits into Intel x86 L1 cache
+#define MEMORY_USAGE 14
+
+// HEAPMODE :
+// Select how the default compression function will allocate memory for its hash table:
+// on the memory stack (0: default, fastest), or on the memory heap (1: requires a malloc() allocation).
+// Default allocation strategy is to use stack (HEAPMODE 0)
+// Note : explicit functions *_stack* and *_heap* are unaffected by this setting
+#define HEAPMODE 0
+
 
 //**************************************
-// Compilation Directives
+// CPU Feature Detection
 //**************************************
-#if __STDC_VERSION__ >= 199901L
-  /* "restrict" is a known keyword */
+// 32 or 64 bits ?
+#if (defined(__x86_64__) || defined(_M_X64) || defined(_WIN64) \
+  || defined(__powerpc64__) || defined(__ppc64__) || defined(__PPC64__) \
+  || defined(__64BIT__) || defined(_LP64) || defined(__LP64__) \
+  || defined(__ia64) || defined(__itanium__) || defined(_M_IA64) )   // Detects 64 bits mode
+#  define LZ4_ARCH64 1
 #else
-#define restrict  // Disable restrict
+#  define LZ4_ARCH64 0
+#endif
+
+// Little Endian or Big Endian ?
+// Overwrite the #define below if you know your architecture's endianness
+#if defined (__GLIBC__)
+#  include <endian.h>
+#  if (__BYTE_ORDER == __BIG_ENDIAN)
+#     define LZ4_BIG_ENDIAN 1
+#  endif
+#elif (defined(__BIG_ENDIAN__) || defined(__BIG_ENDIAN) || defined(_BIG_ENDIAN)) && !(defined(__LITTLE_ENDIAN__) || defined(__LITTLE_ENDIAN) || defined(_LITTLE_ENDIAN))
+#  define LZ4_BIG_ENDIAN 1
+#elif defined(__sparc) || defined(__sparc__) \
+   || defined(__powerpc__) || defined(__ppc__) || defined(__PPC__) \
+   || defined(__hpux)  || defined(__hppa) \
+   || defined(_MIPSEB) || defined(__s390__)
+#  define LZ4_BIG_ENDIAN 1
+#else
+// Little Endian assumed. PDP Endian and other very rare endian formats are unsupported.
+#endif
+
+// Unaligned memory access is automatically enabled for "common" CPUs, such as x86.
+// For other CPUs, such as ARM, the compiler may be more cautious, inserting unnecessary extra code to ensure the aligned-access property
+// If you know your target CPU supports unaligned memory access, you may want to force this option manually to improve performance
+#if defined(__ARM_FEATURE_UNALIGNED)
+#  define LZ4_FORCE_UNALIGNED_ACCESS 1
+#endif
+
+// Define this parameter if your target system or compiler does not support hardware bit count
+#if defined(_MSC_VER) && defined(_WIN32_WCE)            // Visual Studio for Windows CE does not support Hardware bit count
+#  define LZ4_FORCE_SW_BITCOUNT
 #endif
 
+// BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE :
+// This option may provide a modest performance boost on some big-endian CPUs.
+// You may set this option to 1 if the data will remain within a closed environment.
+// This option is useless on little-endian CPUs (such as x86)
+//#define BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE 1
+
 
 //**************************************
-// Includes
+// Compiler Options
 //**************************************
-#include <stdlib.h>   // for malloc
-#include <string.h>   // for memset
+#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)   // C99
+/* "restrict" is a known keyword */
+#else
+#  define restrict // Disable restrict
+#endif
+
+#ifdef _MSC_VER    // Visual Studio
+#  define forceinline static __forceinline
+#  include <intrin.h>                    // For Visual 2005
+#  if LZ4_ARCH64   // 64-bits
+#    pragma intrinsic(_BitScanForward64) // For Visual 2005
+#    pragma intrinsic(_BitScanReverse64) // For Visual 2005
+#  else            // 32-bits
+#    pragma intrinsic(_BitScanForward)   // For Visual 2005
+#    pragma intrinsic(_BitScanReverse)   // For Visual 2005
+#  endif
+#  pragma warning(disable : 4127)        // disable: C4127: conditional expression is constant
+#else 
+#  ifdef __GNUC__
+#    define forceinline static inline __attribute__((always_inline))
+#  else
+#    define forceinline static inline
+#  endif
+#endif
+
+#ifdef _MSC_VER
+#  define lz4_bswap16(x) _byteswap_ushort(x)
+#else
+#  define lz4_bswap16(x) ((unsigned short int) ((((x) >> 8) & 0xffu) | (((x) & 0xffu) << 8)))
+#endif
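
A minimal sanity-check sketch for the portable byte-swap fallback above:

    #include <assert.h>

    void check_bswap16(void)
    {
        unsigned short v = 0x1234;
        assert(lz4_bswap16(v) == 0x3412);   /* bytes 0x12,0x34 become 0x34,0x12 */
    }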
+
+#define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
+
+#if (GCC_VERSION >= 302) || (__INTEL_COMPILER >= 800) || defined(__clang__)
+#  define expect(expr,value)    (__builtin_expect ((expr),(value)) )
+#else
+#  define expect(expr,value)    (expr)
+#endif
+
+#define likely(expr)     expect((expr) != 0, 1)
+#define unlikely(expr)   expect((expr) != 0, 0)
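
A minimal sketch of how these hints are meant to be used: wrapping a branch
condition tells GCC/Clang which outcome to optimise for, and on other
compilers the macros reduce to the bare expression.

    static int read_byte_checked(const unsigned char* p)
    {
        if (unlikely(p == 0)) return -1;   /* cold path, hinted as rare */
        return *p;                         /* hot path */
    }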
 
 
 //**************************************
-// Performance parameter               
+// Includes
 //**************************************
-// Increasing this value improves compression ratio
-// Lowering this value reduces memory usage
-// Lowering may also improve speed, typically on reaching cache size limits (L1 32KB for Intel, 64KB for AMD)
-// Memory usage formula for 32 bits systems : N->2^(N+2) Bytes (examples : 17 -> 512KB ; 12 -> 16KB)
-#define HASH_LOG 12
+#include <stdlib.h>   // for malloc
+#include <string.h>   // for memset
+#include "lz4.h"
 
 
 //**************************************
 // Basic Types
 //**************************************
-#if defined(_MSC_VER)    // Visual Studio does not support 'stdint' natively
-#define BYTE	unsigned __int8
-#define U16		unsigned __int16
-#define U32		unsigned __int32
-#define S32		__int32
-#else
-#include <stdint.h>
-#define BYTE	uint8_t
-#define U16		uint16_t
-#define U32		uint32_t
-#define S32		int32_t
+#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   // C99
+# include <stdint.h>
+  typedef  uint8_t BYTE;
+  typedef uint16_t U16;
+  typedef uint32_t U32;
+  typedef  int32_t S32;
+  typedef uint64_t U64;
+#else
+  typedef unsigned char       BYTE;
+  typedef unsigned short      U16;
+  typedef unsigned int        U32;
+  typedef   signed int        S32;
+  typedef unsigned long long  U64;
 #endif
 
+#if defined(__GNUC__)  && !defined(LZ4_FORCE_UNALIGNED_ACCESS)
+#  define _PACKED __attribute__ ((packed))
+#else
+#  define _PACKED
+#endif
+
+#if !defined(LZ4_FORCE_UNALIGNED_ACCESS) && !defined(__GNUC__)
+#  ifdef __IBMC__
+#    pragma pack(1)
+#  else
+#    pragma pack(push, 1)
+#  endif
+#endif
+
+typedef struct { U16 v; }  _PACKED U16_S;
+typedef struct { U32 v; }  _PACKED U32_S;
+typedef struct { U64 v; }  _PACKED U64_S;
+typedef struct {size_t v;} _PACKED size_t_S;
+
+#if !defined(LZ4_FORCE_UNALIGNED_ACCESS) && !defined(__GNUC__)
+#  pragma pack(pop)
+#endif
+
+#define A16(x)   (((U16_S *)(x))->v)
+#define A32(x)   (((U32_S *)(x))->v)
+#define A64(x)   (((U64_S *)(x))->v)
+#define AARCH(x) (((size_t_S *)(x))->v)
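
The packed single-field structs above are a portable way to express a
possibly-unaligned load or store: on strict-alignment targets GCC lowers the
access to safe byte-wise code, while on x86 it compiles to a single plain
load. A sketch, assuming a buffer with no particular alignment:

    static U32 read_u32_unaligned(const BYTE* p)
    {
        return A32(p);   /* well-defined even if ((size_t)p % 4) != 0 */
    }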
+
 
 //**************************************
 // Constants
 //**************************************
+#define HASHTABLESIZE (1 << MEMORY_USAGE)
+
 #define MINMATCH 4
-#define SKIPSTRENGTH 6
-#define STACKLIMIT 13
-#define HEAPMODE (HASH_LOG>STACKLIMIT)  // Defines if memory is allocated into the stack (local variable), or into the heap (malloc()).
-#define COPYTOKEN 4
+
 #define COPYLENGTH 8
 #define LASTLITERALS 5
 #define MFLIMIT (COPYLENGTH+MINMATCH)
 #define MINLENGTH (MFLIMIT+1)
 
+#define LZ4_64KLIMIT ((1<<16) + (MFLIMIT-1))
+#define SKIPSTRENGTH 6     // Increasing this value will make the compression run slower on incompressible data
+
 #define MAXD_LOG 16
 #define MAX_DISTANCE ((1 << MAXD_LOG) - 1)
 
-#define HASHTABLESIZE (1 << HASH_LOG)
-#define HASH_MASK (HASHTABLESIZE - 1)
-
-#define ML_BITS 4
-#define ML_MASK ((1U<<ML_BITS)-1)
+#define ML_BITS  4
+#define ML_MASK  ((1U<<ML_BITS)-1)
 #define RUN_BITS (8-ML_BITS)
 #define RUN_MASK ((1U<<RUN_BITS)-1)
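
These masks describe the LZ4 sequence token: the high 4 bits carry the
literal run length and the low 4 bits carry the match length minus MINMATCH,
with an all-ones field signalling that extra length bytes follow. A worked
decode of one example token:

    unsigned token  = 0x52;
    size_t literals = token >> ML_BITS;               /* 5 literal bytes follow  */
    size_t matchlen = (token & ML_MASK) + MINMATCH;   /* field 2 -> 6-byte match */
    /* A field equal to RUN_MASK or ML_MASK (15) means further length bytes
       follow, each adding up to 255. */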
 
 
 //**************************************
-// Local structures
+// Architecture-specific macros
 //**************************************
-struct refTables
-{
-	const BYTE* hashTable[HASHTABLESIZE];
-};
-
-#ifdef __GNUC__
-#  define _PACKED __attribute__ ((packed))
-#else
-#  define _PACKED
+#define STEPSIZE                  sizeof(size_t)
+#define LZ4_COPYSTEP(s,d)         { AARCH(d) = AARCH(s); d+=STEPSIZE; s+=STEPSIZE; }
+#define LZ4_COPY8(s,d)            { LZ4_COPYSTEP(s,d); if (STEPSIZE<8) LZ4_COPYSTEP(s,d); }
+#define LZ4_SECURECOPY(s,d,e)     { if ((STEPSIZE==8)&&(d<e)) LZ4_WILDCOPY(s,d,e); }
+
+#if LZ4_ARCH64   // 64-bit
+#  define HTYPE                   U32
+#  define INITBASE(base)          const BYTE* const base = ip
+#else            // 32-bit
+#  define HTYPE                   const BYTE*
+#  define INITBASE(base)          const int base = 0
+#endif
+
+#if (defined(LZ4_BIG_ENDIAN) && !defined(BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE))
+#  define LZ4_READ_LITTLEENDIAN_16(d,s,p) { U16 v = A16(p); v = lz4_bswap16(v); d = (s) - v; }
+#  define LZ4_WRITE_LITTLEENDIAN_16(p,i)  { U16 v = (U16)(i); v = lz4_bswap16(v); A16(p) = v; p+=2; }
+#else      // Little Endian
+#  define LZ4_READ_LITTLEENDIAN_16(d,s,p) { d = (s) - A16(p); }
+#  define LZ4_WRITE_LITTLEENDIAN_16(p,v)  { A16(p) = v; p+=2; }
 #endif
 
-typedef struct _U32_S
-{
-	U32 v;
-} _PACKED U32_S;
-
-typedef struct _U16_S
-{
-	U16 v;
-} _PACKED U16_S;
-
-#define A32(x) (((U32_S *)(x))->v)
-#define A16(x) (((U16_S *)(x))->v)
-
 
 //**************************************
 // Macros
 //**************************************
-#define LZ4_HASH_FUNCTION(i)	(((i) * 2654435761U) >> ((MINMATCH*8)-HASH_LOG))
-#define LZ4_HASH_VALUE(p)		LZ4_HASH_FUNCTION(A32(p))
-#define LZ4_COPYPACKET(s,d)		A32(d) = A32(s); d+=4; s+=4; A32(d) = A32(s); d+=4; s+=4;
-#define LZ4_WILDCOPY(s,d,e)		do { LZ4_COPYPACKET(s,d) } while (d<e);
-#define LZ4_BLINDCOPY(s,d,l)	{ BYTE* e=d+l; LZ4_WILDCOPY(s,d,e); d=e; }
-
+#define LZ4_WILDCOPY(s,d,e)     { do { LZ4_COPY8(s,d) } while (d<e); }
+#define LZ4_BLINDCOPY(s,d,l)    { BYTE* e=(d)+(l); LZ4_WILDCOPY(s,d,e); d=e; }
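
LZ4_WILDCOPY deliberately copies whole STEPSIZE words and may overrun the end
pointer by up to STEPSIZE-1 bytes; that is what makes it fast, and it is why
the format reserves slack (COPYLENGTH, MFLIMIT, LASTLITERALS) at buffer ends.
A sketch of the overrun, assuming a 64-bit build (STEPSIZE == 8):

    BYTE srcbuf[24] = {0};
    BYTE dstbuf[24];
    const BYTE* s = srcbuf;
    BYTE* d = dstbuf;
    LZ4_BLINDCOPY(s, d, 10);   /* requests 10 bytes, writes 16 (two 8-byte steps) */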
 
 
 //****************************
-// Compression CODE
+// Private functions
 //****************************
+#if LZ4_ARCH64
+
+forceinline int LZ4_NbCommonBytes (register U64 val)
+{
+# if defined(LZ4_BIG_ENDIAN)
+#   if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
+    unsigned long r = 0;
+    _BitScanReverse64( &r, val );
+    return (int)(r>>3);
+#   elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
+    return (__builtin_clzll(val) >> 3);
+#   else
+    int r;
+    if (!(val>>32)) { r=4; } else { r=0; val>>=32; }
+    if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }
+    r += (!val);
+    return r;
+#   endif
+# else
+#   if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
+    unsigned long r = 0;
+    _BitScanForward64( &r, val );
+    return (int)(r>>3);
+#   elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
+    return (__builtin_ctzll(val) >> 3);
+#   else
+    static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, 0, 2, 3, 6, 1, 5, 3, 5, 1, 3, 4, 4, 2, 5, 6, 7, 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5, 5, 3, 4, 5, 6, 7, 1, 2, 4, 6, 4, 4, 5, 7, 2, 6, 5, 7, 6, 7, 7 };
+    return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58];
+#   endif
+# endif
+}
 
-int LZ4_compressCtx(void** ctx,
-				 char* source, 
-				 char* dest,
-				 int isize)
-{	
-#if HEAPMODE
-	struct refTables *srt = (struct refTables *) (*ctx);
-	const BYTE** HashTable;
 #else
-	const BYTE* HashTable[HASHTABLESIZE] = {0};
-#endif
 
-	const BYTE* ip = (BYTE*) source;       
-	const BYTE* anchor = ip;
-	const BYTE* const iend = ip + isize;
-	const BYTE* const mflimit = iend - MFLIMIT;
-#define matchlimit (iend - LASTLITERALS)
+forceinline int LZ4_NbCommonBytes (register U32 val)
+{
+# if defined(LZ4_BIG_ENDIAN)
+#   if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
+    unsigned long r = 0;
+    _BitScanReverse( &r, val );
+    return (int)(r>>3);
+#   elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
+    return (__builtin_clz(val) >> 3);
+#   else
+    int r;
+    if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; }
+    r += (!val);
+    return r;
+#   endif
+# else
+#   if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
+    unsigned long r;
+    _BitScanForward( &r, val );
+    return (int)(r>>3);
+#   elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
+    return (__builtin_ctz(val) >> 3);
+#   else
+    static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2, 1, 3, 2, 0, 1, 3, 3, 1, 2, 2, 2, 2, 0, 3, 1, 2, 0, 1, 0, 1, 1 };
+    return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];
+#   endif
+# endif
+}
 
-	BYTE* op = (BYTE*) dest;
-	
-#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
-	const size_t DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2, 1, 3, 2, 0, 1, 3, 3, 1, 2, 2, 2, 2, 0, 3, 1, 2, 0, 1, 0, 1, 1 };
 #endif
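
Either variant of LZ4_NbCommonBytes turns the XOR of two words into a count
of equal bytes: val & -val isolates the lowest set bit, and on targets
without a hardware bit-scan the De Bruijn multiply-and-shift looks up its
position in a table. A worked example, assuming a little-endian 32-bit build:

    U32 diff = 0x00005600;                  /* XOR of two words differing in byte 1 */
    /* diff & -diff == 0x00000200, trailing-zero count 9, and 9 >> 3 == 1 */
    int common = LZ4_NbCommonBytes(diff);   /* == 1 matching leading byte */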
-	int len, length;
-	const int skipStrength = SKIPSTRENGTH;
-	U32 forwardH;
 
 
-	// Init 
-	if (isize<MINLENGTH) goto _last_literals;
-#if HEAPMODE
-	if (*ctx == NULL) 
-	{
-		srt = (struct refTables *) malloc ( sizeof(struct refTables) );
-		*ctx = (void*) srt;
-	}
-	HashTable = srt->hashTable;
-	memset((void*)HashTable, 0, sizeof(srt->hashTable));
-#else
-	(void) ctx;
-#endif
-
-
-	// First Byte
-	HashTable[LZ4_HASH_VALUE(ip)] = ip;
-	ip++; forwardH = LZ4_HASH_VALUE(ip);
-	
-	// Main Loop
-    for ( ; ; ) 
-	{
-		int findMatchAttempts = (1U << skipStrength) + 3;
-		const BYTE* forwardIp = ip;
-		const BYTE* ref;
-		BYTE* token;
-
-		// Find a match
-		do {
-			U32 h = forwardH;
-			int step = findMatchAttempts++ >> skipStrength;
-			ip = forwardIp;
-			forwardIp = ip + step;
-
-			if (forwardIp > mflimit) { goto _last_literals; }
-
-			forwardH = LZ4_HASH_VALUE(forwardIp);
-			ref = HashTable[h];
-			HashTable[h] = ip;
-
-		} while ((ref < ip - MAX_DISTANCE) || (A32(ref) != A32(ip)));
-
-		// Catch up
-		while ((ip>anchor) && (ref>(BYTE*)source) && (ip[-1]==ref[-1])) { ip--; ref--; }  
-
-		// Encode Literal length
-		length = ip - anchor;
-		token = op++;
-		if (length>=(int)RUN_MASK) { *token=(RUN_MASK<<ML_BITS); len = length-RUN_MASK; for(; len > 254 ; len-=255) *op++ = 255; *op++ = (BYTE)len; } 
-		else *token = (length<<ML_BITS);
-
-		// Copy Literals
-		LZ4_BLINDCOPY(anchor, op, length);
-
-
-_next_match:
-		// Encode Offset
-#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
-		A16(op) = (ip-ref); op+=2;
-#else
-		{ int delta = ip-ref; *op++ = delta; *op++ = delta>>8; }
-#endif
-
-		// Start Counting
-		ip+=MINMATCH; ref+=MINMATCH;   // MinMatch verified
-		anchor = ip;
-		while (ip<matchlimit-3)
-		{
-#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
-			int diff = A32(ref) ^ A32(ip);
-			if (!diff) { ip+=4; ref+=4; continue; }
-			ip += DeBruijnBytePos[((U32)((diff & -diff) * 0x077CB531U)) >> 27];
-#else
-			if (A32(ref) == A32(ip)) { ip+=4; ref+=4; continue; }
-			if (A16(ref) == A16(ip)) { ip+=2; ref+=2; }
-			if (*ref == *ip) ip++;
-#endif
-			goto _endCount;
-		}
-		if ((ip<(matchlimit-1)) && (A16(ref) == A16(ip))) { ip+=2; ref+=2; }
-		if ((ip<matchlimit) && (*ref == *ip)) ip++;
-_endCount:
-		len = (ip - anchor);
-		
-		// Encode MatchLength
-		if (len>=(int)ML_MASK) { *token+=ML_MASK; len-=ML_MASK; for(; len > 509 ; len-=510) { *op++ = 255; *op++ = 255; } if (len > 254) { len-=255; *op++ = 255; } *op++ = (BYTE)len; } 
-		else *token += len;	
-
-		// Test end of chunk
-		if (ip > mflimit) { anchor = ip;  break; }
-
-		// Fill table
-		HashTable[LZ4_HASH_VALUE(ip-2)] = ip-2;
-
-		// Test next position
-		ref = HashTable[LZ4_HASH_VALUE(ip)];
-		HashTable[LZ4_HASH_VALUE(ip)] = ip;
-		if ((ref > ip - (MAX_DISTANCE + 1)) && (A32(ref) == A32(ip))) { token = op++; *token=0; goto _next_match; }
-
-		// Prepare next loop
-		anchor = ip++; 
-		forwardH = LZ4_HASH_VALUE(ip);
-	}
-
-_last_literals:
-	// Encode Last Literals
-	{
-		int lastRun = iend - anchor;
-		if (lastRun>=(int)RUN_MASK) { *op++=(RUN_MASK<<ML_BITS); lastRun-=RUN_MASK; for(; lastRun > 254 ; lastRun-=255) *op++ = 255; *op++ = (BYTE) lastRun; } 
-		else *op++ = (lastRun<<ML_BITS);
-		memcpy(op, anchor, iend - anchor);
-		op += iend-anchor;
-	} 
 
-	// End
-	return (int) (((char*)op)-dest);
-}
+//******************************
+// Compression functions
+//******************************
 
+/*
+int LZ4_compress_stack(
+                 const char* source,
+                 char* dest,
+                 int inputSize)
+
+Compress 'inputSize' bytes from 'source' into an output buffer 'dest'.
+Destination buffer must already be allocated, and sized at a minimum of LZ4_compressBound(inputSize).
+return : the number of bytes written in buffer 'dest'
+*/
+#define FUNCTION_NAME LZ4_compress_stack
+#include "lz4_encoder.h"
 
 
-// Note : this function is valid only if isize < LZ4_64KLIMIT
-#define LZ4_64KLIMIT ((1U<<16) + (MFLIMIT-1))
-#define HASHLOG64K (HASH_LOG+1)
-#define LZ4_HASH64K_FUNCTION(i)	(((i) * 2654435761U) >> ((MINMATCH*8)-HASHLOG64K))
-#define LZ4_HASH64K_VALUE(p)	LZ4_HASH64K_FUNCTION(A32(p))
-int LZ4_compress64kCtx(void** ctx,
-				 char* source, 
-				 char* dest,
-				 int isize)
-{	
-#if HEAPMODE
-	struct refTables *srt = (struct refTables *) (*ctx);
-	U16* HashTable;
-#else
-	U16 HashTable[HASHTABLESIZE<<1] = {0};
-#endif
+/*
+int LZ4_compress_stack_limitedOutput(
+                 const char* source,
+                 char* dest,
+                 int inputSize,
+                 int maxOutputSize)
+
+Compress 'inputSize' bytes from 'source' into an output buffer 'dest' of maximum size 'maxOutputSize'.
+If it cannot achieve this, compression will stop, and the result of the function will be zero.
+return : the number of bytes written in buffer 'dest', or 0 if the compression fails
+*/
+#define FUNCTION_NAME LZ4_compress_stack_limitedOutput
+#define LIMITED_OUTPUT
+#include "lz4_encoder.h"
 
-	const BYTE* ip = (BYTE*) source;       
-	const BYTE* anchor = ip;
-	const BYTE* const base = ip;
-	const BYTE* const iend = ip + isize;
-	const BYTE* const mflimit = iend - MFLIMIT;
-#define matchlimit (iend - LASTLITERALS)
 
-	BYTE* op = (BYTE*) dest;
-	
-#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
-	const size_t DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2, 1, 3, 2, 0, 1, 3, 3, 1, 2, 2, 2, 2, 0, 3, 1, 2, 0, 1, 0, 1, 1 };
-#endif
-	int len, length;
-	const int skipStrength = SKIPSTRENGTH;
-	U32 forwardH;
+/*
+int LZ4_compress64k_stack(
+                 const char* source,
+                 char* dest,
+                 int inputSize)
+
+Compress 'inputSize' bytes from 'source' into an output buffer 'dest'.
+This function compresses better than LZ4_compress_stack(), on the condition that
+'inputSize' is less than LZ4_64KLIMIT; otherwise the function will fail.
+Destination buffer must already be allocated, and sized at a minimum of LZ4_compressBound(inputSize).
+return : the number of bytes written in buffer 'dest', or 0 if compression fails
+*/
+#define FUNCTION_NAME LZ4_compress64k_stack
+#define COMPRESS_64K
+#include "lz4_encoder.h"
 
 
-	// Init 
-	if (isize<MINLENGTH) goto _last_literals;
-#if HEAPMODE
-	if (*ctx == NULL) 
-	{
-		srt = (struct refTables *) malloc ( sizeof(struct refTables) );
-		*ctx = (void*) srt;
-	}
-	HashTable = (U16*)(srt->hashTable);
-	memset((void*)HashTable, 0, sizeof(srt->hashTable));
-#else
-	(void) ctx;
-#endif
-
-
-	// First Byte
-	ip++; forwardH = LZ4_HASH64K_VALUE(ip);
-	
-	// Main Loop
-    for ( ; ; ) 
-	{
-		int findMatchAttempts = (1U << skipStrength) + 3;
-		const BYTE* forwardIp = ip;
-		const BYTE* ref;
-		BYTE* token;
-
-		// Find a match
-		do {
-			U32 h = forwardH;
-			int step = findMatchAttempts++ >> skipStrength;
-			ip = forwardIp;
-			forwardIp = ip + step;
-
-			if (forwardIp > mflimit) { goto _last_literals; }
-
-			forwardH = LZ4_HASH64K_VALUE(forwardIp);
-			ref = base + HashTable[h];
-			HashTable[h] = ip - base;
-
-		} while (A32(ref) != A32(ip));
-
-		// Catch up
-		while ((ip>anchor) && (ref>(BYTE*)source) && (ip[-1]==ref[-1])) { ip--; ref--; }  
-
-		// Encode Literal length
-		length = ip - anchor;
-		token = op++;
-		if (length>=(int)RUN_MASK) { *token=(RUN_MASK<<ML_BITS); len = length-RUN_MASK; for(; len > 254 ; len-=255) *op++ = 255; *op++ = (BYTE)len; } 
-		else *token = (length<<ML_BITS);
-
-		// Copy Literals
-		LZ4_BLINDCOPY(anchor, op, length);
-
-
-_next_match:
-		// Encode Offset
-#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
-		A16(op) = (ip-ref); op+=2;
-#else
-		{ int delta = ip-ref; *op++ = delta; *op++ = delta>>8; }
-#endif
-
-		// Start Counting
-		ip+=MINMATCH; ref+=MINMATCH;   // MinMatch verified
-		anchor = ip;
-		while (ip<matchlimit-3)
-		{
-#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
-			int diff = A32(ref) ^ A32(ip);
-			if (!diff) { ip+=4; ref+=4; continue; }
-			ip += DeBruijnBytePos[((U32)((diff & -diff) * 0x077CB531U)) >> 27];
-#else
-			if (A32(ref) == A32(ip)) { ip+=4; ref+=4; continue; }
-			if (A16(ref) == A16(ip)) { ip+=2; ref+=2; }
-			if (*ref == *ip) ip++;
-#endif
-			goto _endCount;
-		}
-		if ((ip<(matchlimit-1)) && (A16(ref) == A16(ip))) { ip+=2; ref+=2; }
-		if ((ip<matchlimit) && (*ref == *ip)) ip++;
-_endCount:
-		len = (ip - anchor);
-		
-		// Encode MatchLength
-		if (len>=(int)ML_MASK) { *token+=ML_MASK; len-=ML_MASK; for(; len > 509 ; len-=510) { *op++ = 255; *op++ = 255; } if (len > 254) { len-=255; *op++ = 255; } *op++ = (BYTE)len; } 
-		else *token += len;	
-
-		// Test end of chunk
-		if (ip > mflimit) { anchor = ip;  break; }
-
-		// Test next position
-		ref = base + HashTable[LZ4_HASH64K_VALUE(ip)];
-		HashTable[LZ4_HASH64K_VALUE(ip)] = ip - base;
-		if (A32(ref) == A32(ip)) { token = op++; *token=0; goto _next_match; }
-
-		// Prepare next loop
-		anchor = ip++; 
-		forwardH = LZ4_HASH64K_VALUE(ip);
-	}
-
-_last_literals:
-	// Encode Last Literals
-	{
-		int lastRun = iend - anchor;
-		if (lastRun>=(int)RUN_MASK) { *op++=(RUN_MASK<<ML_BITS); lastRun-=RUN_MASK; for(; lastRun > 254 ; lastRun-=255) *op++ = 255; *op++ = (BYTE) lastRun; } 
-		else *op++ = (lastRun<<ML_BITS);
-		memcpy(op, anchor, iend - anchor);
-		op += iend-anchor;
-	} 
+/*
+int LZ4_compress64k_stack_limitedOutput(
+                 const char* source,
+                 char* dest,
+                 int inputSize,
+                 int maxOutputSize)
+
+Compress 'inputSize' bytes from 'source' into an output buffer 'dest' of maximum size 'maxOutputSize'.
+This function compresses better than LZ4_compress_stack_limitedOutput(), on the condition that
+'inputSize' is less than LZ4_64KLIMIT; otherwise the function will fail.
+If it cannot achieve this, compression will stop, and the result of the function will be zero.
+return : the number of bytes written in buffer 'dest', or 0 if the compression fails
+*/
+#define FUNCTION_NAME LZ4_compress64k_stack_limitedOutput
+#define COMPRESS_64K
+#define LIMITED_OUTPUT
+#include "lz4_encoder.h"
+
+
+/*
+void* LZ4_create();
+int LZ4_free(void* ctx);
+
+Used to allocate and free the hashTable memory
+used by the LZ4_compress_heap* family of functions.
+LZ4_create() returns NULL if memory allocation fails.
+*/
+void* LZ4_create() { return malloc(HASHTABLESIZE); }
+int   LZ4_free(void* ctx) { free(ctx); return 0; }
+
+
+/*
+int LZ4_compress_heap(
+                 void* ctx,
+                 const char* source,
+                 char* dest,
+                 int inputSize)
+
+Compress 'inputSize' bytes from 'source' into an output buffer 'dest'.
+The memory used for compression must be allocated with LZ4_create() and provided via pointer 'ctx'.
+Destination buffer must already be allocated, and sized at a minimum of LZ4_compressBound(inputSize).
+return : the number of bytes written in buffer 'dest'
+*/
+#define FUNCTION_NAME LZ4_compress_heap
+#define USE_HEAPMEMORY
+#include "lz4_encoder.h"
 
-	// End
-	return (int) (((char*)op)-dest);
-}
 
+/*
+int LZ4_compress_heap_limitedOutput(
+                 void* ctx,
+                 const char* source,
+                 char* dest,
+                 int inputSize,
+                 int maxOutputSize)
+
+Compress 'inputSize' bytes from 'source' into an output buffer 'dest' of maximum size 'maxOutputSize'.
+If it cannot achieve this, compression will stop, and the result of the function will be zero.
+The memory used for compression must be allocated with LZ4_create() and provided via pointer 'ctx'.
+return : the number of bytes written in buffer 'dest', or 0 if the compression fails
+*/
+#define FUNCTION_NAME LZ4_compress_heap_limitedOutput
+#define LIMITED_OUTPUT
+#define USE_HEAPMEMORY
+#include "lz4_encoder.h"
+
+
+/*
+int LZ4_compress64k_heap(
+                 void* ctx,
+                 const char* source,
+                 char* dest,
+                 int inputSize)
+
+Compress 'inputSize' bytes from 'source' into an output buffer 'dest'.
+The memory used for compression must be allocated with LZ4_create() and provided via pointer 'ctx'.
+'inputSize' must be less than LZ4_64KLIMIT, or the function will fail.
+Destination buffer must already be allocated, and sized at a minimum of LZ4_compressBound(inputSize).
+return : the number of bytes written in buffer 'dest'
+*/
+#define FUNCTION_NAME LZ4_compress64k_heap
+#define COMPRESS_64K
+#define USE_HEAPMEMORY
+#include "lz4_encoder.h"
 
 
-int LZ4_compress(char* source, 
-				 char* dest,
-				 int isize)
+/*
+int LZ4_compress64k_heap_limitedOutput(
+                 void* ctx,
+                 const char* source,
+                 char* dest,
+                 int inputSize,
+                 int maxOutputSize)
+
+Compress 'inputSize' bytes from 'source' into an output buffer 'dest' of maximum size 'maxOutputSize'.
+If it cannot achieve this, compression will stop, and the result of the function will be zero.
+The memory used for compression must be allocated with LZ4_create() and provided via pointer 'ctx'.
+'inputSize' must be less than LZ4_64KLIMIT, or the function will fail.
+return : the number of bytes written in buffer 'dest', or 0 if the compression fails
+*/
+#define FUNCTION_NAME LZ4_compress64k_heap_limitedOutput
+#define COMPRESS_64K
+#define LIMITED_OUTPUT
+#define USE_HEAPMEMORY
+#include "lz4_encoder.h"
+
+
+int LZ4_compress(const char* source, char* dest, int inputSize)
 {
 #if HEAPMODE
-	void* ctx = malloc(sizeof(struct refTables));
-	int result;
-	if (isize < LZ4_64KLIMIT)
-		result = LZ4_compress64kCtx(&ctx, source, dest, isize);
-	else result = LZ4_compressCtx(&ctx, source, dest, isize);
-	free(ctx);
-	return result;
+    void* ctx = LZ4_create();
+    int result;
+    if (ctx == NULL) return 0;    // Failed allocation => compression not done
+    if (inputSize < LZ4_64KLIMIT)
+        result = LZ4_compress64k_heap(ctx, source, dest, inputSize);
+    else result = LZ4_compress_heap(ctx, source, dest, inputSize);
+    LZ4_free(ctx);
+    return result;
 #else
-	if (isize < (int)LZ4_64KLIMIT) return LZ4_compress64kCtx(NULL, source, dest, isize);
-	return LZ4_compressCtx(NULL, source, dest, isize);
+    if (inputSize < (int)LZ4_64KLIMIT) return LZ4_compress64k_stack(source, dest, inputSize);
+    return LZ4_compress_stack(source, dest, inputSize);
 #endif
 }
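
A caller sketch for the default entry point, assuming lz4.h declares
LZ4_compressBound (the worst-case output size the comments above refer to):

    const char* text = "LZ4 - Fast LZ compression algorithm";
    int srcSize = (int)strlen(text) + 1;
    char* dst = (char*)malloc(LZ4_compressBound(srcSize));
    int compressedSize = (dst != NULL) ? LZ4_compress(text, dst, srcSize) : 0;
    /* the first compressedSize bytes of dst now hold the compressed block */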
 
 
+int LZ4_compress_limitedOutput(const char* source, char* dest, int inputSize, int maxOutputSize)
+{
+#if HEAPMODE
+    void* ctx = LZ4_create();
+    int result;
+    if (ctx == NULL) return 0;    // Failed allocation => compression not done
+    if (inputSize < LZ4_64KLIMIT)
+        result = LZ4_compress64k_heap_limitedOutput(ctx, source, dest, inputSize, maxOutputSize);
+    else result = LZ4_compress_heap_limitedOutput(ctx, source, dest, inputSize, maxOutputSize);
+    LZ4_free(ctx);
+    return result;
+#else
+    if (inputSize < (int)LZ4_64KLIMIT) return LZ4_compress64k_stack_limitedOutput(source, dest, inputSize, maxOutputSize);
+    return LZ4_compress_stack_limitedOutput(source, dest, inputSize, maxOutputSize);
+#endif
+}
 
 
 //****************************
-// Decompression CODE
+// Decompression functions
 //****************************
 
-// Note : The decoding functions LZ4_uncompress() and LZ4_uncompress_unknownOutputSize() 
-//		are safe against "buffer overflow" attack type
-//		since they will *never* write outside of the provided output buffer :
-//		they both check this condition *before* writing anything.
-//		A corrupted packet however can make them *read* within the first 64K before the output buffer.
-
-int LZ4_uncompress(char* source, 
-				 char* dest,
-				 int osize)
-{	
-	// Local Variables
-	const BYTE* restrict ip = (const BYTE*) source;
-	const BYTE* restrict ref;
-
-	BYTE* restrict op = (BYTE*) dest;
-	BYTE* const oend = op + osize;
-	BYTE* cpy;
-
-	BYTE token;
-	
-	U32	dec[4]={0, 3, 2, 3};
-	int	len, length;
-
-
-	// Main Loop
-	while (1)
-	{
-		// get runlength
-		token = *ip++;
-		if ((length=(token>>ML_BITS)) == RUN_MASK)  { for (;(len=*ip++)==255;length+=255){} length += len; } 
-
-		// copy literals
-		cpy = op+length;
-		if (cpy>oend-COPYLENGTH) 
-		{ 
-			if (cpy > oend) goto _output_error;
-			memcpy(op, ip, length);
-			ip += length;
-			break;    // Necessarily EOF
-		}
-		LZ4_WILDCOPY(ip, op, cpy); ip -= (op-cpy); op = cpy;
-
-
-		// get offset
-#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
-		ref = cpy - A16(ip); ip+=2;
-#else
-		{ int delta = *ip++; delta += *ip++ << 8; ref = cpy - delta; }
-#endif
-
-		// get matchlength
-		if ((length=(token&ML_MASK)) == ML_MASK) { for (;*ip==255;length+=255) {ip++;} length += *ip++; } 
-
-		// copy repeated sequence
-		if (op-ref<COPYTOKEN)
-		{
-			*op++ = *ref++;
-			*op++ = *ref++;
-			*op++ = *ref++;
-			*op++ = *ref++;
-			ref -= dec[op-ref];
-			A32(op)=A32(ref); 
-		} else { A32(op)=A32(ref); op+=4; ref+=4; }
-		cpy = op + length;
-		if (cpy > oend-COPYLENGTH)
-		{
-			if (cpy > oend) goto _output_error;	
-			LZ4_WILDCOPY(ref, op, (oend-COPYLENGTH));
-			while(op<cpy) *op++=*ref++;
-			op=cpy;
-			if (op == oend) break;    // Check EOF (should never happen, since last 5 bytes are supposed to be literals)
-			continue;
-		}
-		LZ4_WILDCOPY(ref, op, cpy);
-		op=cpy;		// correction
-	}
-
-	// end of decoding
-	return (int) (((char*)ip)-source);
+typedef enum { noPrefix = 0, withPrefix = 1 } prefix64k_directive;
+typedef enum { endOnOutputSize = 0, endOnInputSize = 1 } endCondition_directive;
+typedef enum { full = 0, partial = 1 } earlyEnd_directive;
+
+
+// This generic decompression function covers all use cases.
+// It shall be instantiated several times, using different sets of directives.
+// Note that it is essential that this generic function is really inlined,
+// in order to remove useless branches during compilation optimisation.
+forceinline int LZ4_decompress_generic(
+                 const char* source,
+                 char* dest,
+                 int inputSize,          //
+                 int outputSize,         // OutputSize must be != 0; if endOnInput==endOnInputSize, this value is the max size of Output Buffer.
+
+                 int endOnInput,         // endOnOutputSize, endOnInputSize
+                 int prefix64k,          // noPrefix, withPrefix
+                 int partialDecoding,    // full, partial
+                 int targetOutputSize    // only used if partialDecoding==partial
+                 )
+{
+    // Local Variables
+    const BYTE* restrict ip = (const BYTE*) source;
+    const BYTE* ref;
+    const BYTE* const iend = ip + inputSize;
+
+    BYTE* op = (BYTE*) dest;
+    BYTE* const oend = op + outputSize;
+    BYTE* cpy;
+    BYTE* oexit = op + targetOutputSize;
+
+    size_t dec32table[] = {0, 3, 2, 3, 0, 0, 0, 0};
+#if LZ4_ARCH64
+    size_t dec64table[] = {0, 0, 0, (size_t)-1, 0, 1, 2, 3};
+#endif
+
+
+    // Special cases
+    if ((partialDecoding) && (oexit> oend-MFLIMIT)) oexit = oend-MFLIMIT;                        // targetOutputSize too high => decode everything
+    if ((endOnInput) && unlikely(outputSize==0)) return ((inputSize==1) && (*ip==0)) ? 0 : -1;   // Empty output buffer
+    if ((!endOnInput) && unlikely(outputSize==0)) return (*ip==0?1:-1);
+
+
+    // Main Loop
+    while (1)
+    {
+        unsigned token;
+        size_t length;
+
+        // get runlength
+        token = *ip++;
+        if ((length=(token>>ML_BITS)) == RUN_MASK)
+        { 
+            unsigned s=255; 
+            while (((endOnInput)?ip<iend:1) && (s==255))
+            { 
+                s = *ip++; 
+                length += s; 
+            } 
+        }
+
+        // copy literals
+        cpy = op+length;
+        if (((endOnInput) && ((cpy>(partialDecoding?oexit:oend-MFLIMIT)) || (ip+length>iend-(2+1+LASTLITERALS))) )
+            || ((!endOnInput) && (cpy>oend-COPYLENGTH)))
+        {
+            if (partialDecoding)
+            {
+                if (cpy > oend) goto _output_error;                           // Error : write attempt beyond end of output buffer
+                if ((endOnInput) && (ip+length > iend)) goto _output_error;   // Error : read attempt beyond end of input buffer
+            }
+            else
+            {
+                if ((!endOnInput) && (cpy != oend)) goto _output_error;       // Error : block decoding must stop exactly there
+                if ((endOnInput) && ((ip+length != iend) || (cpy > oend))) goto _output_error;   // Error : input must be consumed
+            }
+            memcpy(op, ip, length);
+            ip += length;
+            op += length;
+            break;                                       // Necessarily EOF, due to parsing restrictions
+        }
+        LZ4_WILDCOPY(ip, op, cpy); ip -= (op-cpy); op = cpy;
+
+        // get offset
+        LZ4_READ_LITTLEENDIAN_16(ref,cpy,ip); ip+=2;
+        if ((prefix64k==noPrefix) && unlikely(ref < (BYTE* const)dest)) goto _output_error;   // Error : offset outside destination buffer
+
+        // get matchlength
+        if ((length=(token&ML_MASK)) == ML_MASK) 
+        { 
+            for ( ; (!endOnInput) || (ip<iend-(LASTLITERALS+1)) ; )   // Ensure enough bytes remain for LASTLITERALS + token
+            {
+                unsigned s = *ip++; 
+                length += s; 
+                if (s==255) continue; 
+                break; 
+            }
+        }
+
+        // copy repeated sequence
+        if unlikely((op-ref)<(int)STEPSIZE)
+        {
+#if LZ4_ARCH64
+            size_t dec64 = dec64table[op-ref];
+#else
+            const size_t dec64 = 0;
+#endif
+            op[0] = ref[0];
+            op[1] = ref[1];
+            op[2] = ref[2];
+            op[3] = ref[3];
+            op += 4, ref += 4; ref -= dec32table[op-ref];
+            A32(op) = A32(ref); 
+            op += STEPSIZE-4; ref -= dec64;
+        } else { LZ4_COPYSTEP(ref,op); }
+        cpy = op + length - (STEPSIZE-4);
+
+        if unlikely(cpy>oend-(COPYLENGTH)-(STEPSIZE-4))
+        {
+            if (cpy > oend-LASTLITERALS) goto _output_error;    // Error : last 5 bytes must be literals
+            LZ4_SECURECOPY(ref, op, (oend-COPYLENGTH));
+            while(op<cpy) *op++=*ref++;
+            op=cpy;
+            continue;
+        }
+        LZ4_WILDCOPY(ref, op, cpy);
+        op=cpy;   // correction
+    }
+
+    // end of decoding
+    if (endOnInput)
+       return (int) (((char*)op)-dest);     // Nb of output bytes decoded
+    else
+       return (int) (((char*)ip)-source);   // Nb of input bytes read
 
-	// write overflow error detected
+    // Overflow error detected
 _output_error:
-	return (int) (-(((char*)ip)-source));
+    return (int) (-(((char*)ip)-source))-1;
 }
 
 
-int LZ4_uncompress_unknownOutputSize(
-				char* source, 
-				char* dest,
-				int isize,
-				int maxOutputSize)
-{	
-	// Local Variables
-	const BYTE* restrict ip = (const BYTE*) source;
-	const BYTE* const iend = ip + isize;
-	const BYTE* restrict ref;
-
-	BYTE* restrict op = (BYTE*) dest;
-	BYTE* const oend = op + maxOutputSize;
-	BYTE* cpy;
-
-	BYTE token;
-	
-	U32	dec[4]={0, 3, 2, 3};
-	int	len, length;
-
-
-	// Main Loop
-	while (ip<iend)
-	{
-		// get runlength
-		token = *ip++;
-		if ((length=(token>>ML_BITS)) == RUN_MASK)  { for (;(len=*ip++)==255;length+=255){} length += len; } 
-
-		// copy literals
-		cpy = op+length;
-		if (cpy>oend-COPYLENGTH) 
-		{ 
-			if (cpy > oend) goto _output_error;
-			memcpy(op, ip, length);
-			op += length;
-			break;    // Necessarily EOF
-		}
-		LZ4_WILDCOPY(ip, op, cpy); ip -= (op-cpy); op = cpy;
-		if (ip>=iend) break;    // check EOF
-
-		// get offset
-#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
-		ref = cpy - A16(ip); ip+=2;
-#else
-		{ int delta = *ip++; delta += *ip++ << 8; ref = cpy - delta; }
-#endif
-
-		// get matchlength
-		if ((length=(token&ML_MASK)) == ML_MASK) { for (;(len=*ip++)==255;length+=255){} length += len; }
-
-		// copy repeated sequence
-		if (op-ref<COPYTOKEN)
-		{
-			*op++ = *ref++;
-			*op++ = *ref++;
-			*op++ = *ref++;
-			*op++ = *ref++;
-			ref -= dec[op-ref];
-			A32(op)=A32(ref); 
-		} else { A32(op)=A32(ref); op+=4; ref+=4; }
-		cpy = op + length;
-		if (cpy>oend-COPYLENGTH)
-		{
-			if (cpy > oend) goto _output_error;	
-			LZ4_WILDCOPY(ref, op, (oend-COPYLENGTH));
-			while(op<cpy) *op++=*ref++;
-			op=cpy;
-			if (op == oend) break;    // Check EOF (should never happen, since last 5 bytes are supposed to be literals)
-			continue;
-		}
-		LZ4_WILDCOPY(ref, op, cpy);
-		op=cpy;		// correction
-	}
+int LZ4_decompress_safe(const char* source, char* dest, int inputSize, int maxOutputSize)
+{
+    return LZ4_decompress_generic(source, dest, inputSize, maxOutputSize, endOnInputSize, noPrefix, full, 0);
+}
 
-	// end of decoding
-	return (int) (((char*)op)-dest);
+int LZ4_decompress_fast(const char* source, char* dest, int outputSize)
+{
+    return LZ4_decompress_generic(source, dest, 0, outputSize, endOnOutputSize, noPrefix, full, 0);
+}
 
-	// write overflow error detected
-_output_error:
-	return (int) (-(((char*)ip)-source));
+int LZ4_decompress_safe_withPrefix64k(const char* source, char* dest, int inputSize, int maxOutputSize)
+{
+    return LZ4_decompress_generic(source, dest, inputSize, maxOutputSize, endOnInputSize, withPrefix, full, 0);
+}
+
+int LZ4_decompress_fast_withPrefix64k(const char* source, char* dest, int outputSize)
+{
+    return LZ4_decompress_generic(source, dest, 0, outputSize, endOnOutputSize, withPrefix, full, 0);
+}
+
+int LZ4_decompress_safe_partial(const char* source, char* dest, int inputSize, int targetOutputSize, int maxOutputSize)
+{
+    return LZ4_decompress_generic(source, dest, inputSize, maxOutputSize, endOnInputSize, noPrefix, partial, targetOutputSize);
 }
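
Continuing the compression sketch after LZ4_compress above: decoding a block
whose compressed size is known, using the bounds-checked wrapper.

    char restored[64];
    int decoded = LZ4_decompress_safe(dst, restored, compressedSize,
                                      (int)sizeof(restored));
    /* decoded == srcSize on success; a negative value signals malformed input */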
 

Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/proto/RpcHeader.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/proto/RpcHeader.proto?rev=1513658&r1=1513657&r2=1513658&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/proto/RpcHeader.proto (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/proto/RpcHeader.proto Tue Aug 13 21:19:53 2013
@@ -141,6 +141,7 @@ message RpcSaslProto {
     INITIATE  = 2;
     CHALLENGE = 3;
     RESPONSE  = 4;
+    WRAP = 5;
   }
   
   message SaslAuth {

Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/webapps/static/hadoop.css
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/webapps/static/hadoop.css?rev=1513658&r1=1513657&r2=1513658&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/webapps/static/hadoop.css (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/webapps/static/hadoop.css Tue Aug 13 21:19:53 2013
@@ -45,8 +45,7 @@ div#dfsnodetable a#title {
 	font-weight : bolder;
 }
 
-div#dfsnodetable td, th {
-	border-bottom-style : none;
+div#dfsnodetable td, th {	
         padding-bottom : 4px;
         padding-top : 4px;
 }

Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/site/apt/FileSystemShell.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/site/apt/FileSystemShell.apt.vm?rev=1513658&r1=1513657&r2=1513658&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/site/apt/FileSystemShell.apt.vm (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/site/apt/FileSystemShell.apt.vm Tue Aug 13 21:19:53 2013
@@ -45,6 +45,27 @@ bin/hadoop fs <args>
    Differences are described with each of the commands. Error information is
    sent to stderr and the output is sent to stdout.
 
+appendToFile
+
+      Usage: <<<hdfs dfs -appendToFile <localsrc> ... <dst> >>>
+
+      Append a single src, or multiple srcs, from the local file system to the
+      destination file system. Also reads input from stdin and appends it to
+      the destination file system.
+
+        * <<<hdfs dfs -appendToFile localfile /user/hadoop/hadoopfile>>>
+
+        * <<<hdfs dfs -appendToFile localfile1 localfile2 /user/hadoop/hadoopfile>>>
+
+        * <<<hdfs dfs -appendToFile localfile hdfs://nn.example.com/hadoop/hadoopfile>>>
+
+        * <<<hdfs dfs -appendToFile - hdfs://nn.example.com/hadoop/hadoopfile>>>
+          Reads the input from stdin.
+
+      Exit Code:
+
+      Returns 0 on success and 1 on error.
+
 cat
 
    Usage: <<<hdfs dfs -cat URI [URI ...]>>>
@@ -76,7 +97,7 @@ chmod
 
    Change the permissions of files. With -R, make the change recursively
    through the directory structure. The user must be the owner of the file, or
-   else a super-user. Additional information is in the 
+   else a super-user. Additional information is in the
    {{{betterurl}Permissions Guide}}.
 
 chown

Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FSTestWrapper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FSTestWrapper.java?rev=1513658&r1=1513657&r2=1513658&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FSTestWrapper.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FSTestWrapper.java Tue Aug 13 21:19:53 2013
@@ -71,7 +71,8 @@ public abstract class FSTestWrapper impl
 
   public String getAbsoluteTestRootDir() throws IOException {
     if (absTestRootDir == null) {
-      if (testRootDir.startsWith("/")) {
+      Path testRootPath = new Path(testRootDir);
+      if (testRootPath.isAbsolute()) {
         absTestRootDir = testRootDir;
       } else {
         absTestRootDir = getWorkingDirectory().toString() + "/"

Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FSWrapper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FSWrapper.java?rev=1513658&r1=1513657&r2=1513658&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FSWrapper.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FSWrapper.java Tue Aug 13 21:19:53 2013
@@ -109,4 +109,7 @@ public interface FSWrapper {
   abstract public FileStatus[] listStatus(final Path f)
       throws AccessControlException, FileNotFoundException,
       UnsupportedFileSystemException, IOException;
+  
+  abstract public FileStatus[] globStatus(Path pathPattern, PathFilter filter)
+      throws IOException;
 }

Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextTestWrapper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextTestWrapper.java?rev=1513658&r1=1513657&r2=1513658&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextTestWrapper.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextTestWrapper.java Tue Aug 13 21:19:53 2013
@@ -332,4 +332,10 @@ public final class FileContextTestWrappe
       FileNotFoundException, UnsupportedFileSystemException, IOException {
     return fc.util().listStatus(f);
   }
+
+  @Override
+  public FileStatus[] globStatus(Path pathPattern, PathFilter filter)
+      throws IOException {
+    return fc.util().globStatus(pathPattern, filter);
+  }
 }

Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemTestWrapper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemTestWrapper.java?rev=1513658&r1=1513657&r2=1513658&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemTestWrapper.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemTestWrapper.java Tue Aug 13 21:19:53 2013
@@ -397,4 +397,10 @@ public final class FileSystemTestWrapper
       FileNotFoundException, UnsupportedFileSystemException, IOException {
     return fs.listStatus(f);
   }
+
+  @Override
+  public FileStatus[] globStatus(Path pathPattern, PathFilter filter)
+      throws IOException {
+    return fs.globStatus(pathPattern, filter);
+  }
 }

Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/SymlinkBaseTest.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/SymlinkBaseTest.java?rev=1513658&r1=1513657&r2=1513658&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/SymlinkBaseTest.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/SymlinkBaseTest.java Tue Aug 13 21:19:53 2013
@@ -20,13 +20,10 @@ package org.apache.hadoop.fs;
 import java.io.*;
 import java.net.URI;
 import java.util.EnumSet;
-import org.apache.hadoop.fs.FileContext;
+
 import org.apache.hadoop.fs.Options.CreateOpts;
 import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.fs.CreateFlag;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.test.GenericTestUtils;
 
 import static org.junit.Assert.*;
@@ -51,6 +48,13 @@ public abstract class SymlinkBaseTest {
   abstract protected String testBaseDir2() throws IOException;
   abstract protected URI testURI();
 
+  // Returns true if the filesystem is emulating symlink support. Certain
+  // checks will be bypassed if that is the case.
+  //
+  protected boolean emulatingSymlinksOnWindows() {
+    return false;
+  }
+
   protected IOException unwrapException(IOException e) {
     return e;
   }
@@ -156,8 +160,11 @@ public abstract class SymlinkBaseTest {
   @Test(timeout=10000)
   /** Try to create a directory given a path that refers to a symlink */
   public void testMkdirExistingLink() throws IOException {
+    Path file = new Path(testBaseDir1() + "/targetFile");
+    createAndWriteFile(file);
+
     Path dir  = new Path(testBaseDir1()+"/link");
-    wrapper.createSymlink(new Path("/doesNotExist"), dir, false);
+    wrapper.createSymlink(file, dir, false);
     try {
       wrapper.mkdir(dir, FileContext.DEFAULT_PERM, false);
       fail("Created a dir where a symlink exists");
@@ -224,6 +231,7 @@ public abstract class SymlinkBaseTest {
   @Test(timeout=10000)
   /** Stat a link to a file */
   public void testStatLinkToFile() throws IOException {
+    assumeTrue(!emulatingSymlinksOnWindows());
     Path file = new Path(testBaseDir1()+"/file");
     Path linkToFile = new Path(testBaseDir1()+"/linkToFile");
     createAndWriteFile(file);
@@ -232,8 +240,7 @@ public abstract class SymlinkBaseTest {
     assertTrue(wrapper.isSymlink(linkToFile));
     assertTrue(wrapper.isFile(linkToFile));
     assertFalse(wrapper.isDir(linkToFile));
-    assertEquals(file.toUri().getPath(),
-                 wrapper.getLinkTarget(linkToFile).toString());
+    assertEquals(file, wrapper.getLinkTarget(linkToFile));
     // The local file system does not fully resolve the link
     // when obtaining the file status
     if (!"file".equals(getScheme())) {
@@ -277,8 +284,7 @@ public abstract class SymlinkBaseTest {
     assertFalse(wrapper.isFile(linkToDir));
     assertTrue(wrapper.isDir(linkToDir));
 
-    assertEquals(dir.toUri().getPath(),
-                 wrapper.getLinkTarget(linkToDir).toString());
+    assertEquals(dir, wrapper.getLinkTarget(linkToDir));
   }
 
   @Test(timeout=10000)
@@ -351,6 +357,12 @@ public abstract class SymlinkBaseTest {
   /* Assert that the given link to a file behaves as expected. */
   private void checkLink(Path linkAbs, Path expectedTarget, Path targetQual)
       throws IOException {
+
+    // If we are emulating symlinks then many of these checks will fail
+    // so we skip them.
+    //
+    assumeTrue(!emulatingSymlinksOnWindows());
+
     Path dir = new Path(testBaseDir1());
     // isFile/Directory
     assertTrue(wrapper.isFile(linkAbs));
@@ -400,7 +412,7 @@ public abstract class SymlinkBaseTest {
       failureExpected = false;
     }
     try {
-      readFile(new Path(getScheme()+"://"+testBaseDir1()+"/linkToFile"));
+      readFile(new Path(getScheme()+":///"+testBaseDir1()+"/linkToFile"));
       assertFalse(failureExpected);
     } catch (Exception e) {
       if (!failureExpected) {
@@ -646,6 +658,7 @@ public abstract class SymlinkBaseTest {
   @Test(timeout=10000)
   /** Create symlink through a symlink */
   public void testCreateLinkViaLink() throws IOException {
+    assumeTrue(!emulatingSymlinksOnWindows());
     Path dir1        = new Path(testBaseDir1());
     Path file        = new Path(testBaseDir1(), "file");
     Path linkToDir   = new Path(testBaseDir2(), "linkToDir");
@@ -688,6 +701,7 @@ public abstract class SymlinkBaseTest {
   @Test(timeout=10000)
   /** Test create symlink using the same path */
   public void testCreateLinkTwice() throws IOException {
+    assumeTrue(!emulatingSymlinksOnWindows());
     Path file = new Path(testBaseDir1(), "file");
     Path link = new Path(testBaseDir1(), "linkToFile");
     createAndWriteFile(file);
@@ -783,7 +797,7 @@ public abstract class SymlinkBaseTest {
     Path linkToDir   = new Path(testBaseDir2(), "linkToDir");
     Path fileViaLink = new Path(linkToDir,      "test/file");
     // Symlink to .. is not a problem since the .. is squashed early
-    assertEquals(testBaseDir1(), dotDot.toString());
+    assertEquals(new Path(testBaseDir1()), dotDot);
     createAndWriteFile(file);
     wrapper.createSymlink(dotDot, linkToDir, false);
     readFile(fileViaLink);
@@ -876,7 +890,8 @@ public abstract class SymlinkBaseTest {
     assertFalse(wrapper.exists(linkViaLink));
     // Check that we didn't rename the link target
     assertTrue(wrapper.exists(file));
-    assertTrue(wrapper.getFileLinkStatus(linkNewViaLink).isSymlink());
+    assertTrue(wrapper.getFileLinkStatus(linkNewViaLink).isSymlink() ||
+        emulatingSymlinksOnWindows());
     readFile(linkNewViaLink);
   }
 
@@ -1014,7 +1029,8 @@ public abstract class SymlinkBaseTest {
     createAndWriteFile(file);
     wrapper.createSymlink(file, link1, false);
     wrapper.rename(link1, link2);
-    assertTrue(wrapper.getFileLinkStatus(link2).isSymlink());
+    assertTrue(wrapper.getFileLinkStatus(link2).isSymlink() ||
+        emulatingSymlinksOnWindows());
     readFile(link2);
     readFile(file);
     assertFalse(wrapper.exists(link1));
@@ -1038,8 +1054,11 @@ public abstract class SymlinkBaseTest {
     }
     wrapper.rename(link, file1, Rename.OVERWRITE);
     assertFalse(wrapper.exists(link));
-    assertTrue(wrapper.getFileLinkStatus(file1).isSymlink());
-    assertEquals(file2, wrapper.getLinkTarget(file1));
+
+    if (!emulatingSymlinksOnWindows()) {
+      assertTrue(wrapper.getFileLinkStatus(file1).isSymlink());
+      assertEquals(file2, wrapper.getLinkTarget(file1));
+    }
   }
 
   @Test(timeout=10000)
@@ -1078,16 +1097,21 @@ public abstract class SymlinkBaseTest {
   @Test(timeout=10000)
   /** Rename a symlink to itself */
   public void testRenameSymlinkToItself() throws IOException {
+    Path file = new Path(testBaseDir1(), "file");
+    createAndWriteFile(file);
+
     Path link = new Path(testBaseDir1(), "linkToFile1");
-    wrapper.createSymlink(new Path("/doestNotExist"), link, false);
+    wrapper.createSymlink(file, link, false);
     try {
       wrapper.rename(link, link);
+      fail("Failed to get expected IOException");
     } catch (IOException e) {
       assertTrue(unwrapException(e) instanceof FileAlreadyExistsException);
     }
     // Fails with overwrite as well
     try {
       wrapper.rename(link, link, Rename.OVERWRITE);
+      fail("Failed to get expected IOException");
     } catch (IOException e) {
       assertTrue(unwrapException(e) instanceof FileAlreadyExistsException);
     }
@@ -1096,6 +1120,7 @@ public abstract class SymlinkBaseTest {
   @Test(timeout=10000)
   /** Rename a symlink */
   public void testRenameSymlink() throws IOException {
+    assumeTrue(!emulatingSymlinksOnWindows());
     Path file  = new Path(testBaseDir1(), "file");
     Path link1 = new Path(testBaseDir1(), "linkToFile1");
     Path link2 = new Path(testBaseDir1(), "linkToFile2");
@@ -1193,6 +1218,7 @@ public abstract class SymlinkBaseTest {
   @Test(timeout=10000)
   /** Test rename the symlink's target */
   public void testRenameLinkTarget() throws IOException {
+    assumeTrue(!emulatingSymlinksOnWindows());
     Path file    = new Path(testBaseDir1(), "file");
     Path fileNew = new Path(testBaseDir1(), "fileNew");
     Path link    = new Path(testBaseDir1(), "linkToFile");

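The assumeTrue(!emulatingSymlinksOnWindows()) guards added throughout SymlinkBaseTest rely on JUnit's assumption mechanism: when the condition is false, the framework aborts the test and reports it as skipped rather than failed, which is the appropriate outcome for a platform limitation. A minimal self-contained sketch of the pattern, where the test class and the platform probe are illustrative stand-ins rather than code from this patch:

  import static org.junit.Assume.assumeTrue;

  import org.junit.Test;

  public class AssumptionSketchTest {

    // Illustrative stand-in for emulatingSymlinksOnWindows(); any
    // cheap environment probe works here.
    private static boolean runningOnWindows() {
      return System.getProperty("os.name").startsWith("Windows");
    }

    @Test
    public void testNeedsRealSymlinks() {
      // Skip (do not fail) where symlinks are only emulated.
      assumeTrue(!runningOnWindows());
      // ... assertions that only hold with real symlink support ...
    }
  }
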
Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java?rev=1513658&r1=1513657&r2=1513658&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java Tue Aug 13 21:19:53 2013
@@ -793,6 +793,8 @@ public class TestFileUtil {
         }
       }
       List<String> actualClassPaths = Arrays.asList(classPathAttr.split(" "));
+      Collections.sort(expectedClassPaths);
+      Collections.sort(actualClassPaths);
       Assert.assertEquals(expectedClassPaths, actualClassPaths);
     } finally {
       if (jarFile != null) {

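The two Collections.sort calls added above make the classpath assertion order-insensitive: the Class-Path entries are presumably not produced in a deterministic order, so both the expected and actual lists are normalized before assertEquals. A standalone sketch of the idiom, with made-up list contents:

  import java.util.ArrayList;
  import java.util.Arrays;
  import java.util.Collections;
  import java.util.List;

  import org.junit.Assert;
  import org.junit.Test;

  public class OrderInsensitiveCompareTest {
    @Test
    public void testSameEntriesInAnyOrder() {
      List<String> expected =
          new ArrayList<String>(Arrays.asList("b.jar", "a.jar"));
      List<String> actual =
          new ArrayList<String>(Arrays.asList("a.jar", "b.jar"));
      // Sorting both sides makes the comparison independent of
      // ordering while still catching missing or extra entries.
      Collections.sort(expected);
      Collections.sort(actual);
      Assert.assertEquals(expected, actual);
    }
  }
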
Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestPath.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestPath.java?rev=1513658&r1=1513657&r2=1513658&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestPath.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestPath.java Tue Aug 13 21:19:53 2013
@@ -28,11 +28,38 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.io.AvroTestUtil;
 import org.apache.hadoop.util.Shell;
 
-import junit.framework.TestCase;
+import com.google.common.base.Joiner;
 
-import static org.junit.Assert.fail;
+import junit.framework.TestCase;
 
 public class TestPath extends TestCase {
+  /**
+   * Merge an array of Path objects into a sorted, semicolon-separated
+   * path string.
+   */
+  public static String mergeStatuses(Path paths[]) {
+    String pathStrings[] = new String[paths.length];
+    int i = 0;
+    for (Path path : paths) {
+      pathStrings[i++] = path.toUri().getPath();
+    }
+    Arrays.sort(pathStrings);
+    return Joiner.on(";").join(pathStrings);
+  }
+
+  /**
+   * Merge an array of FileStatus objects into a sorted, semicolon-separated
+   * path string.
+   */
+  public static String mergeStatuses(FileStatus statuses[]) {
+    Path paths[] = new Path[statuses.length];
+    int i = 0;
+    for (FileStatus status : statuses) {
+      paths[i++] = status.getPath();
+    }
+    return mergeStatuses(paths);
+  }
+
   @Test (timeout = 30000)
   public void testToString() {
     toStringTest("/");
@@ -352,10 +379,11 @@ public class TestPath extends TestCase {
     // ensure globStatus with "*" finds all dir contents
     stats = lfs.globStatus(new Path(testRoot, "*"));
     Arrays.sort(stats);
-    assertEquals(paths.length, stats.length);
-    for (int i=0; i < paths.length; i++) {
-      assertEquals(paths[i].getParent(), stats[i].getPath());
+    Path parentPaths[] = new Path[paths.length];
+    for (int i = 0; i < paths.length; i++) {
+      parentPaths[i] = paths[i].getParent();
     }
+    assertEquals(mergeStatuses(parentPaths), mergeStatuses(stats));
 
     // ensure that globStatus with an escaped "\*" only finds "*"
     stats = lfs.globStatus(new Path(testRoot, "\\*"));
@@ -365,9 +393,7 @@ public class TestPath extends TestCase {
     // try to glob the inner file for all dirs
     stats = lfs.globStatus(new Path(testRoot, "*/f"));
     assertEquals(paths.length, stats.length);
-    for (int i=0; i < paths.length; i++) {
-      assertEquals(paths[i], stats[i].getPath());
-    }
+    assertEquals(mergeStatuses(paths), mergeStatuses(stats));
 
     // try to get the inner file for only the "*" dir
     stats = lfs.globStatus(new Path(testRoot, "\\*/f"));

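The mergeStatuses helpers reduce both the expected and the actual glob results to one canonical form, sorted paths joined with semicolons, so the assertions no longer depend on the order in which globStatus enumerates matches. A rough JDK-only equivalent of that canonicalization step (class and method names below are illustrative):

  import java.util.Arrays;

  public final class PathCanonicalizer {

    // Sort the strings and join them with ';' so that two collections
    // compare equal exactly when they hold the same paths, in any order.
    public static String canonicalize(String[] paths) {
      String[] copy = paths.clone();
      Arrays.sort(copy);
      StringBuilder sb = new StringBuilder();
      for (int i = 0; i < copy.length; i++) {
        if (i > 0) {
          sb.append(';');
        }
        sb.append(copy[i]);
      }
      return sb.toString();
    }

    public static void main(String[] args) {
      String a = canonicalize(new String[] {"/d1/f", "/d2/f"});
      String b = canonicalize(new String[] {"/d2/f", "/d1/f"});
      System.out.println(a.equals(b));  // prints: true
    }
  }
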
Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestSymlinkLocalFS.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestSymlinkLocalFS.java?rev=1513658&r1=1513657&r2=1513658&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestSymlinkLocalFS.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestSymlinkLocalFS.java Tue Aug 13 21:19:53 2013
@@ -30,6 +30,7 @@ import java.net.URI;
 import java.net.URISyntaxException;
 
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.util.Shell;
 import org.junit.Test;
 
 /**
@@ -62,6 +63,16 @@ abstract public class TestSymlinkLocalFS
   }
 
   @Override
+  protected boolean emulatingSymlinksOnWindows() {
+    // Java 6 on Windows has very poor symlink support. Specifically,
+    // File#length and File#renameTo do not work as expected.
+    // (see HADOOP-9061 for additional details)
+    // Hence some symlink tests will be skipped.
+    //
+    return (Shell.WINDOWS && !Shell.isJava7OrAbove());
+  }
+
+  @Override
   public void testCreateDanglingLink() throws IOException {
     // Dangling symlinks are not supported on Windows local file system.
     assumeTrue(!Path.WINDOWS);
@@ -171,6 +182,7 @@ abstract public class TestSymlinkLocalFS
    * file scheme (eg file://host/tmp/test).
    */  
   public void testGetLinkStatusPartQualTarget() throws IOException {
+    assumeTrue(!emulatingSymlinksOnWindows());
     Path fileAbs  = new Path(testBaseDir1()+"/file");
     Path fileQual = new Path(testURI().toString(), fileAbs);
     Path dir      = new Path(testBaseDir1());
@@ -205,4 +217,14 @@ abstract public class TestSymlinkLocalFS
      // Expected.
     }
   }
+
+  /** Test create symlink to . */
+  @Override
+  public void testCreateLinkToDot() throws IOException {
+    try {
+      super.testCreateLinkToDot();
+    } catch (IllegalArgumentException iae) {
+      // Expected.
+    }
+  }
 }

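The new emulatingSymlinksOnWindows override keys off two runtime facts, the operating system and the Java major version, via Hadoop's Shell utility (see HADOOP-9061 for the underlying File#length and File#renameTo problems). Outside Hadoop the same probes can be approximated from system properties; a hedged sketch, assuming the pre-Java 9 "1.major" specification-version format that was current when this patch was written:

  public final class PlatformProbe {

    // True on any Windows variant.
    static boolean isWindows() {
      return System.getProperty("os.name").startsWith("Windows");
    }

    // True for Java 7 or newer; assumes the legacy "1.major"
    // java.specification.version format (e.g. "1.6", "1.7").
    static boolean isJava7OrAbove() {
      String version = System.getProperty("java.specification.version");
      return Float.parseFloat(version) >= 1.7f;
    }

    static boolean emulatingSymlinks() {
      return isWindows() && !isJava7OrAbove();
    }

    public static void main(String[] args) {
      System.out.println("emulating symlinks: " + emulatingSymlinks());
    }
  }
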
Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestSymlinkLocalFSFileContext.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestSymlinkLocalFSFileContext.java?rev=1513658&r1=1513657&r2=1513658&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestSymlinkLocalFSFileContext.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestSymlinkLocalFSFileContext.java Tue Aug 13 21:19:53 2013
@@ -17,8 +17,13 @@
  */
 package org.apache.hadoop.fs;
 
+import org.apache.hadoop.util.Shell;
 import org.junit.BeforeClass;
 
+import java.io.IOException;
+
+import static org.junit.Assume.assumeTrue;
+
 public class TestSymlinkLocalFSFileContext extends TestSymlinkLocalFS {
 
   @BeforeClass
@@ -27,4 +32,9 @@ public class TestSymlinkLocalFSFileConte
     wrapper = new FileContextTestWrapper(context);
   }
 
+  @Override
+  public void testRenameFileWithDestParentSymlink() throws IOException {
+    assumeTrue(!Shell.WINDOWS);
+    super.testRenameFileWithDestParentSymlink();
+  }
 }

Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestSymlinkLocalFSFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestSymlinkLocalFSFileSystem.java?rev=1513658&r1=1513657&r2=1513658&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestSymlinkLocalFSFileSystem.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestSymlinkLocalFSFileSystem.java Tue Aug 13 21:19:53 2013
@@ -17,13 +17,20 @@
  */
 package org.apache.hadoop.fs;
 
+import java.io.FileNotFoundException;
 import java.io.IOException;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Options.Rename;
+import org.apache.hadoop.util.Shell;
 import org.junit.BeforeClass;
 import org.junit.Ignore;
 import org.junit.Test;
 
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static org.junit.Assume.assumeTrue;
+
 public class TestSymlinkLocalFSFileSystem extends TestSymlinkLocalFS {
 
   @BeforeClass
@@ -54,4 +61,36 @@ public class TestSymlinkLocalFSFileSyste
   @Override
   @Test(timeout=1000)
   public void testAccessFileViaInterSymlinkAbsTarget() throws IOException {}
+
+  @Override
+  public void testRenameFileWithDestParentSymlink() throws IOException {
+    assumeTrue(!Shell.WINDOWS);
+    super.testRenameFileWithDestParentSymlink();
+  }
+
+  @Override
+  @Test(timeout=10000)
+  /** Rename a symlink to itself */
+  public void testRenameSymlinkToItself() throws IOException {
+    Path file = new Path(testBaseDir1(), "file");
+    createAndWriteFile(file);
+
+    Path link = new Path(testBaseDir1(), "linkToFile1");
+    wrapper.createSymlink(file, link, false);
+    try {
+      wrapper.rename(link, link);
+      fail("Failed to get expected IOException");
+    } catch (IOException e) {
+      assertTrue(unwrapException(e) instanceof FileAlreadyExistsException);
+    }
+    // Fails with overwrite as well
+    try {
+      wrapper.rename(link, link, Rename.OVERWRITE);
+      fail("Failed to get expected IOException");
+    } catch (IOException e) {
+      // Todo: Fix this test when HADOOP-9819 is fixed.
+      assertTrue(unwrapException(e) instanceof FileAlreadyExistsException ||
+                 unwrapException(e) instanceof FileNotFoundException);
+    }
+  }
 }

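The overridden testRenameSymlinkToItself above follows the classic try/fail/catch idiom: fail() sits immediately after the statement that must throw, so a rename that silently succeeds is reported as a test failure, and the catch block then asserts on the unwrapped exception type. A generic sketch of the idiom in which the failing operation and the assertion are placeholders:

  import static org.junit.Assert.assertTrue;
  import static org.junit.Assert.fail;

  import java.io.IOException;

  import org.junit.Test;

  public class ExpectedExceptionIdiomTest {

    // Placeholder for an operation that is required to throw.
    private void renameOntoItself() throws IOException {
      throw new IOException("destination exists");
    }

    @Test
    public void testRenameMustFail() {
      try {
        renameOntoItself();
        // Reached only if nothing was thrown; that is the bug.
        fail("Failed to get expected IOException");
      } catch (IOException e) {
        // Inspect the type, cause, or message of the exception here.
        assertTrue(e.getMessage().contains("exists"));
      }
    }
  }
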
Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCodec.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCodec.java?rev=1513658&r1=1513657&r2=1513658&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCodec.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCodec.java Tue Aug 13 21:19:53 2013
@@ -134,6 +134,14 @@ public class TestCodec {
   public void testLz4Codec() throws IOException {
     if (NativeCodeLoader.isNativeCodeLoaded()) {
       if (Lz4Codec.isNativeCodeLoaded()) {
+        conf.setBoolean(
+            CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZ4_USELZ4HC_KEY,
+            false);
+        codecTest(conf, seed, 0, "org.apache.hadoop.io.compress.Lz4Codec");
+        codecTest(conf, seed, count, "org.apache.hadoop.io.compress.Lz4Codec");
+        conf.setBoolean(
+            CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZ4_USELZ4HC_KEY,
+            true);
         codecTest(conf, seed, 0, "org.apache.hadoop.io.compress.Lz4Codec");
         codecTest(conf, seed, count, "org.apache.hadoop.io.compress.Lz4Codec");
       } else {

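The TestCodec hunk runs the identical round-trip twice, first with IO_COMPRESSION_CODEC_LZ4_USELZ4HC_KEY set to false (the fast LZ4 encoder) and then to true (the LZ4HC high-compression encoder), so a single test exercises both native code paths. A stripped-down sketch of that sweep pattern, where roundTrip is an illustrative stand-in for codecTest:

  public class ToggleBothPathsSketch {

    // Stand-in for codecTest(conf, seed, count, codecClass): compress
    // and decompress a buffer, then assert the data survived.
    static void roundTrip(boolean useHighCompression) {
      System.out.println("round trip, useHighCompression="
          + useHighCompression);
    }

    public static void main(String[] args) {
      // Flip the single boolean switch and rerun the same body,
      // mirroring the setBoolean(...) calls in the hunk above.
      for (boolean hc : new boolean[] {false, true}) {
        roundTrip(hc);
      }
    }
  }
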
Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java?rev=1513658&r1=1513657&r2=1513658&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java Tue Aug 13 21:19:53 2013
@@ -100,6 +100,7 @@ public class TestRPC {
     
     void ping() throws IOException;
     void slowPing(boolean shouldSlow) throws IOException;
+    void sleep(long delay) throws IOException, InterruptedException;
     String echo(String value) throws IOException;
     String[] echo(String[] value) throws IOException;
     Writable echo(Writable value) throws IOException;
@@ -146,6 +147,11 @@ public class TestRPC {
     }
     
     @Override
+    public void sleep(long delay) throws InterruptedException {
+      Thread.sleep(delay);
+    }
+    
+    @Override
     public String echo(String value) throws IOException { return value; }
 
     @Override
@@ -932,6 +938,28 @@ public class TestRPC {
     }
   }
 
+  @Test
+  public void testConnectionPing() throws Exception {
+    Configuration conf = new Configuration();
+    int pingInterval = 50;
+    conf.setBoolean(CommonConfigurationKeys.IPC_CLIENT_PING_KEY, true);
+    conf.setInt(CommonConfigurationKeys.IPC_PING_INTERVAL_KEY, pingInterval);
+    final Server server = new RPC.Builder(conf)
+        .setProtocol(TestProtocol.class).setInstance(new TestImpl())
+        .setBindAddress(ADDRESS).setPort(0).setNumHandlers(5).setVerbose(true)
+        .build();
+    server.start();
+
+    final TestProtocol proxy = RPC.getProxy(TestProtocol.class,
+        TestProtocol.versionID, server.getListenerAddress(), conf);
+    try {
+      // this call will throw an exception if the server couldn't decode the ping
+      proxy.sleep(pingInterval*4);
+    } finally {
+      if (proxy != null) RPC.stopProxy(proxy);
+    }
+  }
+
   public static void main(String[] args) throws IOException {
     new TestRPC().testCallsInternal(conf);
 

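The new testConnectionPing enables IPC_CLIENT_PING_KEY, shrinks IPC_PING_INTERVAL_KEY to 50 ms, and then issues a server call that sleeps for four intervals, forcing the client to send pings while the call is outstanding; the call completes only if the server decodes those pings correctly. The general keepalive shape, pinging on a timer while a long call is in flight, can be sketched with plain JDK scheduling (the code below is illustrative and not Hadoop's actual wire protocol):

  import java.util.concurrent.Executors;
  import java.util.concurrent.ScheduledExecutorService;
  import java.util.concurrent.ScheduledFuture;
  import java.util.concurrent.TimeUnit;

  public class KeepalivePingSketch {

    public static void main(String[] args) throws Exception {
      final int pingIntervalMs = 50;
      ScheduledExecutorService scheduler =
          Executors.newSingleThreadScheduledExecutor();

      // While a long call is outstanding, emit a ping every interval
      // so the peer knows the connection is still alive.
      ScheduledFuture<?> pinger = scheduler.scheduleAtFixedRate(
          new Runnable() {
            @Override
            public void run() {
              System.out.println("ping");
            }
          }, pingIntervalMs, pingIntervalMs, TimeUnit.MILLISECONDS);

      // Stand-in for proxy.sleep(pingInterval * 4): work that takes
      // long enough to require several pings.
      Thread.sleep(pingIntervalMs * 4);

      pinger.cancel(false);
      scheduler.shutdown();
    }
  }
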
Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java?rev=1513658&r1=1513657&r2=1513658&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java Tue Aug 13 21:19:53 2013
@@ -29,6 +29,7 @@ import java.lang.annotation.Annotation;
 import java.net.InetSocketAddress;
 import java.security.PrivilegedExceptionAction;
 import java.security.Security;
+import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Set;
 import java.util.regex.Pattern;
@@ -44,8 +45,6 @@ import javax.security.sasl.SaslClient;
 import javax.security.sasl.SaslException;
 import javax.security.sasl.SaslServer;
 
-import junit.framework.Assert;
-
 import org.apache.commons.lang.StringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -62,11 +61,11 @@ import org.apache.hadoop.security.SaslPl
 import org.apache.hadoop.security.SaslRpcClient;
 import org.apache.hadoop.security.SaslRpcServer;
 import org.apache.hadoop.security.SaslRpcServer.AuthMethod;
+import org.apache.hadoop.security.SaslRpcServer.QualityOfProtection;
 import org.apache.hadoop.security.SecurityInfo;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.TestUserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
 import org.apache.hadoop.security.token.SecretManager;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.hadoop.security.token.Token;
@@ -77,9 +76,28 @@ import org.apache.log4j.Level;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameters;
 
 /** Unit tests for using Sasl over RPC. */
+@RunWith(Parameterized.class)
 public class TestSaslRPC {
+  @Parameters
+  public static Collection<Object[]> data() {
+    Collection<Object[]> params = new ArrayList<Object[]>();
+    for (QualityOfProtection qop : QualityOfProtection.values()) {
+      params.add(new Object[]{ qop });
+    }
+    return params;
+  }
+
+  QualityOfProtection expectedQop;
+  
+  public TestSaslRPC(QualityOfProtection qop) {
+    expectedQop = qop;
+  }
+  
   private static final String ADDRESS = "0.0.0.0";
 
   public static final Log LOG =
@@ -115,8 +133,12 @@ public class TestSaslRPC {
 
   @Before
   public void setup() {
+    LOG.info("---------------------------------");
+    LOG.info("Testing QOP:"+expectedQop);
+    LOG.info("---------------------------------");
     conf = new Configuration();
     conf.set(HADOOP_SECURITY_AUTHENTICATION, KERBEROS.toString());
+    conf.set("hadoop.rpc.protection", expectedQop.name().toLowerCase());
     UserGroupInformation.setConfiguration(conf);
     enableSecretManager = null;
     forceSecretManager = null;
@@ -226,15 +248,16 @@ public class TestSaslRPC {
       serverPrincipal = SERVER_PRINCIPAL_KEY)
   @TokenInfo(TestTokenSelector.class)
   public interface TestSaslProtocol extends TestRPC.TestProtocol {
-    public AuthenticationMethod getAuthMethod() throws IOException;
+    public AuthMethod getAuthMethod() throws IOException;
     public String getAuthUser() throws IOException;
   }
   
   public static class TestSaslImpl extends TestRPC.TestImpl implements
       TestSaslProtocol {
     @Override
-    public AuthenticationMethod getAuthMethod() throws IOException {
-      return UserGroupInformation.getCurrentUser().getAuthenticationMethod();
+    public AuthMethod getAuthMethod() throws IOException {
+      return UserGroupInformation.getCurrentUser()
+          .getAuthenticationMethod().getAuthMethod();
     }
     @Override
     public String getAuthUser() throws IOException {
@@ -341,8 +364,11 @@ public class TestSaslRPC {
     try {
       proxy = RPC.getProxy(TestSaslProtocol.class,
           TestSaslProtocol.versionID, addr, conf);
+      AuthMethod authMethod = proxy.getAuthMethod();
+      assertEquals(TOKEN, authMethod);
       //QOP must be auth
-      Assert.assertEquals(SaslRpcServer.SASL_PROPS.get(Sasl.QOP), "auth");
+      assertEquals(expectedQop.saslQop,
+                   RPC.getConnectionIdForProxy(proxy).getSaslQop());            
       proxy.ping();
     } finally {
       server.stop();
@@ -393,6 +419,7 @@ public class TestSaslRPC {
     newConf.set(CommonConfigurationKeysPublic.
         HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY, "");
 
+    Client client = null;
     TestSaslProtocol proxy1 = null;
     TestSaslProtocol proxy2 = null;
     TestSaslProtocol proxy3 = null;
@@ -402,7 +429,7 @@ public class TestSaslRPC {
       proxy1 = RPC.getProxy(TestSaslProtocol.class,
           TestSaslProtocol.versionID, addr, newConf);
       proxy1.getAuthMethod();
-      Client client = WritableRpcEngine.getClient(conf);
+      client = WritableRpcEngine.getClient(newConf);
       Set<ConnectionId> conns = client.getConnectionIds();
       assertEquals("number of connections in cache is wrong", 1, conns.size());
       // same conf, connection should be re-used
@@ -428,9 +455,13 @@ public class TestSaslRPC {
       assertNotSame(connsArray[2].getMaxIdleTime(), timeouts[1]);
     } finally {
       server.stop();
-      RPC.stopProxy(proxy1);
-      RPC.stopProxy(proxy2);
-      RPC.stopProxy(proxy3);
+      // this is dirty, but clear out the connection cache for the next run
+      if (client != null) {
+        client.getConnectionIds().clear();
+      }
+      if (proxy1 != null) RPC.stopProxy(proxy1);
+      if (proxy2 != null) RPC.stopProxy(proxy2);
+      if (proxy3 != null) RPC.stopProxy(proxy3);
     }
   }
   
@@ -793,14 +824,13 @@ public class TestSaslRPC {
       final AuthMethod serverAuth,
       final UseToken tokenType) throws Exception {
     
-    String currentUser = UserGroupInformation.getCurrentUser().getUserName();
-    
     final Configuration serverConf = new Configuration(conf);
     serverConf.set(HADOOP_SECURITY_AUTHENTICATION, serverAuth.toString());
     UserGroupInformation.setConfiguration(serverConf);
     
-    final UserGroupInformation serverUgi =
-        UserGroupInformation.createRemoteUser(currentUser + "-SERVER/localhost@NONE");
+    final UserGroupInformation serverUgi = (serverAuth == KERBEROS)
+        ? UserGroupInformation.createRemoteUser("server/localhost@NONE")
+        : UserGroupInformation.createRemoteUser("server");
     serverUgi.setAuthenticationMethod(serverAuth);
 
     final TestTokenSecretManager sm = new TestTokenSecretManager();
@@ -835,7 +865,7 @@ public class TestSaslRPC {
     UserGroupInformation.setConfiguration(clientConf);
     
     final UserGroupInformation clientUgi =
-        UserGroupInformation.createRemoteUser(currentUser + "-CLIENT");
+        UserGroupInformation.createRemoteUser("client");
     clientUgi.setAuthenticationMethod(clientAuth);    
 
     final InetSocketAddress addr = NetUtils.getConnectAddress(server);
@@ -873,14 +903,13 @@ public class TestSaslRPC {
                 TestSaslProtocol.versionID, addr, clientConf);
             
             proxy.ping();
-            // verify sasl completed
-            if (serverAuth != SIMPLE) {
-              assertEquals(SaslRpcServer.SASL_PROPS.get(Sasl.QOP), "auth");
-            }
-            
             // make sure the other side thinks we are who we said we are!!!
             assertEquals(clientUgi.getUserName(), proxy.getAuthUser());
-            return proxy.getAuthMethod().toString();
+            AuthMethod authMethod = proxy.getAuthMethod();
+            // verify sasl completed with correct QOP
+            assertEquals((authMethod != SIMPLE) ? expectedQop.saslQop : null,
+                         RPC.getConnectionIdForProxy(proxy).getSaslQop());            
+            return authMethod.toString();
           } finally {
             if (proxy != null) {
               RPC.stopProxy(proxy);