Posted to common-commits@hadoop.apache.org by vi...@apache.org on 2013/08/12 23:26:02 UTC

svn commit: r1513258 [6/9] - in /hadoop/common/branches/YARN-321/hadoop-common-project: ./ hadoop-annotations/ hadoop-auth-examples/ hadoop-auth-examples/src/main/webapp/ hadoop-auth-examples/src/main/webapp/annonymous/ hadoop-auth-examples/src/main/we...

Modified: hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java?rev=1513258&r1=1513257&r2=1513258&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java Mon Aug 12 21:25:49 2013
@@ -208,9 +208,13 @@ public class UserGroupInformation {
    * A method to initialize the fields that depend on a configuration.
    * Must be called before useKerberos or groups is used.
    */
-  private static synchronized void ensureInitialized() {
+  private static void ensureInitialized() {
     if (conf == null) {
-      initialize(new Configuration(), false);
+      synchronized(UserGroupInformation.class) {
+        if (conf == null) { // someone might have beat us
+          initialize(new Configuration(), false);
+        }
+      }
     }
   }
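
This hunk converts ensureInitialized() from a fully synchronized method into
double-checked locking, so the common already-initialized path no longer takes
the class monitor. One caveat: classic double-checked locking is only safe
under the Java Memory Model when the guarded field is read through a volatile
(or some other happens-before edge), so the pattern assumes conf is published
safely. A minimal sketch of the idiom, with hypothetical names:

  // Sketch only; field and class names are illustrative, not Hadoop's.
  public final class LazyHolder {
    private static volatile Object conf;  // volatile makes the unlocked read safe

    static Object get() {
      Object local = conf;                // fast path: one volatile read
      if (local == null) {
        synchronized (LazyHolder.class) {
          local = conf;                   // re-check: another thread may have won
          if (local == null) {
            conf = local = new Object();
          }
        }
      }
      return local;
    }
  }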
 
@@ -1042,7 +1046,7 @@ public class UserGroupInformation {
   @InterfaceAudience.Public
   @InterfaceStability.Evolving
   public static UserGroupInformation createRemoteUser(String user) {
-    if (user == null || "".equals(user)) {
+    if (user == null || user.isEmpty()) {
       throw new IllegalArgumentException("Null user");
     }
     Subject subject = new Subject();
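
This commit replaces "".equals(s) with s.isEmpty() at several call sites
(here, in ServiceAuthorizationManager, the delegation-token classes,
HostsFileReader, and StringUtils). The two are interchangeable only after a
null check: "".equals(null) is simply false, while null.isEmpty() throws
NullPointerException. Every converted site tests for null first or already
holds a non-null value, so the change is safe and reads more directly. The
guard both spellings implement:

  // Same result once s is known non-null; isEmpty() alone would NPE on null.
  static boolean isNullOrEmpty(String s) {
    return s == null || s.isEmpty();   // post-commit style; previously: "".equals(s)
  }
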
@@ -1114,7 +1118,7 @@ public class UserGroupInformation {
   @InterfaceStability.Evolving
   public static UserGroupInformation createProxyUser(String user,
       UserGroupInformation realUser) {
-    if (user == null || "".equals(user)) {
+    if (user == null || user.isEmpty()) {
       throw new IllegalArgumentException("Null user");
     }
     if (realUser == null) {

Modified: hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/RefreshAuthorizationPolicyProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/RefreshAuthorizationPolicyProtocol.java?rev=1513258&r1=1513257&r2=1513258&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/RefreshAuthorizationPolicyProtocol.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/RefreshAuthorizationPolicyProtocol.java Mon Aug 12 21:25:49 2013
@@ -22,6 +22,7 @@ import java.io.IOException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.io.retry.Idempotent;
 import org.apache.hadoop.security.KerberosInfo;
 
 /**
@@ -42,5 +43,6 @@ public interface RefreshAuthorizationPol
    * Refresh the service-level authorization policy in-effect.
    * @throws IOException
    */
+  @Idempotent
   void refreshServiceAcl() throws IOException;
 }
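
The @Idempotent annotation (org.apache.hadoop.io.retry.Idempotent) tells the
RPC retry machinery that calling the method twice leaves the server in the
same state as calling it once, so a client may safely re-send it after a
timeout or failover. Refreshing an ACL from configuration fits that bill; the
same annotation is added to GetUserMappingsProtocol.getGroupsForUser() below.
A minimal sketch of the pattern on a hypothetical protocol:

  import java.io.IOException;
  import org.apache.hadoop.io.retry.Idempotent;

  // Hypothetical protocol interface, mirroring the shape used in this commit.
  public interface ExampleRefreshProtocol {
    /** Re-reads server-side state; repeating the call is harmless. */
    @Idempotent
    void refreshSomething() throws IOException;
  }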

Modified: hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ServiceAuthorizationManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ServiceAuthorizationManager.java?rev=1513258&r1=1513257&r2=1513258&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ServiceAuthorizationManager.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ServiceAuthorizationManager.java Mon Aug 12 21:25:49 2013
@@ -88,7 +88,7 @@ public class ServiceAuthorizationManager
     String clientPrincipal = null; 
     if (krbInfo != null) {
       String clientKey = krbInfo.clientPrincipal();
-      if (clientKey != null && !clientKey.equals("")) {
+      if (clientKey != null && !clientKey.isEmpty()) {
         try {
           clientPrincipal = SecurityUtil.getServerPrincipal(
               conf.get(clientKey), addr);

Modified: hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenIdentifier.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenIdentifier.java?rev=1513258&r1=1513257&r2=1513258&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenIdentifier.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenIdentifier.java Mon Aug 12 21:25:49 2013
@@ -87,12 +87,12 @@ extends TokenIdentifier {
    */
   @Override
   public UserGroupInformation getUser() {
-    if ( (owner == null) || ("".equals(owner.toString()))) {
+    if ( (owner == null) || (owner.toString().isEmpty())) {
       return null;
     }
     final UserGroupInformation realUgi;
     final UserGroupInformation ugi;
-    if ((realUser == null) || ("".equals(realUser.toString()))
+    if ((realUser == null) || (realUser.toString().isEmpty())
         || realUser.equals(owner)) {
       ugi = realUgi = UserGroupInformation.createRemoteUser(owner.toString());
     } else {

Modified: hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java?rev=1513258&r1=1513257&r2=1513258&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java Mon Aug 12 21:25:49 2013
@@ -335,7 +335,7 @@ extends AbstractDelegationTokenIdentifie
       throw new InvalidToken("User " + renewer + 
                              " tried to renew an expired token");
     }
-    if ((id.getRenewer() == null) || ("".equals(id.getRenewer().toString()))) {
+    if ((id.getRenewer() == null) || (id.getRenewer().toString().isEmpty())) {
       throw new AccessControlException("User " + renewer + 
                                        " tried to renew a token without " +
                                        "a renewer");
@@ -392,7 +392,7 @@ extends AbstractDelegationTokenIdentifie
     HadoopKerberosName cancelerKrbName = new HadoopKerberosName(canceller);
     String cancelerShortName = cancelerKrbName.getShortName();
     if (!canceller.equals(owner)
-        && (renewer == null || "".equals(renewer.toString()) || !cancelerShortName
+        && (renewer == null || renewer.toString().isEmpty() || !cancelerShortName
             .equals(renewer.toString()))) {
       throw new AccessControlException(canceller
           + " is not authorized to cancel the token");

Modified: hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tools/GetUserMappingsProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tools/GetUserMappingsProtocol.java?rev=1513258&r1=1513257&r2=1513258&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tools/GetUserMappingsProtocol.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tools/GetUserMappingsProtocol.java Mon Aug 12 21:25:49 2013
@@ -21,6 +21,7 @@ import java.io.IOException;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.io.retry.Idempotent;
 
 /**
  * Protocol implemented by the Name Node and Job Tracker which maps users to
@@ -41,5 +42,6 @@ public interface GetUserMappingsProtocol
    * @return The set of groups the user belongs to.
    * @throws IOException
    */
+  @Idempotent
   public String[] getGroupsForUser(String user) throws IOException;
 }

Modified: hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/HostsFileReader.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/HostsFileReader.java?rev=1513258&r1=1513257&r2=1513258&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/HostsFileReader.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/HostsFileReader.java Mon Aug 12 21:25:49 2013
@@ -82,13 +82,13 @@ public class HostsFileReader {
 
   public synchronized void refresh() throws IOException {
     LOG.info("Refreshing hosts (include/exclude) list");
-    if (!includesFile.equals("")) {
+    if (!includesFile.isEmpty()) {
       Set<String> newIncludes = new HashSet<String>();
       readFileToSet("included", includesFile, newIncludes);
       // switch the new hosts that are to be included
       includes = newIncludes;
     }
-    if (!excludesFile.equals("")) {
+    if (!excludesFile.isEmpty()) {
       Set<String> newExcludes = new HashSet<String>();
       readFileToSet("excluded", excludesFile, newExcludes);
       // switch the excluded hosts

Modified: hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeCodeLoader.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeCodeLoader.java?rev=1513258&r1=1513257&r2=1513258&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeCodeLoader.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeCodeLoader.java Mon Aug 12 21:25:49 2013
@@ -79,6 +79,8 @@ public class NativeCodeLoader {
    */
   public static native boolean buildSupportsSnappy();
 
+  public static native String getLibraryName();
+
   /**
    * Return if native hadoop libraries, if present, can be used for this job.
    * @param conf configuration
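
getLibraryName() gives Java code a way to ask libhadoop where it was loaded
from; the JNI implementation is not part of this hunk, but on Unix it
presumably resolves the path via dladdr(), the same technique the bzip2
change later in this commit uses. The result feeds the expanded
NativeLibraryChecker output below. An illustrative guard, since calling a
native method before the library loads would throw UnsatisfiedLinkError:

  if (NativeCodeLoader.isNativeCodeLoaded()) {
    System.out.println("libhadoop: " + NativeCodeLoader.getLibraryName());
  }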

Modified: hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeLibraryChecker.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeLibraryChecker.java?rev=1513258&r1=1513257&r2=1513258&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeLibraryChecker.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeLibraryChecker.java Mon Aug 12 21:25:49 2013
@@ -20,7 +20,9 @@ package org.apache.hadoop.util;
 
 import org.apache.hadoop.util.NativeCodeLoader;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.compress.Lz4Codec;
 import org.apache.hadoop.io.compress.SnappyCodec;
+import org.apache.hadoop.io.compress.bzip2.Bzip2Factory;
 import org.apache.hadoop.io.compress.zlib.ZlibFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -35,7 +37,7 @@ public class NativeLibraryChecker {
     String usage = "NativeLibraryChecker [-a|-h]\n"
         + "  -a  use -a to check all libraries are available\n"
         + "      by default just check hadoop library is available\n"
-        + "      exit with error code if check failed\n"
+        + "      exit with error code 1 if check failed\n"
         + "  -h  print this message\n";
     if (args.length > 1 ||
         (args.length == 1 &&
@@ -51,23 +53,44 @@ public class NativeLibraryChecker {
       }
       checkAll = true;
     }
+    Configuration conf = new Configuration();
     boolean nativeHadoopLoaded = NativeCodeLoader.isNativeCodeLoaded();
     boolean zlibLoaded = false;
     boolean snappyLoaded = false;
     // lz4 is linked within libhadoop
     boolean lz4Loaded = nativeHadoopLoaded;
+    boolean bzip2Loaded = Bzip2Factory.isNativeBzip2Loaded(conf);
+    String hadoopLibraryName = "";
+    String zlibLibraryName = "";
+    String snappyLibraryName = "";
+    String lz4LibraryName = "";
+    String bzip2LibraryName = "";
     if (nativeHadoopLoaded) {
-      zlibLoaded = ZlibFactory.isNativeZlibLoaded(new Configuration());
+      hadoopLibraryName = NativeCodeLoader.getLibraryName();
+      zlibLoaded = ZlibFactory.isNativeZlibLoaded(conf);
+      if (zlibLoaded) {
+        zlibLibraryName = ZlibFactory.getLibraryName();
+      }
       snappyLoaded = NativeCodeLoader.buildSupportsSnappy() &&
           SnappyCodec.isNativeCodeLoaded();
+      if (snappyLoaded && NativeCodeLoader.buildSupportsSnappy()) {
+        snappyLibraryName = SnappyCodec.getLibraryName();
+      }
+      if (lz4Loaded) {
+        lz4LibraryName = Lz4Codec.getLibraryName();
+      }
+      if (bzip2Loaded) {
+        bzip2LibraryName = Bzip2Factory.getLibraryName(conf);
+      }
     }
     System.out.println("Native library checking:");
-    System.out.printf("hadoop: %b\n", nativeHadoopLoaded);
-    System.out.printf("zlib:   %b\n", zlibLoaded);
-    System.out.printf("snappy: %b\n", snappyLoaded);
-    System.out.printf("lz4:    %b\n", lz4Loaded);
+    System.out.printf("hadoop: %b %s\n", nativeHadoopLoaded, hadoopLibraryName);
+    System.out.printf("zlib:   %b %s\n", zlibLoaded, zlibLibraryName);
+    System.out.printf("snappy: %b %s\n", snappyLoaded, snappyLibraryName);
+    System.out.printf("lz4:    %b %s\n", lz4Loaded, lz4LibraryName);
+    System.out.printf("bzip2:  %b %s\n", bzip2Loaded, bzip2LibraryName);
     if ((!nativeHadoopLoaded) ||
-        (checkAll && !(zlibLoaded && snappyLoaded && lz4Loaded))) {
+        (checkAll && !(zlibLoaded && snappyLoaded && lz4Loaded && bzip2Loaded))) {
      // return 1 to indicate check failed
       ExitUtil.terminate(1);
     }
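
The checker now prints which shared object backs each feature instead of a
bare boolean, and grows a bzip2 row. (The snappy-name guard re-tests
NativeCodeLoader.buildSupportsSnappy() even though snappyLoaded already
implies it; redundant but harmless.) Illustrative output on a box where
everything resolves: the paths are hypothetical, while "revision:99" is the
literal string the bundled lz4 reports, per the Lz4Compressor.c change below:

  Native library checking:
  hadoop: true /usr/lib/hadoop/lib/native/libhadoop.so
  zlib:   true /lib/x86_64-linux-gnu/libz.so.1
  snappy: true /usr/lib/libsnappy.so.1
  lz4:    true revision:99
  bzip2:  true /lib/x86_64-linux-gnu/libbz2.so.1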

Modified: hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ProtoUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ProtoUtil.java?rev=1513258&r1=1513257&r2=1513258&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ProtoUtil.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ProtoUtil.java Mon Aug 12 21:25:49 2013
@@ -28,6 +28,8 @@ import org.apache.hadoop.ipc.protobuf.Rp
 import org.apache.hadoop.security.SaslRpcServer.AuthMethod;
 import org.apache.hadoop.security.UserGroupInformation;
 
+import com.google.protobuf.ByteString;
+
 public abstract class ProtoUtil {
 
   /**
@@ -158,9 +160,11 @@ public abstract class ProtoUtil {
   }
  
   public static RpcRequestHeaderProto makeRpcRequestHeader(RPC.RpcKind rpcKind,
-      RpcRequestHeaderProto.OperationProto operation, int callId) {
+      RpcRequestHeaderProto.OperationProto operation, int callId,
+      int retryCount, byte[] uuid) {
     RpcRequestHeaderProto.Builder result = RpcRequestHeaderProto.newBuilder();
-    result.setRpcKind(convert(rpcKind)).setRpcOp(operation).setCallId(callId);
+    result.setRpcKind(convert(rpcKind)).setRpcOp(operation).setCallId(callId)
+        .setRetryCount(retryCount).setClientId(ByteString.copyFrom(uuid));
     return result.build();
   }
 }
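
Every RPC request header now carries a retry count and a 16-byte client id in
addition to the call id; together these let the server recognize a re-sent
request (for example, via a retry cache) rather than blindly re-executing it.
A hedged sketch of building such a header; deriving the client id from a
random UUID is purely illustrative (the real id is managed by the IPC
client), and the imports assume the RpcHeaderProtos generated classes of this
era:

  import java.nio.ByteBuffer;
  import java.util.UUID;
  import org.apache.hadoop.ipc.RPC;
  import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcRequestHeaderProto;
  import org.apache.hadoop.util.ProtoUtil;

  static RpcRequestHeaderProto exampleHeader() {
    UUID u = UUID.randomUUID();
    byte[] clientId = ByteBuffer.allocate(16)     // 16 bytes, like a UUID
        .putLong(u.getMostSignificantBits())
        .putLong(u.getLeastSignificantBits())
        .array();
    return ProtoUtil.makeRpcRequestHeader(
        RPC.RpcKind.RPC_PROTOCOL_BUFFER,
        RpcRequestHeaderProto.OperationProto.RPC_FINAL_PACKET,
        0 /* callId */, 0 /* retryCount */, clientId);
  }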

Modified: hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java?rev=1513258&r1=1513257&r2=1513258&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java Mon Aug 12 21:25:49 2013
@@ -123,6 +123,12 @@ abstract public class Shell {
                    : new String[] { "ln", "-s", target, link };
   }
 
+  /** Return a command to read the target of a symbolic link. */
+  public static String[] getReadlinkCommand(String link) {
+    return WINDOWS ? new String[] { WINUTILS, "readlink", link }
+        : new String[] { "readlink", link };
+  }
+
   /** Return a command for determining if process with specified pid is alive. */
   public static String[] getCheckProcessIsAliveCommand(String pid) {
     return Shell.WINDOWS ?
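
getReadlinkCommand() follows the pattern of getSymlinkCommand() just above
it: return a platform-appropriate argv, using winutils on Windows and the
plain readlink binary elsewhere. A hedged usage sketch via
Shell.execCommand(), which runs an argv and returns its stdout:

  import java.io.IOException;
  import org.apache.hadoop.util.Shell;

  // Illustrative helper: resolve a symlink's target with the new command.
  static String readLinkTarget(String link) throws IOException {
    String out = Shell.execCommand(Shell.getReadlinkCommand(link));
    return out.trim();  // readlink prints a trailing newline
  }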

Modified: hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java?rev=1513258&r1=1513257&r2=1513258&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java Mon Aug 12 21:25:49 2013
@@ -40,7 +40,6 @@ import org.apache.hadoop.classification.
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.util.Shell;
 
 import com.google.common.net.InetAddresses;
 
@@ -353,7 +352,7 @@ public class StringUtils {
    * @return an array of <code>String</code> values
    */
   public static String[] getTrimmedStrings(String str){
-    if (null == str || "".equals(str.trim())) {
+    if (null == str || str.trim().isEmpty()) {
       return emptyStringArray;
     }
 
@@ -413,7 +412,7 @@ public class StringUtils {
       String str, char separator) {
     // String.split returns a single empty result for splitting the empty
     // string.
-    if ("".equals(str)) {
+    if (str.isEmpty()) {
       return new String[]{""};
     }
     ArrayList<String> strList = new ArrayList<String>();
@@ -894,4 +893,16 @@ public class StringUtils {
     matcher.appendTail(sb);
     return sb.toString();
   }
+  
+  /**
+   * Get stack trace for a given thread.
+   */
+  public static String getStackTrace(Thread t) {
+    final StackTraceElement[] stackTrace = t.getStackTrace();
+    StringBuilder str = new StringBuilder();
+    for (StackTraceElement e : stackTrace) {
+      str.append(e.toString() + "\n");
+    }
+    return str.toString();
+  }
 }
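
getStackTrace(Thread) renders another thread's stack as text, which is handy
for logging. Minor nit: str.append(e.toString() + "\n") builds an
intermediate String before appending; the chained str.append(e).append('\n')
would do the same work without it, though the behavior is identical either
way. Usage is a one-liner:

  // Illustrative: dump the current thread's stack via the new helper.
  System.err.println(StringUtils.getStackTrace(Thread.currentThread()));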

Propchange: hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/package.html
------------------------------------------------------------------------------
    svn:eol-style = native

Propchange: hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/java/overview.html
------------------------------------------------------------------------------
    svn:eol-style = native

Modified: hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/native/native.vcxproj
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/native/native.vcxproj?rev=1513258&r1=1513257&r2=1513258&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/native/native.vcxproj (original)
+++ hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/native/native.vcxproj Mon Aug 12 21:25:49 2013
@@ -17,7 +17,7 @@
    limitations under the License.
 -->
 
-<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+<Project DefaultTargets="CheckRequireSnappy;Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
   <ItemGroup Label="ProjectConfigurations">
     <ProjectConfiguration Include="Release|x64">
       <Configuration>Release</Configuration>
@@ -49,6 +49,21 @@
     <IntDir>..\..\..\target\native\$(Configuration)\</IntDir>
     <TargetName>hadoop</TargetName>
   </PropertyGroup>
+  <PropertyGroup>
+    <SnappyLib Condition="Exists('$(CustomSnappyPrefix)\snappy.dll')">$(CustomSnappyPrefix)</SnappyLib>
+    <SnappyLib Condition="Exists('$(CustomSnappyPrefix)\lib\snappy.dll') And '$(SnappyLib)' == ''">$(CustomSnappyPrefix)\lib</SnappyLib>
+    <SnappyLib Condition="Exists('$(CustomSnappyLib)') And '$(SnappyLib)' == ''">$(CustomSnappyLib)</SnappyLib>
+    <SnappyInclude Condition="Exists('$(CustomSnappyPrefix)\snappy.h')">$(CustomSnappyPrefix)</SnappyInclude>
+    <SnappyInclude Condition="Exists('$(CustomSnappyPrefix)\include\snappy.h') And '$(SnappyInclude)' == ''">$(CustomSnappyPrefix)\include</SnappyInclude>
+    <SnappyInclude Condition="Exists('$(CustomSnappyInclude)') And '$(SnappyInclude)' == ''">$(CustomSnappyInclude)</SnappyInclude>
+    <SnappyEnabled Condition="'$(SnappyLib)' != '' And '$(SnappyInclude)' != ''">true</SnappyEnabled>
+    <IncludePath Condition="'$(SnappyEnabled)' == 'true'">$(SnappyInclude);$(IncludePath)</IncludePath>
+  </PropertyGroup>
+  <Target Name="CheckRequireSnappy">
+    <Error
+      Text="Required snappy library could not be found.  SnappyLibrary=$(SnappyLibrary), SnappyInclude=$(SnappyInclude), CustomSnappyLib=$(CustomSnappyLib), CustomSnappyInclude=$(CustomSnappyInclude), CustomSnappyPrefix=$(CustomSnappyPrefix)"
+      Condition="'$(RequireSnappy)' == 'true' And '$(SnappyEnabled)' != 'true'" />
+  </Target>
   <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
     <ClCompile>
       <WarningLevel>Level3</WarningLevel>
@@ -71,19 +86,29 @@
     </Link>
   </ItemDefinitionGroup>
   <ItemGroup>
+    <ClCompile Include="src\org\apache\hadoop\io\compress\snappy\SnappyCompressor.c" Condition="'$(SnappyEnabled)' == 'true'">
+      <AdditionalOptions>/D HADOOP_SNAPPY_LIBRARY=L\"snappy.dll\"</AdditionalOptions>
+    </ClCompile>
+    <ClCompile Include="src\org\apache\hadoop\io\compress\snappy\SnappyDecompressor.c" Condition="'$(SnappyEnabled)' == 'true'">
+      <AdditionalOptions>/D HADOOP_SNAPPY_LIBRARY=L\"snappy.dll\"</AdditionalOptions>
+    </ClCompile>
     <ClCompile Include="src\org\apache\hadoop\io\compress\lz4\lz4.c" />
+    <ClCompile Include="src\org\apache\hadoop\io\compress\lz4\lz4hc.c" />
     <ClCompile Include="src\org\apache\hadoop\io\compress\lz4\Lz4Compressor.c" />
     <ClCompile Include="src\org\apache\hadoop\io\compress\lz4\Lz4Decompressor.c" />
     <ClCompile Include="src\org\apache\hadoop\io\nativeio\file_descriptor.c" />
     <ClCompile Include="src\org\apache\hadoop\io\nativeio\NativeIO.c" />
     <ClCompile Include="src\org\apache\hadoop\security\JniBasedUnixGroupsMappingWin.c" />
     <ClCompile Include="src\org\apache\hadoop\util\bulk_crc32.c" />
-    <ClCompile Include="src\org\apache\hadoop\util\NativeCodeLoader.c" />
+    <ClCompile Include="src\org\apache\hadoop\util\NativeCodeLoader.c">
+      <AdditionalOptions Condition="'$(SnappyEnabled)' == 'true'">/D HADOOP_SNAPPY_LIBRARY=L\"snappy.dll\"</AdditionalOptions>
+    </ClCompile>
     <ClCompile Include="src\org\apache\hadoop\util\NativeCrc32.c" />
   </ItemGroup>
   <ItemGroup>
     <ClInclude Include="..\src\org\apache\hadoop\util\crc32c_tables.h" />
     <ClInclude Include="..\src\org\apache\hadoop\util\crc32_zlib_polynomial_tables.h" />
+    <ClInclude Include="src\org\apache\hadoop\io\compress\snappy\org_apache_hadoop_io_compress_snappy.h" />
     <ClInclude Include="src\org\apache\hadoop\io\nativeio\file_descriptor.h" />
     <ClInclude Include="src\org\apache\hadoop\util\bulk_crc32.h" />
     <ClInclude Include="src\org\apache\hadoop\util\crc32c_tables.h" />

Modified: hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/native/native.vcxproj.filters
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/native/native.vcxproj.filters?rev=1513258&r1=1513257&r2=1513258&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/native/native.vcxproj.filters (original)
+++ hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/native/native.vcxproj.filters Mon Aug 12 21:25:49 2013
@@ -51,6 +51,9 @@
     <ClCompile Include="src\org\apache\hadoop\io\compress\lz4\lz4.c">
       <Filter>Source Files</Filter>
     </ClCompile>
+    <ClCompile Include="src\org\apache\hadoop\io\compress\lz4\lz4hc.c">
+      <Filter>Source Files</Filter>
+    </ClCompile>
     <ClCompile Include="src\org\apache\hadoop\io\compress\lz4\Lz4Compressor.c">
       <Filter>Source Files</Filter>
     </ClCompile>

Modified: hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/bzip2/Bzip2Compressor.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/bzip2/Bzip2Compressor.c?rev=1513258&r1=1513257&r2=1513258&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/bzip2/Bzip2Compressor.c (original)
+++ hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/bzip2/Bzip2Compressor.c Mon Aug 12 21:25:49 2013
@@ -239,6 +239,21 @@ Java_org_apache_hadoop_io_compress_bzip2
     }
 }
 
+JNIEXPORT jstring JNICALL
+Java_org_apache_hadoop_io_compress_bzip2_Bzip2Compressor_getLibraryName(JNIEnv *env, jclass class) {
+#ifdef UNIX
+  if (dlsym_BZ2_bzCompress) {
+    Dl_info dl_info;
+    if(dladdr(
+        dlsym_BZ2_bzCompress,
+        &dl_info)) {
+      return (*env)->NewStringUTF(env, dl_info.dli_fname);
+    }
+  }
+#endif
+  return (*env)->NewStringUTF(env, HADOOP_BZIP2_LIBRARY);
+}
+
 /**
  * vim: sw=2: ts=2: et:
  */
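
This is the native half of the library-name reporting added throughout this
commit: when bzip2 was dynamically loaded, dladdr() on the address of the
resolved BZ2_bzCompress symbol recovers the path of the shared object that
provides it (dli_fname); otherwise the compile-time HADOOP_BZIP2_LIBRARY
string is returned. The Java side would declare a matching native method,
presumably along these lines:

  // Sketch of the Java-side declaration paired with this JNI function.
  public static native String getLibraryName();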

Modified: hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/Lz4Compressor.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/Lz4Compressor.c?rev=1513258&r1=1513257&r2=1513258&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/Lz4Compressor.c (original)
+++ hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/Lz4Compressor.c Mon Aug 12 21:25:49 2013
@@ -23,21 +23,9 @@
 #ifdef UNIX
 #include "config.h"
 #endif // UNIX
+#include "lz4.h"
+#include "lz4hc.h"
 
-//****************************
-// Simple Functions
-//****************************
-
-extern int LZ4_compress   (const char* source, char* dest, int isize);
-
-/*
-LZ4_compress() :
- return : the number of bytes in compressed buffer dest
- note : destination buffer must be already allocated.
-  To avoid any problem, size it to handle worst cases situations (input data not compressible)
-  Worst case size is : "inputsize + 0.4%", with "0.4%" being at least 8 bytes.
-
-*/
 
 static jfieldID Lz4Compressor_clazz;
 static jfieldID Lz4Compressor_uncompressedDirectBuf;
@@ -103,3 +91,49 @@ JNIEXPORT jint JNICALL Java_org_apache_h
   return (jint)compressed_direct_buf_len;
 }
 
+JNIEXPORT jstring JNICALL
+Java_org_apache_hadoop_io_compress_lz4_Lz4Compressor_getLibraryName(
+ JNIEnv *env, jclass class
+ ) {
+  return (*env)->NewStringUTF(env, "revision:99");
+}
+
+JNIEXPORT jint JNICALL Java_org_apache_hadoop_io_compress_lz4_Lz4Compressor_compressBytesDirectHC
+(JNIEnv *env, jobject thisj){
+  const char* uncompressed_bytes = NULL;
+  char* compressed_bytes = NULL;
+
+  // Get members of Lz4Compressor
+  jobject clazz = (*env)->GetStaticObjectField(env, thisj, Lz4Compressor_clazz);
+  jobject uncompressed_direct_buf = (*env)->GetObjectField(env, thisj, Lz4Compressor_uncompressedDirectBuf);
+  jint uncompressed_direct_buf_len = (*env)->GetIntField(env, thisj, Lz4Compressor_uncompressedDirectBufLen);
+  jobject compressed_direct_buf = (*env)->GetObjectField(env, thisj, Lz4Compressor_compressedDirectBuf);
+  jint compressed_direct_buf_len = (*env)->GetIntField(env, thisj, Lz4Compressor_directBufferSize);
+
+  // Get the input direct buffer
+  LOCK_CLASS(env, clazz, "Lz4Compressor");
+  uncompressed_bytes = (const char*)(*env)->GetDirectBufferAddress(env, uncompressed_direct_buf);
+  UNLOCK_CLASS(env, clazz, "Lz4Compressor");
+
+  if (uncompressed_bytes == 0) {
+    return (jint)0;
+  }
+
+  // Get the output direct buffer
+  LOCK_CLASS(env, clazz, "Lz4Compressor");
+  compressed_bytes = (char *)(*env)->GetDirectBufferAddress(env, compressed_direct_buf);
+  UNLOCK_CLASS(env, clazz, "Lz4Compressor");
+
+  if (compressed_bytes == 0) {
+    return (jint)0;
+  }
+
+  compressed_direct_buf_len = LZ4_compressHC(uncompressed_bytes, compressed_bytes, uncompressed_direct_buf_len);
+  if (compressed_direct_buf_len < 0){
+    THROW(env, "java/lang/InternalError", "LZ4_compressHC failed");
+  }
+
+  (*env)->SetIntField(env, thisj, Lz4Compressor_uncompressedDirectBufLen, 0);
+
+  return (jint)compressed_direct_buf_len;
+}
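
Two additions here: getLibraryName() returns the literal "revision:99"
because lz4 is compiled into libhadoop itself, so there is no separate shared
object whose path could be reported; and compressBytesDirectHC() exposes
LZ4's high-compression entry point, LZ4_compressHC(), which trades
compression speed for a better ratio while producing output any standard LZ4
decompressor can read. The Java side would pair it with an instance native
declaration, presumably:

  // Sketch of the Java-side declaration for the new high-compression path.
  public native int compressBytesDirectHC();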

Modified: hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/Lz4Decompressor.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/Lz4Decompressor.c?rev=1513258&r1=1513257&r2=1513258&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/Lz4Decompressor.c (original)
+++ hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/Lz4Decompressor.c Mon Aug 12 21:25:49 2013
@@ -22,18 +22,7 @@
 #ifdef UNIX
 #include "config.h"
 #endif // UNIX
-
-int LZ4_uncompress_unknownOutputSize(const char* source, char* dest, int isize, int maxOutputSize);
-
-/*
-LZ4_uncompress_unknownOutputSize() :
- isize  : is the input size, therefore the compressed size
- maxOutputSize : is the size of the destination buffer (which must be already allocated)
- return : the number of bytes decoded in the destination buffer (necessarily <= maxOutputSize)
-    If the source stream is malformed, the function will stop decoding and return a negative result, indicating the byte position of the faulty instruction
-    This version never writes beyond dest + maxOutputSize, and is therefore protected against malicious data packets
- note   : This version is a bit slower than LZ4_uncompress
-*/
+#include "lz4.h"
 
 
 static jfieldID Lz4Decompressor_clazz;
@@ -89,7 +78,7 @@ JNIEXPORT jint JNICALL Java_org_apache_h
     return (jint)0;
   }
 
-  uncompressed_direct_buf_len = LZ4_uncompress_unknownOutputSize(compressed_bytes, uncompressed_bytes, compressed_direct_buf_len, uncompressed_direct_buf_len);
+  uncompressed_direct_buf_len = LZ4_decompress_safe(compressed_bytes, uncompressed_bytes, compressed_direct_buf_len, uncompressed_direct_buf_len);
   if (uncompressed_direct_buf_len < 0) {
     THROW(env, "Ljava/lang/InternalError", "LZ4_uncompress_unknownOutputSize failed.");
   }
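
The decompressor now calls the bundled library's LZ4_decompress_safe()
instead of a locally declared LZ4_uncompress_unknownOutputSize(); the two
have the same contract (decode into a bounded buffer, return the number of
bytes written or a negative value on malformed input), the newer name simply
comes from the updated lz4.h included above. Note the THROW message in the
unchanged context line still names the old function.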

Modified: hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4.c?rev=1513258&r1=1513257&r2=1513258&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4.c (original)
+++ hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4.c Mon Aug 12 21:25:49 2013
@@ -1,19 +1,19 @@
 /*
    LZ4 - Fast LZ compression algorithm
-   Copyright (C) 2011, Yann Collet.
-   BSD License
+   Copyright (C) 2011-2013, Yann Collet.
+   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
 
    Redistribution and use in source and binary forms, with or without
    modification, are permitted provided that the following conditions are
    met:
-  
+
        * Redistributions of source code must retain the above copyright
    notice, this list of conditions and the following disclaimer.
        * Redistributions in binary form must reproduce the above
    copyright notice, this list of conditions and the following disclaimer
    in the documentation and/or other materials provided with the
    distribution.
-  
+
    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
    "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@@ -25,621 +25,672 @@
    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+   You can contact the author at :
+   - LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html
+   - LZ4 source repository : http://code.google.com/p/lz4/
+*/
+
+/*
+Note : this source file requires "lz4_encoder.h"
 */
 
 //**************************************
-//  Copy from:
-// URL: http://lz4.googlecode.com/svn/trunk/lz4.c
-// Repository Root: http://lz4.googlecode.com/svn
-// Repository UUID: 650e7d94-2a16-8b24-b05c-7c0b3f6821cd
-// Revision: 43
-// Node Kind: file
-// Last Changed Author: yann.collet.73@gmail.com
-// Last Changed Rev: 43
-// Last Changed Date: 2011-12-16 15:41:46 -0800 (Fri, 16 Dec 2011)
-// Sha1: 9db7b2c57698c528d79572e6bce2e7dc33fa5998
+// Tuning parameters
 //**************************************
+// MEMORY_USAGE :
+// Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
+// Increasing memory usage improves compression ratio
+// Reduced memory usage can improve speed, due to cache effect
+// Default value is 14, for 16KB, which nicely fits into Intel x86 L1 cache
+#define MEMORY_USAGE 14
+
+// HEAPMODE :
+// Select how default compression function will allocate memory for its hash table,
+// in memory stack (0:default, fastest), or in memory heap (1:requires memory allocation (malloc)).
+// Default allocation strategy is to use stack (HEAPMODE 0)
+// Note : explicit functions *_stack* and *_heap* are unaffected by this setting
+#define HEAPMODE 0
+
 
 //**************************************
-// Compilation Directives
+// CPU Feature Detection
 //**************************************
-#if __STDC_VERSION__ >= 199901L
-  /* "restrict" is a known keyword */
+// 32 or 64 bits ?
+#if (defined(__x86_64__) || defined(_M_X64) || defined(_WIN64) \
+  || defined(__powerpc64__) || defined(__ppc64__) || defined(__PPC64__) \
+  || defined(__64BIT__) || defined(_LP64) || defined(__LP64__) \
+  || defined(__ia64) || defined(__itanium__) || defined(_M_IA64) )   // Detects 64 bits mode
+#  define LZ4_ARCH64 1
 #else
-#define restrict  // Disable restrict
+#  define LZ4_ARCH64 0
+#endif
+
+// Little Endian or Big Endian ?
+// Overwrite the #define below if you know your architecture endianess
+#if defined (__GLIBC__)
+#  include <endian.h>
+#  if (__BYTE_ORDER == __BIG_ENDIAN)
+#     define LZ4_BIG_ENDIAN 1
+#  endif
+#elif (defined(__BIG_ENDIAN__) || defined(__BIG_ENDIAN) || defined(_BIG_ENDIAN)) && !(defined(__LITTLE_ENDIAN__) || defined(__LITTLE_ENDIAN) || defined(_LITTLE_ENDIAN))
+#  define LZ4_BIG_ENDIAN 1
+#elif defined(__sparc) || defined(__sparc__) \
+   || defined(__powerpc__) || defined(__ppc__) || defined(__PPC__) \
+   || defined(__hpux)  || defined(__hppa) \
+   || defined(_MIPSEB) || defined(__s390__)
+#  define LZ4_BIG_ENDIAN 1
+#else
+// Little Endian assumed. PDP Endian and other very rare endian format are unsupported.
+#endif
+
+// Unaligned memory access is automatically enabled for "common" CPU, such as x86.
+// For others CPU, such as ARM, the compiler may be more cautious, inserting unnecessary extra code to ensure aligned access property
+// If you know your target CPU supports unaligned memory access, you want to force this option manually to improve performance
+#if defined(__ARM_FEATURE_UNALIGNED)
+#  define LZ4_FORCE_UNALIGNED_ACCESS 1
+#endif
+
+// Define this parameter if your target system or compiler does not support hardware bit count
+#if defined(_MSC_VER) && defined(_WIN32_WCE)            // Visual Studio for Windows CE does not support Hardware bit count
+#  define LZ4_FORCE_SW_BITCOUNT
 #endif
 
+// BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE :
+// This option may provide a small boost to performance for some big endian cpu, although probably modest.
+// You may set this option to 1 if data will remain within closed environment.
+// This option is useless on Little_Endian CPU (such as x86)
+//#define BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE 1
+
 
 //**************************************
-// Includes
+// Compiler Options
 //**************************************
-#include <stdlib.h>   // for malloc
-#include <string.h>   // for memset
+#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)   // C99
+/* "restrict" is a known keyword */
+#else
+#  define restrict // Disable restrict
+#endif
+
+#ifdef _MSC_VER    // Visual Studio
+#  define forceinline static __forceinline
+#  include <intrin.h>                    // For Visual 2005
+#  if LZ4_ARCH64   // 64-bits
+#    pragma intrinsic(_BitScanForward64) // For Visual 2005
+#    pragma intrinsic(_BitScanReverse64) // For Visual 2005
+#  else            // 32-bits
+#    pragma intrinsic(_BitScanForward)   // For Visual 2005
+#    pragma intrinsic(_BitScanReverse)   // For Visual 2005
+#  endif
+#  pragma warning(disable : 4127)        // disable: C4127: conditional expression is constant
+#else 
+#  ifdef __GNUC__
+#    define forceinline static inline __attribute__((always_inline))
+#  else
+#    define forceinline static inline
+#  endif
+#endif
+
+#ifdef _MSC_VER
+#  define lz4_bswap16(x) _byteswap_ushort(x)
+#else
+#  define lz4_bswap16(x) ((unsigned short int) ((((x) >> 8) & 0xffu) | (((x) & 0xffu) << 8)))
+#endif
+
+#define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
+
+#if (GCC_VERSION >= 302) || (__INTEL_COMPILER >= 800) || defined(__clang__)
+#  define expect(expr,value)    (__builtin_expect ((expr),(value)) )
+#else
+#  define expect(expr,value)    (expr)
+#endif
+
+#define likely(expr)     expect((expr) != 0, 1)
+#define unlikely(expr)   expect((expr) != 0, 0)
 
 
 //**************************************
-// Performance parameter               
+// Includes
 //**************************************
-// Increasing this value improves compression ratio
-// Lowering this value reduces memory usage
-// Lowering may also improve speed, typically on reaching cache size limits (L1 32KB for Intel, 64KB for AMD)
-// Memory usage formula for 32 bits systems : N->2^(N+2) Bytes (examples : 17 -> 512KB ; 12 -> 16KB)
-#define HASH_LOG 12
+#include <stdlib.h>   // for malloc
+#include <string.h>   // for memset
+#include "lz4.h"
 
 
 //**************************************
 // Basic Types
 //**************************************
-#if defined(_MSC_VER)    // Visual Studio does not support 'stdint' natively
-#define BYTE	unsigned __int8
-#define U16		unsigned __int16
-#define U32		unsigned __int32
-#define S32		__int32
-#else
-#include <stdint.h>
-#define BYTE	uint8_t
-#define U16		uint16_t
-#define U32		uint32_t
-#define S32		int32_t
+#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   // C99
+# include <stdint.h>
+  typedef  uint8_t BYTE;
+  typedef uint16_t U16;
+  typedef uint32_t U32;
+  typedef  int32_t S32;
+  typedef uint64_t U64;
+#else
+  typedef unsigned char       BYTE;
+  typedef unsigned short      U16;
+  typedef unsigned int        U32;
+  typedef   signed int        S32;
+  typedef unsigned long long  U64;
 #endif
 
+#if defined(__GNUC__)  && !defined(LZ4_FORCE_UNALIGNED_ACCESS)
+#  define _PACKED __attribute__ ((packed))
+#else
+#  define _PACKED
+#endif
+
+#if !defined(LZ4_FORCE_UNALIGNED_ACCESS) && !defined(__GNUC__)
+#  ifdef __IBMC__
+#    pragma pack(1)
+#  else
+#    pragma pack(push, 1)
+#  endif
+#endif
+
+typedef struct { U16 v; }  _PACKED U16_S;
+typedef struct { U32 v; }  _PACKED U32_S;
+typedef struct { U64 v; }  _PACKED U64_S;
+typedef struct {size_t v;} _PACKED size_t_S;
+
+#if !defined(LZ4_FORCE_UNALIGNED_ACCESS) && !defined(__GNUC__)
+#  pragma pack(pop)
+#endif
+
+#define A16(x)   (((U16_S *)(x))->v)
+#define A32(x)   (((U32_S *)(x))->v)
+#define A64(x)   (((U64_S *)(x))->v)
+#define AARCH(x) (((size_t_S *)(x))->v)
+
 
 //**************************************
 // Constants
 //**************************************
+#define HASHTABLESIZE (1 << MEMORY_USAGE)
+
 #define MINMATCH 4
-#define SKIPSTRENGTH 6
-#define STACKLIMIT 13
-#define HEAPMODE (HASH_LOG>STACKLIMIT)  // Defines if memory is allocated into the stack (local variable), or into the heap (malloc()).
-#define COPYTOKEN 4
+
 #define COPYLENGTH 8
 #define LASTLITERALS 5
 #define MFLIMIT (COPYLENGTH+MINMATCH)
 #define MINLENGTH (MFLIMIT+1)
 
+#define LZ4_64KLIMIT ((1<<16) + (MFLIMIT-1))
+#define SKIPSTRENGTH 6     // Increasing this value will make the compression run slower on incompressible data
+
 #define MAXD_LOG 16
 #define MAX_DISTANCE ((1 << MAXD_LOG) - 1)
 
-#define HASHTABLESIZE (1 << HASH_LOG)
-#define HASH_MASK (HASHTABLESIZE - 1)
-
-#define ML_BITS 4
-#define ML_MASK ((1U<<ML_BITS)-1)
+#define ML_BITS  4
+#define ML_MASK  ((1U<<ML_BITS)-1)
 #define RUN_BITS (8-ML_BITS)
 #define RUN_MASK ((1U<<RUN_BITS)-1)
 
 
 //**************************************
-// Local structures
+// Architecture-specific macros
 //**************************************
-struct refTables
-{
-	const BYTE* hashTable[HASHTABLESIZE];
-};
-
-#ifdef __GNUC__
-#  define _PACKED __attribute__ ((packed))
-#else
-#  define _PACKED
+#define STEPSIZE                  sizeof(size_t)
+#define LZ4_COPYSTEP(s,d)         { AARCH(d) = AARCH(s); d+=STEPSIZE; s+=STEPSIZE; }
+#define LZ4_COPY8(s,d)            { LZ4_COPYSTEP(s,d); if (STEPSIZE<8) LZ4_COPYSTEP(s,d); }
+#define LZ4_SECURECOPY(s,d,e)     { if ((STEPSIZE==8)&&(d<e)) LZ4_WILDCOPY(s,d,e); }
+
+#if LZ4_ARCH64   // 64-bit
+#  define HTYPE                   U32
+#  define INITBASE(base)          const BYTE* const base = ip
+#else            // 32-bit
+#  define HTYPE                   const BYTE*
+#  define INITBASE(base)          const int base = 0
+#endif
+
+#if (defined(LZ4_BIG_ENDIAN) && !defined(BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE))
+#  define LZ4_READ_LITTLEENDIAN_16(d,s,p) { U16 v = A16(p); v = lz4_bswap16(v); d = (s) - v; }
+#  define LZ4_WRITE_LITTLEENDIAN_16(p,i)  { U16 v = (U16)(i); v = lz4_bswap16(v); A16(p) = v; p+=2; }
+#else      // Little Endian
+#  define LZ4_READ_LITTLEENDIAN_16(d,s,p) { d = (s) - A16(p); }
+#  define LZ4_WRITE_LITTLEENDIAN_16(p,v)  { A16(p) = v; p+=2; }
 #endif
 
-typedef struct _U32_S
-{
-	U32 v;
-} _PACKED U32_S;
-
-typedef struct _U16_S
-{
-	U16 v;
-} _PACKED U16_S;
-
-#define A32(x) (((U32_S *)(x))->v)
-#define A16(x) (((U16_S *)(x))->v)
-
 
 //**************************************
 // Macros
 //**************************************
-#define LZ4_HASH_FUNCTION(i)	(((i) * 2654435761U) >> ((MINMATCH*8)-HASH_LOG))
-#define LZ4_HASH_VALUE(p)		LZ4_HASH_FUNCTION(A32(p))
-#define LZ4_COPYPACKET(s,d)		A32(d) = A32(s); d+=4; s+=4; A32(d) = A32(s); d+=4; s+=4;
-#define LZ4_WILDCOPY(s,d,e)		do { LZ4_COPYPACKET(s,d) } while (d<e);
-#define LZ4_BLINDCOPY(s,d,l)	{ BYTE* e=d+l; LZ4_WILDCOPY(s,d,e); d=e; }
-
+#define LZ4_WILDCOPY(s,d,e)     { do { LZ4_COPY8(s,d) } while (d<e); }
+#define LZ4_BLINDCOPY(s,d,l)    { BYTE* e=(d)+(l); LZ4_WILDCOPY(s,d,e); d=e; }
 
 
 //****************************
-// Compression CODE
+// Private functions
 //****************************
+#if LZ4_ARCH64
+
+forceinline int LZ4_NbCommonBytes (register U64 val)
+{
+# if defined(LZ4_BIG_ENDIAN)
+#   if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
+    unsigned long r = 0;
+    _BitScanReverse64( &r, val );
+    return (int)(r>>3);
+#   elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
+    return (__builtin_clzll(val) >> 3);
+#   else
+    int r;
+    if (!(val>>32)) { r=4; } else { r=0; val>>=32; }
+    if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }
+    r += (!val);
+    return r;
+#   endif
+# else
+#   if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
+    unsigned long r = 0;
+    _BitScanForward64( &r, val );
+    return (int)(r>>3);
+#   elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
+    return (__builtin_ctzll(val) >> 3);
+#   else
+    static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, 0, 2, 3, 6, 1, 5, 3, 5, 1, 3, 4, 4, 2, 5, 6, 7, 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5, 5, 3, 4, 5, 6, 7, 1, 2, 4, 6, 4, 4, 5, 7, 2, 6, 5, 7, 6, 7, 7 };
+    return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58];
+#   endif
+# endif
+}
 
-int LZ4_compressCtx(void** ctx,
-				 char* source, 
-				 char* dest,
-				 int isize)
-{	
-#if HEAPMODE
-	struct refTables *srt = (struct refTables *) (*ctx);
-	const BYTE** HashTable;
 #else
-	const BYTE* HashTable[HASHTABLESIZE] = {0};
-#endif
 
-	const BYTE* ip = (BYTE*) source;       
-	const BYTE* anchor = ip;
-	const BYTE* const iend = ip + isize;
-	const BYTE* const mflimit = iend - MFLIMIT;
-#define matchlimit (iend - LASTLITERALS)
+forceinline int LZ4_NbCommonBytes (register U32 val)
+{
+# if defined(LZ4_BIG_ENDIAN)
+#   if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
+    unsigned long r = 0;
+    _BitScanReverse( &r, val );
+    return (int)(r>>3);
+#   elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
+    return (__builtin_clz(val) >> 3);
+#   else
+    int r;
+    if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; }
+    r += (!val);
+    return r;
+#   endif
+# else
+#   if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
+    unsigned long r;
+    _BitScanForward( &r, val );
+    return (int)(r>>3);
+#   elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
+    return (__builtin_ctz(val) >> 3);
+#   else
+    static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2, 1, 3, 2, 0, 1, 3, 3, 1, 2, 2, 2, 2, 0, 3, 1, 2, 0, 1, 0, 1, 1 };
+    return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];
+#   endif
+# endif
+}
 
-	BYTE* op = (BYTE*) dest;
-	
-#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
-	const size_t DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2, 1, 3, 2, 0, 1, 3, 3, 1, 2, 2, 2, 2, 0, 3, 1, 2, 0, 1, 0, 1, 1 };
 #endif
-	int len, length;
-	const int skipStrength = SKIPSTRENGTH;
-	U32 forwardH;
 
 
-	// Init 
-	if (isize<MINLENGTH) goto _last_literals;
-#if HEAPMODE
-	if (*ctx == NULL) 
-	{
-		srt = (struct refTables *) malloc ( sizeof(struct refTables) );
-		*ctx = (void*) srt;
-	}
-	HashTable = srt->hashTable;
-	memset((void*)HashTable, 0, sizeof(srt->hashTable));
-#else
-	(void) ctx;
-#endif
-
-
-	// First Byte
-	HashTable[LZ4_HASH_VALUE(ip)] = ip;
-	ip++; forwardH = LZ4_HASH_VALUE(ip);
-	
-	// Main Loop
-    for ( ; ; ) 
-	{
-		int findMatchAttempts = (1U << skipStrength) + 3;
-		const BYTE* forwardIp = ip;
-		const BYTE* ref;
-		BYTE* token;
-
-		// Find a match
-		do {
-			U32 h = forwardH;
-			int step = findMatchAttempts++ >> skipStrength;
-			ip = forwardIp;
-			forwardIp = ip + step;
-
-			if (forwardIp > mflimit) { goto _last_literals; }
-
-			forwardH = LZ4_HASH_VALUE(forwardIp);
-			ref = HashTable[h];
-			HashTable[h] = ip;
-
-		} while ((ref < ip - MAX_DISTANCE) || (A32(ref) != A32(ip)));
-
-		// Catch up
-		while ((ip>anchor) && (ref>(BYTE*)source) && (ip[-1]==ref[-1])) { ip--; ref--; }  
-
-		// Encode Literal length
-		length = ip - anchor;
-		token = op++;
-		if (length>=(int)RUN_MASK) { *token=(RUN_MASK<<ML_BITS); len = length-RUN_MASK; for(; len > 254 ; len-=255) *op++ = 255; *op++ = (BYTE)len; } 
-		else *token = (length<<ML_BITS);
-
-		// Copy Literals
-		LZ4_BLINDCOPY(anchor, op, length);
-
-
-_next_match:
-		// Encode Offset
-#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
-		A16(op) = (ip-ref); op+=2;
-#else
-		{ int delta = ip-ref; *op++ = delta; *op++ = delta>>8; }
-#endif
-
-		// Start Counting
-		ip+=MINMATCH; ref+=MINMATCH;   // MinMatch verified
-		anchor = ip;
-		while (ip<matchlimit-3)
-		{
-#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
-			int diff = A32(ref) ^ A32(ip);
-			if (!diff) { ip+=4; ref+=4; continue; }
-			ip += DeBruijnBytePos[((U32)((diff & -diff) * 0x077CB531U)) >> 27];
-#else
-			if (A32(ref) == A32(ip)) { ip+=4; ref+=4; continue; }
-			if (A16(ref) == A16(ip)) { ip+=2; ref+=2; }
-			if (*ref == *ip) ip++;
-#endif
-			goto _endCount;
-		}
-		if ((ip<(matchlimit-1)) && (A16(ref) == A16(ip))) { ip+=2; ref+=2; }
-		if ((ip<matchlimit) && (*ref == *ip)) ip++;
-_endCount:
-		len = (ip - anchor);
-		
-		// Encode MatchLength
-		if (len>=(int)ML_MASK) { *token+=ML_MASK; len-=ML_MASK; for(; len > 509 ; len-=510) { *op++ = 255; *op++ = 255; } if (len > 254) { len-=255; *op++ = 255; } *op++ = (BYTE)len; } 
-		else *token += len;	
-
-		// Test end of chunk
-		if (ip > mflimit) { anchor = ip;  break; }
-
-		// Fill table
-		HashTable[LZ4_HASH_VALUE(ip-2)] = ip-2;
-
-		// Test next position
-		ref = HashTable[LZ4_HASH_VALUE(ip)];
-		HashTable[LZ4_HASH_VALUE(ip)] = ip;
-		if ((ref > ip - (MAX_DISTANCE + 1)) && (A32(ref) == A32(ip))) { token = op++; *token=0; goto _next_match; }
-
-		// Prepare next loop
-		anchor = ip++; 
-		forwardH = LZ4_HASH_VALUE(ip);
-	}
-
-_last_literals:
-	// Encode Last Literals
-	{
-		int lastRun = iend - anchor;
-		if (lastRun>=(int)RUN_MASK) { *op++=(RUN_MASK<<ML_BITS); lastRun-=RUN_MASK; for(; lastRun > 254 ; lastRun-=255) *op++ = 255; *op++ = (BYTE) lastRun; } 
-		else *op++ = (lastRun<<ML_BITS);
-		memcpy(op, anchor, iend - anchor);
-		op += iend-anchor;
-	} 
 
-	// End
-	return (int) (((char*)op)-dest);
-}
+//******************************
+// Compression functions
+//******************************
 
+/*
+int LZ4_compress_stack(
+                 const char* source,
+                 char* dest,
+                 int inputSize)
+
+Compress 'inputSize' bytes from 'source' into an output buffer 'dest'.
+Destination buffer must be already allocated, and sized at a minimum of LZ4_compressBound(inputSize).
+return : the number of bytes written in buffer 'dest'
+*/
+#define FUNCTION_NAME LZ4_compress_stack
+#include "lz4_encoder.h"
 
 
-// Note : this function is valid only if isize < LZ4_64KLIMIT
-#define LZ4_64KLIMIT ((1U<<16) + (MFLIMIT-1))
-#define HASHLOG64K (HASH_LOG+1)
-#define LZ4_HASH64K_FUNCTION(i)	(((i) * 2654435761U) >> ((MINMATCH*8)-HASHLOG64K))
-#define LZ4_HASH64K_VALUE(p)	LZ4_HASH64K_FUNCTION(A32(p))
-int LZ4_compress64kCtx(void** ctx,
-				 char* source, 
-				 char* dest,
-				 int isize)
-{	
-#if HEAPMODE
-	struct refTables *srt = (struct refTables *) (*ctx);
-	U16* HashTable;
-#else
-	U16 HashTable[HASHTABLESIZE<<1] = {0};
-#endif
+/*
+int LZ4_compress_stack_limitedOutput(
+                 const char* source,
+                 char* dest,
+                 int inputSize,
+                 int maxOutputSize)
+
+Compress 'inputSize' bytes from 'source' into an output buffer 'dest' of maximum size 'maxOutputSize'.
+If it cannot achieve it, compression will stop, and result of the function will be zero.
+return : the number of bytes written in buffer 'dest', or 0 if the compression fails
+*/
+#define FUNCTION_NAME LZ4_compress_stack_limitedOutput
+#define LIMITED_OUTPUT
+#include "lz4_encoder.h"
 
-	const BYTE* ip = (BYTE*) source;       
-	const BYTE* anchor = ip;
-	const BYTE* const base = ip;
-	const BYTE* const iend = ip + isize;
-	const BYTE* const mflimit = iend - MFLIMIT;
-#define matchlimit (iend - LASTLITERALS)
 
-	BYTE* op = (BYTE*) dest;
-	
-#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
-	const size_t DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2, 1, 3, 2, 0, 1, 3, 3, 1, 2, 2, 2, 2, 0, 3, 1, 2, 0, 1, 0, 1, 1 };
-#endif
-	int len, length;
-	const int skipStrength = SKIPSTRENGTH;
-	U32 forwardH;
+/*
+int LZ4_compress64k_stack(
+                 const char* source,
+                 char* dest,
+                 int inputSize)
+
+Compress 'inputSize' bytes from 'source' into an output buffer 'dest'.
+This function compresses better than LZ4_compress_stack(), on the condition that
+'inputSize' must be < to LZ4_64KLIMIT, or the function will fail.
+Destination buffer must be already allocated, and sized at a minimum of LZ4_compressBound(inputSize).
+return : the number of bytes written in buffer 'dest', or 0 if compression fails
+*/
+#define FUNCTION_NAME LZ4_compress64k_stack
+#define COMPRESS_64K
+#include "lz4_encoder.h"
 
 
-	// Init 
-	if (isize<MINLENGTH) goto _last_literals;
-#if HEAPMODE
-	if (*ctx == NULL) 
-	{
-		srt = (struct refTables *) malloc ( sizeof(struct refTables) );
-		*ctx = (void*) srt;
-	}
-	HashTable = (U16*)(srt->hashTable);
-	memset((void*)HashTable, 0, sizeof(srt->hashTable));
-#else
-	(void) ctx;
-#endif
-
-
-	// First Byte
-	ip++; forwardH = LZ4_HASH64K_VALUE(ip);
-	
-	// Main Loop
-    for ( ; ; ) 
-	{
-		int findMatchAttempts = (1U << skipStrength) + 3;
-		const BYTE* forwardIp = ip;
-		const BYTE* ref;
-		BYTE* token;
-
-		// Find a match
-		do {
-			U32 h = forwardH;
-			int step = findMatchAttempts++ >> skipStrength;
-			ip = forwardIp;
-			forwardIp = ip + step;
-
-			if (forwardIp > mflimit) { goto _last_literals; }
-
-			forwardH = LZ4_HASH64K_VALUE(forwardIp);
-			ref = base + HashTable[h];
-			HashTable[h] = ip - base;
-
-		} while (A32(ref) != A32(ip));
-
-		// Catch up
-		while ((ip>anchor) && (ref>(BYTE*)source) && (ip[-1]==ref[-1])) { ip--; ref--; }  
-
-		// Encode Literal length
-		length = ip - anchor;
-		token = op++;
-		if (length>=(int)RUN_MASK) { *token=(RUN_MASK<<ML_BITS); len = length-RUN_MASK; for(; len > 254 ; len-=255) *op++ = 255; *op++ = (BYTE)len; } 
-		else *token = (length<<ML_BITS);
-
-		// Copy Literals
-		LZ4_BLINDCOPY(anchor, op, length);
-
-
-_next_match:
-		// Encode Offset
-#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
-		A16(op) = (ip-ref); op+=2;
-#else
-		{ int delta = ip-ref; *op++ = delta; *op++ = delta>>8; }
-#endif
-
-		// Start Counting
-		ip+=MINMATCH; ref+=MINMATCH;   // MinMatch verified
-		anchor = ip;
-		while (ip<matchlimit-3)
-		{
-#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
-			int diff = A32(ref) ^ A32(ip);
-			if (!diff) { ip+=4; ref+=4; continue; }
-			ip += DeBruijnBytePos[((U32)((diff & -diff) * 0x077CB531U)) >> 27];
-#else
-			if (A32(ref) == A32(ip)) { ip+=4; ref+=4; continue; }
-			if (A16(ref) == A16(ip)) { ip+=2; ref+=2; }
-			if (*ref == *ip) ip++;
-#endif
-			goto _endCount;
-		}
-		if ((ip<(matchlimit-1)) && (A16(ref) == A16(ip))) { ip+=2; ref+=2; }
-		if ((ip<matchlimit) && (*ref == *ip)) ip++;
-_endCount:
-		len = (ip - anchor);
-		
-		// Encode MatchLength
-		if (len>=(int)ML_MASK) { *token+=ML_MASK; len-=ML_MASK; for(; len > 509 ; len-=510) { *op++ = 255; *op++ = 255; } if (len > 254) { len-=255; *op++ = 255; } *op++ = (BYTE)len; } 
-		else *token += len;	
-
-		// Test end of chunk
-		if (ip > mflimit) { anchor = ip;  break; }
-
-		// Test next position
-		ref = base + HashTable[LZ4_HASH64K_VALUE(ip)];
-		HashTable[LZ4_HASH64K_VALUE(ip)] = ip - base;
-		if (A32(ref) == A32(ip)) { token = op++; *token=0; goto _next_match; }
-
-		// Prepare next loop
-		anchor = ip++; 
-		forwardH = LZ4_HASH64K_VALUE(ip);
-	}
-
-_last_literals:
-	// Encode Last Literals
-	{
-		int lastRun = iend - anchor;
-		if (lastRun>=(int)RUN_MASK) { *op++=(RUN_MASK<<ML_BITS); lastRun-=RUN_MASK; for(; lastRun > 254 ; lastRun-=255) *op++ = 255; *op++ = (BYTE) lastRun; } 
-		else *op++ = (lastRun<<ML_BITS);
-		memcpy(op, anchor, iend - anchor);
-		op += iend-anchor;
-	} 
+/*
+int LZ4_compress64k_stack_limitedOutput(
+                 const char* source,
+                 char* dest,
+                 int inputSize,
+                 int maxOutputSize)
+
+Compress 'inputSize' bytes from 'source' into an output buffer 'dest' of maximum size 'maxOutputSize'.
+This function compresses better than LZ4_compress_stack_limitedOutput(), on the condition that
+'inputSize' is strictly below LZ4_64KLIMIT; otherwise, the function will fail.
+If the compressed output cannot fit within 'maxOutputSize', compression stops and the function returns zero.
+return : the number of bytes written in buffer 'dest', or 0 if the compression fails
+*/
+#define FUNCTION_NAME LZ4_compress64k_stack_limitedOutput
+#define COMPRESS_64K
+#define LIMITED_OUTPUT
+#include "lz4_encoder.h"
+
+
+/*
+void* LZ4_create();
+int LZ4_free(void* ctx);
+
+Used to allocate and free the hashTable memory
+used by the LZ4_compress_heap* family of functions.
+LZ4_create() returns NULL if memory allocation fails.
+*/
+void* LZ4_create() { return malloc(HASHTABLESIZE); }
+int   LZ4_free(void* ctx) { free(ctx); return 0; }
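
A hedged lifecycle sketch for the heap-mode entry points (LZ4_compress_heap() is
instantiated just below; the caller name is hypothetical):

    /* Hypothetical sketch: allocate the hash table once, compress, then free it. */
    static int compressWithHeapCtx(const char* src, char* dst, int srcSize)
    {
        int result;
        void* ctx = LZ4_create();          /* NULL if allocation fails */
        if (ctx == NULL) return 0;
        result = LZ4_compress_heap(ctx, src, dst, srcSize);
        LZ4_free(ctx);
        return result;
    }
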
+
+
+/*
+int LZ4_compress_heap(
+                 void* ctx,
+                 const char* source,
+                 char* dest,
+                 int inputSize)
+
+Compress 'inputSize' bytes from 'source' into an output buffer 'dest'.
+The memory used for compression must be allocated by LZ4_create() and provided through pointer 'ctx'.
+Destination buffer must be already allocated, and sized at a minimum of LZ4_compressBound(inputSize).
+return : the number of bytes written in buffer 'dest'
+*/
+#define FUNCTION_NAME LZ4_compress_heap
+#define USE_HEAPMEMORY
+#include "lz4_encoder.h"
 
-	// End
-	return (int) (((char*)op)-dest);
-}
 
+/*
+int LZ4_compress_heap_limitedOutput(
+                 void* ctx,
+                 const char* source,
+                 char* dest,
+                 int inputSize,
+                 int maxOutputSize)
+
+Compress 'inputSize' bytes from 'source' into an output buffer 'dest' of maximum size 'maxOutputSize'.
+If the compressed output cannot fit within 'maxOutputSize', compression stops and the function returns zero.
+The memory used for compression must be allocated by LZ4_create() and provided through pointer 'ctx'.
+return : the number of bytes written in buffer 'dest', or 0 if the compression fails
+*/
+#define FUNCTION_NAME LZ4_compress_heap_limitedOutput
+#define LIMITED_OUTPUT
+#define USE_HEAPMEMORY
+#include "lz4_encoder.h"
+
+
+/*
+int LZ4_compress64k_heap(
+                 void* ctx,
+                 const char* source,
+                 char* dest,
+                 int inputSize)
+
+Compress 'inputSize' bytes from 'source' into an output buffer 'dest'.
+The memory used for compression must be allocated by LZ4_create() and provided through pointer 'ctx'.
+'inputSize' must be strictly below LZ4_64KLIMIT, or the function will fail.
+Destination buffer must be already allocated, and sized at a minimum of LZ4_compressBound(inputSize).
+return : the number of bytes written in buffer 'dest'
+*/
+#define FUNCTION_NAME LZ4_compress64k_heap
+#define COMPRESS_64K
+#define USE_HEAPMEMORY
+#include "lz4_encoder.h"
 
 
-int LZ4_compress(char* source, 
-				 char* dest,
-				 int isize)
+/*
+int LZ4_compress64k_heap_limitedOutput(
+                 void* ctx,
+                 const char* source,
+                 char* dest,
+                 int inputSize,
+                 int maxOutputSize)
+
+Compress 'inputSize' bytes from 'source' into an output buffer 'dest' of maximum size 'maxOutputSize'.
+If the compressed output cannot fit within 'maxOutputSize', compression stops and the function returns zero.
+The memory used for compression must be allocated by LZ4_create() and provided through pointer 'ctx'.
+'inputSize' must be strictly below LZ4_64KLIMIT, or the function will fail.
+return : the number of bytes written in buffer 'dest', or 0 if the compression fails
+*/
+#define FUNCTION_NAME LZ4_compress64k_heap_limitedOutput
+#define COMPRESS_64K
+#define LIMITED_OUTPUT
+#define USE_HEAPMEMORY
+#include "lz4_encoder.h"
+
+
+int LZ4_compress(const char* source, char* dest, int inputSize)
 {
 #if HEAPMODE
-	void* ctx = malloc(sizeof(struct refTables));
-	int result;
-	if (isize < LZ4_64KLIMIT)
-		result = LZ4_compress64kCtx(&ctx, source, dest, isize);
-	else result = LZ4_compressCtx(&ctx, source, dest, isize);
-	free(ctx);
-	return result;
+    void* ctx = LZ4_create();
+    int result;
+    if (ctx == NULL) return 0;    // Failed allocation => compression not done
+    if (inputSize < LZ4_64KLIMIT)
+        result = LZ4_compress64k_heap(ctx, source, dest, inputSize);
+    else result = LZ4_compress_heap(ctx, source, dest, inputSize);
+    LZ4_free(ctx);
+    return result;
 #else
-	if (isize < (int)LZ4_64KLIMIT) return LZ4_compress64kCtx(NULL, source, dest, isize);
-	return LZ4_compressCtx(NULL, source, dest, isize);
+    if (inputSize < (int)LZ4_64KLIMIT) return LZ4_compress64k_stack(source, dest, inputSize);
+    return LZ4_compress_stack(source, dest, inputSize);
 #endif
 }
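
Typical use of this public entry point, as a hedged sketch (assumes LZ4_compressBound()
from lz4.h; the helper name is hypothetical):

    #include <stdlib.h>

    /* Hypothetical sketch: size the destination for the worst case, then compress. */
    static char* compressToNewBuffer(const char* src, int srcSize, int* compressedSize)
    {
        char* dst = (char*) malloc((size_t) LZ4_compressBound(srcSize));
        if (dst == NULL) { *compressedSize = 0; return NULL; }
        *compressedSize = LZ4_compress(src, dst, srcSize);
        return dst;
    }
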
 
 
+int LZ4_compress_limitedOutput(const char* source, char* dest, int inputSize, int maxOutputSize)
+{
+#if HEAPMODE
+    void* ctx = LZ4_create();
+    int result;
+    if (ctx == NULL) return 0;    // Failed allocation => compression not done
+    if (inputSize < LZ4_64KLIMIT)
+        result = LZ4_compress64k_heap_limitedOutput(ctx, source, dest, inputSize, maxOutputSize);
+    else result = LZ4_compress_heap_limitedOutput(ctx, source, dest, inputSize, maxOutputSize);
+    LZ4_free(ctx);
+    return result;
+#else
+    if (inputSize < (int)LZ4_64KLIMIT) return LZ4_compress64k_stack_limitedOutput(source, dest, inputSize, maxOutputSize);
+    return LZ4_compress_stack_limitedOutput(source, dest, inputSize, maxOutputSize);
+#endif
+}
 
 
 //****************************
-// Decompression CODE
+// Decompression functions
 //****************************
 
-// Note : The decoding functions LZ4_uncompress() and LZ4_uncompress_unknownOutputSize() 
-//		are safe against "buffer overflow" attack type
-//		since they will *never* write outside of the provided output buffer :
-//		they both check this condition *before* writing anything.
-//		A corrupted packet however can make them *read* within the first 64K before the output buffer.
-
-int LZ4_uncompress(char* source, 
-				 char* dest,
-				 int osize)
-{	
-	// Local Variables
-	const BYTE* restrict ip = (const BYTE*) source;
-	const BYTE* restrict ref;
-
-	BYTE* restrict op = (BYTE*) dest;
-	BYTE* const oend = op + osize;
-	BYTE* cpy;
-
-	BYTE token;
-	
-	U32	dec[4]={0, 3, 2, 3};
-	int	len, length;
-
-
-	// Main Loop
-	while (1)
-	{
-		// get runlength
-		token = *ip++;
-		if ((length=(token>>ML_BITS)) == RUN_MASK)  { for (;(len=*ip++)==255;length+=255){} length += len; } 
-
-		// copy literals
-		cpy = op+length;
-		if (cpy>oend-COPYLENGTH) 
-		{ 
-			if (cpy > oend) goto _output_error;
-			memcpy(op, ip, length);
-			ip += length;
-			break;    // Necessarily EOF
-		}
-		LZ4_WILDCOPY(ip, op, cpy); ip -= (op-cpy); op = cpy;
-
-
-		// get offset
-#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
-		ref = cpy - A16(ip); ip+=2;
-#else
-		{ int delta = *ip++; delta += *ip++ << 8; ref = cpy - delta; }
-#endif
-
-		// get matchlength
-		if ((length=(token&ML_MASK)) == ML_MASK) { for (;*ip==255;length+=255) {ip++;} length += *ip++; } 
-
-		// copy repeated sequence
-		if (op-ref<COPYTOKEN)
-		{
-			*op++ = *ref++;
-			*op++ = *ref++;
-			*op++ = *ref++;
-			*op++ = *ref++;
-			ref -= dec[op-ref];
-			A32(op)=A32(ref); 
-		} else { A32(op)=A32(ref); op+=4; ref+=4; }
-		cpy = op + length;
-		if (cpy > oend-COPYLENGTH)
-		{
-			if (cpy > oend) goto _output_error;	
-			LZ4_WILDCOPY(ref, op, (oend-COPYLENGTH));
-			while(op<cpy) *op++=*ref++;
-			op=cpy;
-			if (op == oend) break;    // Check EOF (should never happen, since last 5 bytes are supposed to be literals)
-			continue;
-		}
-		LZ4_WILDCOPY(ref, op, cpy);
-		op=cpy;		// correction
-	}
-
-	// end of decoding
-	return (int) (((char*)ip)-source);
+typedef enum { noPrefix = 0, withPrefix = 1 } prefix64k_directive;
+typedef enum { endOnOutputSize = 0, endOnInputSize = 1 } endCondition_directive;
+typedef enum { full = 0, partial = 1 } earlyEnd_directive;
+
+
+// This generic decompression function covers all use cases.
+// It shall be instantiated several times, using different sets of directives.
+// Note that it is essential that this generic function be really inlined,
+// in order to remove useless branches during compilation optimisation.
+forceinline int LZ4_decompress_generic(
+                 const char* source,
+                 char* dest,
+                 int inputSize,          // Exact size of the compressed input; used only if endOnInput==endOnInputSize.
+                 int outputSize,         // OutputSize must be != 0; if endOnInput==endOnInputSize, this value is the max size of Output Buffer.
+
+                 int endOnInput,         // endOnOutputSize, endOnInputSize
+                 int prefix64k,          // noPrefix, withPrefix
+                 int partialDecoding,    // full, partial
+                 int targetOutputSize    // only used if partialDecoding==partial
+                 )
+{
+    // Local Variables
+    const BYTE* restrict ip = (const BYTE*) source;
+    const BYTE* ref;
+    const BYTE* const iend = ip + inputSize;
+
+    BYTE* op = (BYTE*) dest;
+    BYTE* const oend = op + outputSize;
+    BYTE* cpy;
+    BYTE* oexit = op + targetOutputSize;
+
+    size_t dec32table[] = {0, 3, 2, 3, 0, 0, 0, 0};
+#if LZ4_ARCH64
+    size_t dec64table[] = {0, 0, 0, (size_t)-1, 0, 1, 2, 3};
+#endif
+
+
+    // Special cases
+    if ((partialDecoding) && (oexit> oend-MFLIMIT)) oexit = oend-MFLIMIT;                        // targetOutputSize too high => decode everything
+    if ((endOnInput) && unlikely(outputSize==0)) return ((inputSize==1) && (*ip==0)) ? 0 : -1;   // Empty output buffer
+    if ((!endOnInput) && unlikely(outputSize==0)) return (*ip==0?1:-1);
+
+
+    // Main Loop
+    while (1)
+    {
+        unsigned token;
+        size_t length;
+
+        // get runlength
+        token = *ip++;
+        if ((length=(token>>ML_BITS)) == RUN_MASK)
+        { 
+            unsigned s=255; 
+            while (((endOnInput)?ip<iend:1) && (s==255))
+            { 
+                s = *ip++; 
+                length += s; 
+            } 
+        }
+
+        // copy literals
+        cpy = op+length;
+        if (((endOnInput) && ((cpy>(partialDecoding?oexit:oend-MFLIMIT)) || (ip+length>iend-(2+1+LASTLITERALS))) )
+            || ((!endOnInput) && (cpy>oend-COPYLENGTH)))
+        {
+            if (partialDecoding)
+            {
+                if (cpy > oend) goto _output_error;                           // Error : write attempt beyond end of output buffer
+                if ((endOnInput) && (ip+length > iend)) goto _output_error;   // Error : read attempt beyond end of input buffer
+            }
+            else
+            {
+                if ((!endOnInput) && (cpy != oend)) goto _output_error;       // Error : block decoding must stop exactly there
+                if ((endOnInput) && ((ip+length != iend) || (cpy > oend))) goto _output_error;   // Error : input must be consumed
+            }
+            memcpy(op, ip, length);
+            ip += length;
+            op += length;
+            break;                                       // Necessarily EOF, due to parsing restrictions
+        }
+        LZ4_WILDCOPY(ip, op, cpy); ip -= (op-cpy); op = cpy;
+
+        // get offset
+        LZ4_READ_LITTLEENDIAN_16(ref,cpy,ip); ip+=2;
+        if ((prefix64k==noPrefix) && unlikely(ref < (BYTE* const)dest)) goto _output_error;   // Error : offset outside destination buffer
+
+        // get matchlength
+        if ((length=(token&ML_MASK)) == ML_MASK) 
+        { 
+            for ( ; (!endOnInput) || (ip<iend-(LASTLITERALS+1)) ; )   // Ensure enough bytes remain for LASTLITERALS + token
+            {
+                unsigned s = *ip++; 
+                length += s; 
+                if (s==255) continue; 
+                break; 
+            }
+        }
+
+        // copy repeated sequence
+        if unlikely((op-ref)<(int)STEPSIZE)
+        {
+#if LZ4_ARCH64
+            size_t dec64 = dec64table[op-ref];
+#else
+            const size_t dec64 = 0;
+#endif
+            op[0] = ref[0];
+            op[1] = ref[1];
+            op[2] = ref[2];
+            op[3] = ref[3];
+            op += 4, ref += 4; ref -= dec32table[op-ref];
+            A32(op) = A32(ref); 
+            op += STEPSIZE-4; ref -= dec64;
+        } else { LZ4_COPYSTEP(ref,op); }
+        cpy = op + length - (STEPSIZE-4);
+
+        if unlikely(cpy>oend-(COPYLENGTH)-(STEPSIZE-4))
+        {
+            if (cpy > oend-LASTLITERALS) goto _output_error;    // Error : last 5 bytes must be literals
+            LZ4_SECURECOPY(ref, op, (oend-COPYLENGTH));
+            while(op<cpy) *op++=*ref++;
+            op=cpy;
+            continue;
+        }
+        LZ4_WILDCOPY(ref, op, cpy);
+        op=cpy;   // correction
+    }
+
+    // end of decoding
+    if (endOnInput)
+       return (int) (((char*)op)-dest);     // Nb of output bytes decoded
+    else
+       return (int) (((char*)ip)-source);   // Nb of input bytes read
 
-	// write overflow error detected
+    // Overflow error detected
 _output_error:
-	return (int) (-(((char*)ip)-source));
+    return (int) (-(((char*)ip)-source))-1;
 }
 
 
-int LZ4_uncompress_unknownOutputSize(
-				char* source, 
-				char* dest,
-				int isize,
-				int maxOutputSize)
-{	
-	// Local Variables
-	const BYTE* restrict ip = (const BYTE*) source;
-	const BYTE* const iend = ip + isize;
-	const BYTE* restrict ref;
-
-	BYTE* restrict op = (BYTE*) dest;
-	BYTE* const oend = op + maxOutputSize;
-	BYTE* cpy;
-
-	BYTE token;
-	
-	U32	dec[4]={0, 3, 2, 3};
-	int	len, length;
-
-
-	// Main Loop
-	while (ip<iend)
-	{
-		// get runlength
-		token = *ip++;
-		if ((length=(token>>ML_BITS)) == RUN_MASK)  { for (;(len=*ip++)==255;length+=255){} length += len; } 
-
-		// copy literals
-		cpy = op+length;
-		if (cpy>oend-COPYLENGTH) 
-		{ 
-			if (cpy > oend) goto _output_error;
-			memcpy(op, ip, length);
-			op += length;
-			break;    // Necessarily EOF
-		}
-		LZ4_WILDCOPY(ip, op, cpy); ip -= (op-cpy); op = cpy;
-		if (ip>=iend) break;    // check EOF
-
-		// get offset
-#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
-		ref = cpy - A16(ip); ip+=2;
-#else
-		{ int delta = *ip++; delta += *ip++ << 8; ref = cpy - delta; }
-#endif
-
-		// get matchlength
-		if ((length=(token&ML_MASK)) == ML_MASK) { for (;(len=*ip++)==255;length+=255){} length += len; }
-
-		// copy repeated sequence
-		if (op-ref<COPYTOKEN)
-		{
-			*op++ = *ref++;
-			*op++ = *ref++;
-			*op++ = *ref++;
-			*op++ = *ref++;
-			ref -= dec[op-ref];
-			A32(op)=A32(ref); 
-		} else { A32(op)=A32(ref); op+=4; ref+=4; }
-		cpy = op + length;
-		if (cpy>oend-COPYLENGTH)
-		{
-			if (cpy > oend) goto _output_error;	
-			LZ4_WILDCOPY(ref, op, (oend-COPYLENGTH));
-			while(op<cpy) *op++=*ref++;
-			op=cpy;
-			if (op == oend) break;    // Check EOF (should never happen, since last 5 bytes are supposed to be literals)
-			continue;
-		}
-		LZ4_WILDCOPY(ref, op, cpy);
-		op=cpy;		// correction
-	}
+int LZ4_decompress_safe(const char* source, char* dest, int inputSize, int maxOutputSize)
+{
+    return LZ4_decompress_generic(source, dest, inputSize, maxOutputSize, endOnInputSize, noPrefix, full, 0);
+}
 
-	// end of decoding
-	return (int) (((char*)op)-dest);
+int LZ4_decompress_fast(const char* source, char* dest, int outputSize)
+{
+    return LZ4_decompress_generic(source, dest, 0, outputSize, endOnOutputSize, noPrefix, full, 0);
+}
 
-	// write overflow error detected
-_output_error:
-	return (int) (-(((char*)ip)-source));
+int LZ4_decompress_safe_withPrefix64k(const char* source, char* dest, int inputSize, int maxOutputSize)
+{
+    return LZ4_decompress_generic(source, dest, inputSize, maxOutputSize, endOnInputSize, withPrefix, full, 0);
+}
+
+int LZ4_decompress_fast_withPrefix64k(const char* source, char* dest, int outputSize)
+{
+    return LZ4_decompress_generic(source, dest, 0, outputSize, endOnOutputSize, withPrefix, full, 0);
+}
+
+int LZ4_decompress_safe_partial(const char* source, char* dest, int inputSize, int targetOutputSize, int maxOutputSize)
+{
+    return LZ4_decompress_generic(source, dest, inputSize, maxOutputSize, endOnInputSize, noPrefix, partial, targetOutputSize);
 }
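
A hedged sketch of calling the safe decoder above (names are hypothetical):

    /* Hypothetical sketch: decode with bounds checking. The decoder never writes
       past 'decoded' + maxDecodedSize; a negative return signals malformed input. */
    static int decodeExample(const char* compressed, int compressedSize,
                             char* decoded, int maxDecodedSize)
    {
        return LZ4_decompress_safe(compressed, decoded, compressedSize, maxDecodedSize);
    }
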
 

Modified: hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/SnappyCompressor.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/SnappyCompressor.c?rev=1513258&r1=1513257&r2=1513258&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/SnappyCompressor.c (original)
+++ hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/SnappyCompressor.c Mon Aug 12 21:25:49 2013
@@ -30,6 +30,10 @@
 #include "config.h"
 #endif // UNIX
 
+#ifdef WINDOWS
+#include "winutils.h"
+#endif
+
 #include "org_apache_hadoop_io_compress_snappy_SnappyCompressor.h"
 
 #define JINT_MAX 0x7fffffff
@@ -40,11 +44,18 @@ static jfieldID SnappyCompressor_uncompr
 static jfieldID SnappyCompressor_compressedDirectBuf;
 static jfieldID SnappyCompressor_directBufferSize;
 
+#ifdef UNIX
 static snappy_status (*dlsym_snappy_compress)(const char*, size_t, char*, size_t*);
+#endif
+
+#ifdef WINDOWS
+typedef snappy_status (__cdecl *__dlsym_snappy_compress)(const char*, size_t, char*, size_t*);
+static __dlsym_snappy_compress dlsym_snappy_compress;
+#endif
 
 JNIEXPORT void JNICALL Java_org_apache_hadoop_io_compress_snappy_SnappyCompressor_initIDs
 (JNIEnv *env, jclass clazz){
-
+#ifdef UNIX
   // Load libsnappy.so
   void *libsnappy = dlopen(HADOOP_SNAPPY_LIBRARY, RTLD_LAZY | RTLD_GLOBAL);
   if (!libsnappy) {
@@ -53,10 +64,25 @@ JNIEXPORT void JNICALL Java_org_apache_h
     THROW(env, "java/lang/UnsatisfiedLinkError", msg);
     return;
   }
+#endif
+
+#ifdef WINDOWS
+  HMODULE libsnappy = LoadLibrary(HADOOP_SNAPPY_LIBRARY);
+  if (!libsnappy) {
+    THROW(env, "java/lang/UnsatisfiedLinkError", "Cannot load snappy.dll");
+    return;
+  }
+#endif
 
   // Locate the requisite symbols from libsnappy.so
+#ifdef UNIX
   dlerror();                                 // Clear any existing error
   LOAD_DYNAMIC_SYMBOL(dlsym_snappy_compress, env, libsnappy, "snappy_compress");
+#endif
+
+#ifdef WINDOWS
+  LOAD_DYNAMIC_SYMBOL(__dlsym_snappy_compress, dlsym_snappy_compress, env, libsnappy, "snappy_compress");
+#endif
 
   SnappyCompressor_clazz = (*env)->GetStaticFieldID(env, clazz, "clazz",
                                                  "Ljava/lang/Class;");
@@ -74,6 +100,9 @@ JNIEXPORT void JNICALL Java_org_apache_h
 
 JNIEXPORT jint JNICALL Java_org_apache_hadoop_io_compress_snappy_SnappyCompressor_compressBytesDirect
 (JNIEnv *env, jobject thisj){
+  const char* uncompressed_bytes;
+  char* compressed_bytes;
+  snappy_status ret;
   // Get members of SnappyCompressor
   jobject clazz = (*env)->GetStaticObjectField(env, thisj, SnappyCompressor_clazz);
   jobject uncompressed_direct_buf = (*env)->GetObjectField(env, thisj, SnappyCompressor_uncompressedDirectBuf);
@@ -84,7 +113,7 @@ JNIEXPORT jint JNICALL Java_org_apache_h
 
   // Get the input direct buffer
   LOCK_CLASS(env, clazz, "SnappyCompressor");
-  const char* uncompressed_bytes = (const char*)(*env)->GetDirectBufferAddress(env, uncompressed_direct_buf);
+  uncompressed_bytes = (const char*)(*env)->GetDirectBufferAddress(env, uncompressed_direct_buf);
   UNLOCK_CLASS(env, clazz, "SnappyCompressor");
 
   if (uncompressed_bytes == 0) {
@@ -93,7 +122,7 @@ JNIEXPORT jint JNICALL Java_org_apache_h
 
   // Get the output direct buffer
   LOCK_CLASS(env, clazz, "SnappyCompressor");
-  char* compressed_bytes = (char *)(*env)->GetDirectBufferAddress(env, compressed_direct_buf);
+  compressed_bytes = (char *)(*env)->GetDirectBufferAddress(env, compressed_direct_buf);
   UNLOCK_CLASS(env, clazz, "SnappyCompressor");
 
   if (compressed_bytes == 0) {
@@ -102,8 +131,8 @@ JNIEXPORT jint JNICALL Java_org_apache_h
 
   /* size_t should always be 4 bytes or larger. */
   buf_len = (size_t)compressed_direct_buf_len;
-  snappy_status ret = dlsym_snappy_compress(uncompressed_bytes,
-        uncompressed_direct_buf_len, compressed_bytes, &buf_len);
+  ret = dlsym_snappy_compress(uncompressed_bytes, uncompressed_direct_buf_len,
+        compressed_bytes, &buf_len);
   if (ret != SNAPPY_OK){
     THROW(env, "Ljava/lang/InternalError", "Could not compress data. Buffer length is too small.");
     return 0;
@@ -117,4 +146,29 @@ JNIEXPORT jint JNICALL Java_org_apache_h
   return (jint)buf_len;
 }
 
+JNIEXPORT jstring JNICALL
+Java_org_apache_hadoop_io_compress_snappy_SnappyCompressor_getLibraryName(JNIEnv *env, jclass class) {
+#ifdef UNIX
+  if (dlsym_snappy_compress) {
+    Dl_info dl_info;
+    if(dladdr(
+        dlsym_snappy_compress,
+        &dl_info)) {
+      return (*env)->NewStringUTF(env, dl_info.dli_fname);
+    }
+  }
+
+  return (*env)->NewStringUTF(env, HADOOP_SNAPPY_LIBRARY);
+#endif
+
+#ifdef WINDOWS
+  LPWSTR filename = NULL;
+  GetLibraryName(dlsym_snappy_compress, &filename);
+  if (filename != NULL) {
+    return (*env)->NewString(env, filename, (jsize) wcslen(filename));
+  } else {
+    return (*env)->NewStringUTF(env, "Unavailable");
+  }
+#endif
+}
 #endif //define HADOOP_SNAPPY_LIBRARY
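
The UNIX/WINDOWS split above follows a standard dynamic-loading pattern; a hedged
standalone sketch (library names are illustrative; snappy_status comes from snappy-c.h):

    #ifdef UNIX
    #include <dlfcn.h>
    #else
    #include <windows.h>
    #endif
    #include <snappy-c.h>

    typedef snappy_status (*compress_fn)(const char*, size_t, char*, size_t*);

    /* Hypothetical loader sketch: resolve snappy_compress at runtime. */
    static compress_fn load_snappy_compress(void)
    {
    #ifdef UNIX
        void* lib = dlopen("libsnappy.so", RTLD_LAZY | RTLD_GLOBAL);
        return lib ? (compress_fn) dlsym(lib, "snappy_compress") : NULL;
    #else   /* WINDOWS */
        HMODULE lib = LoadLibraryA("snappy.dll");
        return lib ? (compress_fn) GetProcAddress(lib, "snappy_compress") : NULL;
    #endif
    }
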

Modified: hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/SnappyDecompressor.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/SnappyDecompressor.c?rev=1513258&r1=1513257&r2=1513258&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/SnappyDecompressor.c (original)
+++ hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/SnappyDecompressor.c Mon Aug 12 21:25:49 2013
@@ -37,12 +37,20 @@ static jfieldID SnappyDecompressor_compr
 static jfieldID SnappyDecompressor_uncompressedDirectBuf;
 static jfieldID SnappyDecompressor_directBufferSize;
 
+#ifdef UNIX
 static snappy_status (*dlsym_snappy_uncompress)(const char*, size_t, char*, size_t*);
+#endif
+
+#ifdef WINDOWS
+typedef snappy_status (__cdecl *__dlsym_snappy_uncompress)(const char*, size_t, char*, size_t*);
+static __dlsym_snappy_uncompress dlsym_snappy_uncompress;
+#endif
 
 JNIEXPORT void JNICALL Java_org_apache_hadoop_io_compress_snappy_SnappyDecompressor_initIDs
 (JNIEnv *env, jclass clazz){
 
   // Load libsnappy.so
+#ifdef UNIX
   void *libsnappy = dlopen(HADOOP_SNAPPY_LIBRARY, RTLD_LAZY | RTLD_GLOBAL);
   if (!libsnappy) {
     char* msg = (char*)malloc(1000);
@@ -50,11 +58,27 @@ JNIEXPORT void JNICALL Java_org_apache_h
     THROW(env, "java/lang/UnsatisfiedLinkError", msg);
     return;
   }
+#endif
+
+#ifdef WINDOWS
+  HMODULE libsnappy = LoadLibrary(HADOOP_SNAPPY_LIBRARY);
+  if (!libsnappy) {
+    THROW(env, "java/lang/UnsatisfiedLinkError", "Cannot load snappy.dll");
+    return;
+  }
+#endif
 
   // Locate the requisite symbols from libsnappy.so
+#ifdef UNIX
   dlerror();                                 // Clear any existing error
   LOAD_DYNAMIC_SYMBOL(dlsym_snappy_uncompress, env, libsnappy, "snappy_uncompress");
 
+#endif
+
+#ifdef WINDOWS
+  LOAD_DYNAMIC_SYMBOL(__dlsym_snappy_uncompress, dlsym_snappy_uncompress, env, libsnappy, "snappy_uncompress");
+#endif
+
   SnappyDecompressor_clazz = (*env)->GetStaticFieldID(env, clazz, "clazz",
                                                    "Ljava/lang/Class;");
   SnappyDecompressor_compressedDirectBuf = (*env)->GetFieldID(env,clazz,
@@ -71,6 +95,9 @@ JNIEXPORT void JNICALL Java_org_apache_h
 
 JNIEXPORT jint JNICALL Java_org_apache_hadoop_io_compress_snappy_SnappyDecompressor_decompressBytesDirect
 (JNIEnv *env, jobject thisj){
+  const char* compressed_bytes = NULL;
+  char* uncompressed_bytes = NULL;
+  snappy_status ret;
   // Get members of SnappyDecompressor
   jobject clazz = (*env)->GetStaticObjectField(env,thisj, SnappyDecompressor_clazz);
   jobject compressed_direct_buf = (*env)->GetObjectField(env,thisj, SnappyDecompressor_compressedDirectBuf);
@@ -80,7 +107,7 @@ JNIEXPORT jint JNICALL Java_org_apache_h
 
   // Get the input direct buffer
   LOCK_CLASS(env, clazz, "SnappyDecompressor");
-  const char* compressed_bytes = (const char*)(*env)->GetDirectBufferAddress(env, compressed_direct_buf);
+  compressed_bytes = (const char*)(*env)->GetDirectBufferAddress(env, compressed_direct_buf);
   UNLOCK_CLASS(env, clazz, "SnappyDecompressor");
 
   if (compressed_bytes == 0) {
@@ -89,14 +116,15 @@ JNIEXPORT jint JNICALL Java_org_apache_h
 
   // Get the output direct buffer
   LOCK_CLASS(env, clazz, "SnappyDecompressor");
-  char* uncompressed_bytes = (char *)(*env)->GetDirectBufferAddress(env, uncompressed_direct_buf);
+  uncompressed_bytes = (char *)(*env)->GetDirectBufferAddress(env, uncompressed_direct_buf);
   UNLOCK_CLASS(env, clazz, "SnappyDecompressor");
 
   if (uncompressed_bytes == 0) {
     return (jint)0;
   }
 
-  snappy_status ret = dlsym_snappy_uncompress(compressed_bytes, compressed_direct_buf_len, uncompressed_bytes, &uncompressed_direct_buf_len);
+  ret = dlsym_snappy_uncompress(compressed_bytes, compressed_direct_buf_len,
+        uncompressed_bytes, &uncompressed_direct_buf_len);
   if (ret == SNAPPY_BUFFER_TOO_SMALL){
     THROW(env, "Ljava/lang/InternalError", "Could not decompress data. Buffer length is too small.");
   } else if (ret == SNAPPY_INVALID_INPUT){

Modified: hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/org_apache_hadoop_io_compress_snappy.h
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/org_apache_hadoop_io_compress_snappy.h?rev=1513258&r1=1513257&r2=1513258&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/org_apache_hadoop_io_compress_snappy.h (original)
+++ hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/org_apache_hadoop_io_compress_snappy.h Mon Aug 12 21:25:49 2013
@@ -21,7 +21,11 @@
 #define ORG_APACHE_HADOOP_IO_COMPRESS_SNAPPY_SNAPPY_H
 
 #include "org_apache_hadoop.h"
+
+#ifdef UNIX
 #include <dlfcn.h>
+#endif
+
 #include <jni.h>
 #include <snappy-c.h>
 #include <stddef.h>

Modified: hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zlib/ZlibCompressor.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zlib/ZlibCompressor.c?rev=1513258&r1=1513257&r2=1513258&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zlib/ZlibCompressor.c (original)
+++ hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zlib/ZlibCompressor.c Mon Aug 12 21:25:49 2013
@@ -367,6 +367,21 @@ Java_org_apache_hadoop_io_compress_zlib_
     }
 }
 
+JNIEXPORT jstring JNICALL
+Java_org_apache_hadoop_io_compress_zlib_ZlibCompressor_getLibraryName(JNIEnv *env, jclass class) {
+#ifdef UNIX
+  if (dlsym_deflateInit2_) {
+    Dl_info dl_info;
+    if(dladdr(
+        dlsym_deflateInit2_,
+        &dl_info)) {
+      return (*env)->NewStringUTF(env, dl_info.dli_fname);
+    }
+  }
+#endif
+  return (*env)->NewStringUTF(env, HADOOP_ZLIB_LIBRARY);
+}
+
 /**
  * vim: sw=2: ts=2: et:
  */

Modified: hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/NativeCodeLoader.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/NativeCodeLoader.c?rev=1513258&r1=1513257&r2=1513258&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/NativeCodeLoader.c (original)
+++ hadoop/common/branches/YARN-321/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/NativeCodeLoader.c Mon Aug 12 21:25:49 2013
@@ -19,9 +19,14 @@
 #include "org_apache_hadoop.h"
 
 #ifdef UNIX
+#include <dlfcn.h>
 #include "config.h"
 #endif // UNIX
 
+#ifdef WINDOWS
+#include "winutils.h"
+#endif
+
 #include <jni.h>
 
 JNIEXPORT jboolean JNICALL Java_org_apache_hadoop_util_NativeCodeLoader_buildSupportsSnappy
@@ -32,4 +37,30 @@ JNIEXPORT jboolean JNICALL Java_org_apac
 #else
   return JNI_FALSE;
 #endif
-}
\ No newline at end of file
+}
+
+JNIEXPORT jstring JNICALL Java_org_apache_hadoop_util_NativeCodeLoader_getLibraryName
+  (JNIEnv *env, jclass clazz)
+{
+#ifdef UNIX
+  Dl_info dl_info;
+  int ret = dladdr(
+      Java_org_apache_hadoop_util_NativeCodeLoader_getLibraryName,
+      &dl_info);
+  return (*env)->NewStringUTF(env, ret==0 ? "Unavailable" : dl_info.dli_fname);
+#endif
+
+#ifdef WINDOWS
+  LPWSTR filename = NULL;
+  GetLibraryName(Java_org_apache_hadoop_util_NativeCodeLoader_getLibraryName,
+    &filename);
+  if (filename != NULL)
+  {
+    return (*env)->NewString(env, filename, (jsize) wcslen(filename));
+  }
+  else
+  {
+    return (*env)->NewStringUTF(env, "Unavailable");
+  }
+#endif
+}
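
For reference, the dladdr() pattern used above in isolation, as a hedged sketch
(glibc may require _GNU_SOURCE for dladdr(); the helper name is hypothetical):

    #define _GNU_SOURCE
    #include <dlfcn.h>
    #include <stdio.h>

    /* Hypothetical sketch: report which shared object a given symbol came from. */
    static void printContainingLibrary(void* symbol)
    {
        Dl_info info;
        if (dladdr(symbol, &info) != 0 && info.dli_fname != NULL)
            printf("loaded from %s\n", info.dli_fname);
        else
            printf("Unavailable\n");
    }
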