You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-commits@hadoop.apache.org by eyang@apache.org on 2013/08/18 19:13:47 UTC

svn commit: r1515172 - in /hadoop/common/branches/branch-1: CHANGES.txt src/native/src/org/apache/hadoop/io/compress/snappy/SnappyCompressor.c

Author: eyang
Date: Sun Aug 18 17:13:47 2013
New Revision: 1515172

URL: http://svn.apache.org/r1515172
Log:
HADOOP-9863. Backport HADOOP-8686 to support BigEndian on ppc64. 
(Yu Li via eyang)


Modified:
    hadoop/common/branches/branch-1/CHANGES.txt
    hadoop/common/branches/branch-1/src/native/src/org/apache/hadoop/io/compress/snappy/SnappyCompressor.c

Modified: hadoop/common/branches/branch-1/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/CHANGES.txt?rev=1515172&r1=1515171&r2=1515172&view=diff
==============================================================================
--- hadoop/common/branches/branch-1/CHANGES.txt (original)
+++ hadoop/common/branches/branch-1/CHANGES.txt Sun Aug 18 17:13:47 2013
@@ -39,6 +39,9 @@ Release 1.3.0 - unreleased
 
   BUG FIXES
 
+    HADOOP-9863. Backport HADOOP-8686 to support BigEndian on ppc64. 
+    (Yu Li via eyang)
+
     MAPREDUCE-5047. keep.failed.task.files=true causes job failure on 
     secure clusters. (sandyr via tucu)
 

Modified: hadoop/common/branches/branch-1/src/native/src/org/apache/hadoop/io/compress/snappy/SnappyCompressor.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/src/native/src/org/apache/hadoop/io/compress/snappy/SnappyCompressor.c?rev=1515172&r1=1515171&r2=1515172&view=diff
==============================================================================
--- hadoop/common/branches/branch-1/src/native/src/org/apache/hadoop/io/compress/snappy/SnappyCompressor.c (original)
+++ hadoop/common/branches/branch-1/src/native/src/org/apache/hadoop/io/compress/snappy/SnappyCompressor.c Sun Aug 18 17:13:47 2013
@@ -49,6 +49,8 @@
 #include "org_apache_hadoop_io_compress_snappy.h"
 #include "org_apache_hadoop_io_compress_snappy_SnappyCompressor.h"
 
+#define JINT_MAX 0x7fffffff
+
 static jfieldID SnappyCompressor_clazz;
 static jfieldID SnappyCompressor_uncompressedDirectBuf;
 static jfieldID SnappyCompressor_uncompressedDirectBufLen;
@@ -63,7 +65,7 @@ JNIEXPORT void JNICALL Java_org_apache_h
   // Load libsnappy.so
   void *libsnappy = dlopen(HADOOP_SNAPPY_LIBRARY, RTLD_LAZY | RTLD_GLOBAL);
   if (!libsnappy) {
-    char* msg = (char*)malloc(1000);
+    char msg[1000];
     snprintf(msg, 1000, "%s (%s)!", "Cannot load " HADOOP_SNAPPY_LIBRARY, dlerror());
     THROW(env, "java/lang/UnsatisfiedLinkError", msg);
     return;
@@ -95,6 +97,7 @@ JNIEXPORT jint JNICALL Java_org_apache_h
   jint uncompressed_direct_buf_len = (*env)->GetIntField(env, thisj, SnappyCompressor_uncompressedDirectBufLen);
   jobject compressed_direct_buf = (*env)->GetObjectField(env, thisj, SnappyCompressor_compressedDirectBuf);
   jint compressed_direct_buf_len = (*env)->GetIntField(env, thisj, SnappyCompressor_directBufferSize);
+  size_t buf_len;
 
   // Get the input direct buffer
   LOCK_CLASS(env, clazz, "SnappyCompressor");
@@ -102,7 +105,7 @@ JNIEXPORT jint JNICALL Java_org_apache_h
   UNLOCK_CLASS(env, clazz, "SnappyCompressor");
 
   if (uncompressed_bytes == 0) {
-    return (jint)0;
+    return 0;
   }
 
   // Get the output direct buffer
@@ -111,17 +114,24 @@ JNIEXPORT jint JNICALL Java_org_apache_h
   UNLOCK_CLASS(env, clazz, "SnappyCompressor");
 
   if (compressed_bytes == 0) {
-    return (jint)0;
+    return 0;
   }
 
-  snappy_status ret = dlsym_snappy_compress(uncompressed_bytes, uncompressed_direct_buf_len, compressed_bytes, &compressed_direct_buf_len);
+  /* size_t should always be 4 bytes or larger. */
+  buf_len = (size_t)compressed_direct_buf_len;
+  snappy_status ret = dlsym_snappy_compress(uncompressed_bytes,
+        uncompressed_direct_buf_len, compressed_bytes, &buf_len);
   if (ret != SNAPPY_OK){
-    THROW(env, "java/lang/InternalError", "Could not compress data. Buffer length is too small.");
+    THROW(env, "Ljava/lang/InternalError", "Could not compress data. Buffer length is too small.");
+    return 0;
+  }
+  if (buf_len > JINT_MAX) {
+    THROW(env, "Ljava/lang/InternalError", "Invalid return buffer length.");
+    return 0;
   }
 
   (*env)->SetIntField(env, thisj, SnappyCompressor_uncompressedDirectBufLen, 0);
-
-  return (jint)compressed_direct_buf_len;
+  return (jint)buf_len;
 }
 
 #endif //define HADOOP_SNAPPY_LIBRARY