You are viewing a plain text version of this content; the canonical (HTML) version is available at the original mailing-list archive link.
Posted to commits@hive.apache.org by br...@apache.org on 2014/12/10 01:33:02 UTC

svn commit: r1644264 - /hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/SetSparkReducerParallelism.java

Author: brock
Date: Wed Dec 10 00:33:02 2014
New Revision: 1644264

URL: http://svn.apache.org/r1644264
Log:
HIVE-9054 - Add additional logging to SetSparkReducerParallelism [Spark Branch] (Brock reviewed by Jimmy Xiang)

Modified:
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/SetSparkReducerParallelism.java

Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/SetSparkReducerParallelism.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/SetSparkReducerParallelism.java?rev=1644264&r1=1644263&r2=1644264&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/SetSparkReducerParallelism.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/SetSparkReducerParallelism.java Wed Dec 10 00:33:02 2014
@@ -88,7 +88,7 @@ public class SetSparkReducerParallelism
             hive_metastoreConstants.BUCKET_COUNT);
           int numBuckets = bucketCount == null ? 0 : Integer.parseInt(bucketCount);
           if (numBuckets > 0) {
-            LOG.info("Set parallelism for reduce sink " + sink + " to: " + numBuckets);
+            LOG.info("Set parallelism for reduce sink " + sink + " to: " + numBuckets + " (buckets)");
             desc.setNumReducers(numBuckets);
             return false;
           }
@@ -100,6 +100,9 @@ public class SetSparkReducerParallelism
           sink.getChildOperators().get(0).getParentOperators()) {
           if (sibling.getStatistics() != null) {
             numberOfBytes += sibling.getStatistics().getDataSize();
+            if (LOG.isDebugEnabled()) {
+              LOG.debug("Sibling " + sibling + " has stats: " + sibling.getStatistics());
+            }
           } else {
             LOG.warn("No stats available from: " + sibling);
           }
@@ -114,13 +117,13 @@ public class SetSparkReducerParallelism
               context.getConf(), sparkSessionManager);
             sparkMemoryAndCores = sparkSession.getMemoryAndCores();
           } catch (Exception e) {
-            throw new SemanticException("Failed to get spark memory/core info", e);
+            throw new SemanticException("Failed to get spark memory/core info: " + e, e);
           } finally {
             if (sparkSession != null && sparkSessionManager != null) {
               try {
                 sparkSessionManager.returnSession(sparkSession);
-              } catch(HiveException ex) {
-                LOG.error("Failed to return the session to SessionManager", ex);
+              } catch (HiveException ex) {
+                LOG.error("Failed to return the session to SessionManager: " + ex, ex);
               }
             }
           }
@@ -136,7 +139,9 @@ public class SetSparkReducerParallelism
         if (numReducers < cores) {
           numReducers = cores;
         }
-        LOG.info("Set parallelism for reduce sink " + sink + " to: " + numReducers);
+        LOG.info("Set parallelism parameters: cores = " + cores + ", numReducers = " + numReducers +
+          ", bytesPerReducer = " + bytesPerReducer + ", numberOfBytes = " + numberOfBytes);
+        LOG.info("Set parallelism for reduce sink " + sink + " to: " + numReducers + " (calculated)");
         desc.setNumReducers(numReducers);
       }
     } else {