Posted to commits@spark.apache.org by zs...@apache.org on 2017/09/06 04:28:58 UTC

spark git commit: [SPARK-9104][CORE] Expose Netty memory metrics in Spark

Repository: spark
Updated Branches:
  refs/heads/master 6a2325448 -> 445f1790a


[SPARK-9104][CORE] Expose Netty memory metrics in Spark

## What changes were proposed in this pull request?

This PR exposes Netty memory usage for Spark's `TransportClientFactory` and `TransportServer`, including detailed per-arena metrics for each direct and heap arena as well as aggregated metrics. The purpose of adding these Netty metrics is to give better insight into Netty's memory usage in Spark shuffle, RPC, and other network communication, and to help guide the memory sizing of executors.

This PR doesn't expose these metrics to any sink; to leverage this feature, they still need to be connected to the MetricsSystem or collected back to the driver for display.
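
A minimal sketch of one way to surface them (purely illustrative, not part of this patch): register both `MetricSet`s with a Dropwizard `MetricRegistry` and attach a reporter. Here `server` and `clientFactory` are assumed to be an existing `TransportServer` and `TransportClientFactory`.

```java
import com.codahale.metrics.ConsoleReporter;
import com.codahale.metrics.MetricRegistry;

MetricRegistry registry = new MetricRegistry();
registry.registerAll(server.getAllMetrics());        // TransportServer metrics
registry.registerAll(clientFactory.getAllMetrics()); // TransportClientFactory metrics

// Dump the current gauge values once; any Dropwizard reporter would work here.
ConsoleReporter reporter = ConsoleReporter.forRegistry(registry).build();
reporter.report();
```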

## How was this patch tested?

Added a unit test to verify the change; also manually verified it on a real cluster.

Author: jerryshao <ss...@hortonworks.com>

Closes #18935 from jerryshao/SPARK-9104.


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/445f1790
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/445f1790
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/445f1790

Branch: refs/heads/master
Commit: 445f1790ade1c53cf7eee1f282395648e4d0992c
Parents: 6a23254
Author: jerryshao <ss...@hortonworks.com>
Authored: Tue Sep 5 21:28:54 2017 -0700
Committer: Shixiong Zhu <zs...@gmail.com>
Committed: Tue Sep 5 21:28:54 2017 -0700

----------------------------------------------------------------------
 common/network-common/pom.xml                   |   5 +
 .../network/client/TransportClientFactory.java  |  13 +-
 .../spark/network/server/TransportServer.java   |  14 +-
 .../spark/network/util/NettyMemoryMetrics.java  | 145 ++++++++++++++++
 .../spark/network/util/TransportConf.java       |  10 ++
 .../network/util/NettyMemoryMetricsSuite.java   | 171 +++++++++++++++++++
 dev/deps/spark-deps-hadoop-2.6                  |   2 +-
 dev/deps/spark-deps-hadoop-2.7                  |   2 +-
 pom.xml                                         |   2 +-
 9 files changed, 353 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/445f1790/common/network-common/pom.xml
----------------------------------------------------------------------
diff --git a/common/network-common/pom.xml b/common/network-common/pom.xml
index ccd8504..18cbdad 100644
--- a/common/network-common/pom.xml
+++ b/common/network-common/pom.xml
@@ -61,6 +61,11 @@
       <artifactId>jackson-annotations</artifactId>
     </dependency>
 
+    <dependency>
+      <groupId>io.dropwizard.metrics</groupId>
+      <artifactId>metrics-core</artifactId>
+    </dependency>
+
     <!-- Provided dependencies -->
     <dependency>
       <groupId>org.slf4j</groupId>

http://git-wip-us.apache.org/repos/asf/spark/blob/445f1790/common/network-common/src/main/java/org/apache/spark/network/client/TransportClientFactory.java
----------------------------------------------------------------------
diff --git a/common/network-common/src/main/java/org/apache/spark/network/client/TransportClientFactory.java b/common/network-common/src/main/java/org/apache/spark/network/client/TransportClientFactory.java
index 8add4e1..16d242d 100644
--- a/common/network-common/src/main/java/org/apache/spark/network/client/TransportClientFactory.java
+++ b/common/network-common/src/main/java/org/apache/spark/network/client/TransportClientFactory.java
@@ -26,6 +26,7 @@ import java.util.Random;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.atomic.AtomicReference;
 
+import com.codahale.metrics.MetricSet;
 import com.google.common.base.Preconditions;
 import com.google.common.base.Throwables;
 import com.google.common.collect.Lists;
@@ -42,10 +43,7 @@ import org.slf4j.LoggerFactory;
 
 import org.apache.spark.network.TransportContext;
 import org.apache.spark.network.server.TransportChannelHandler;
-import org.apache.spark.network.util.IOMode;
-import org.apache.spark.network.util.JavaUtils;
-import org.apache.spark.network.util.NettyUtils;
-import org.apache.spark.network.util.TransportConf;
+import org.apache.spark.network.util.*;
 
 /**
  * Factory for creating {@link TransportClient}s by using createClient.
@@ -87,6 +85,7 @@ public class TransportClientFactory implements Closeable {
   private final Class<? extends Channel> socketChannelClass;
   private EventLoopGroup workerGroup;
   private PooledByteBufAllocator pooledAllocator;
+  private final NettyMemoryMetrics metrics;
 
   public TransportClientFactory(
       TransportContext context,
@@ -106,6 +105,12 @@ public class TransportClientFactory implements Closeable {
         conf.getModuleName() + "-client");
     this.pooledAllocator = NettyUtils.createPooledByteBufAllocator(
       conf.preferDirectBufs(), false /* allowCache */, conf.clientThreads());
+    this.metrics = new NettyMemoryMetrics(
+      this.pooledAllocator, conf.getModuleName() + "-client", conf);
+  }
+
+  public MetricSet getAllMetrics() {
+    return metrics;
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/spark/blob/445f1790/common/network-common/src/main/java/org/apache/spark/network/server/TransportServer.java
----------------------------------------------------------------------
diff --git a/common/network-common/src/main/java/org/apache/spark/network/server/TransportServer.java b/common/network-common/src/main/java/org/apache/spark/network/server/TransportServer.java
index 047c5f3..3f1fea8 100644
--- a/common/network-common/src/main/java/org/apache/spark/network/server/TransportServer.java
+++ b/common/network-common/src/main/java/org/apache/spark/network/server/TransportServer.java
@@ -22,6 +22,7 @@ import java.net.InetSocketAddress;
 import java.util.List;
 import java.util.concurrent.TimeUnit;
 
+import com.codahale.metrics.MetricSet;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 import io.netty.bootstrap.ServerBootstrap;
@@ -31,14 +32,11 @@ import io.netty.channel.ChannelInitializer;
 import io.netty.channel.ChannelOption;
 import io.netty.channel.EventLoopGroup;
 import io.netty.channel.socket.SocketChannel;
-import org.apache.spark.network.util.JavaUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import org.apache.spark.network.TransportContext;
-import org.apache.spark.network.util.IOMode;
-import org.apache.spark.network.util.NettyUtils;
-import org.apache.spark.network.util.TransportConf;
+import org.apache.spark.network.util.*;
 
 /**
  * Server for the efficient, low-level streaming service.
@@ -54,6 +52,7 @@ public class TransportServer implements Closeable {
   private ServerBootstrap bootstrap;
   private ChannelFuture channelFuture;
   private int port = -1;
+  private NettyMemoryMetrics metrics;
 
   /**
    * Creates a TransportServer that binds to the given host and the given port, or to any available
@@ -101,6 +100,9 @@ public class TransportServer implements Closeable {
       .option(ChannelOption.ALLOCATOR, allocator)
       .childOption(ChannelOption.ALLOCATOR, allocator);
 
+    this.metrics = new NettyMemoryMetrics(
+      allocator, conf.getModuleName() + "-server", conf);
+
     if (conf.backLog() > 0) {
       bootstrap.option(ChannelOption.SO_BACKLOG, conf.backLog());
     }
@@ -133,6 +135,10 @@ public class TransportServer implements Closeable {
     logger.debug("Shuffle server started on port: {}", port);
   }
 
+  public MetricSet getAllMetrics() {
+    return metrics;
+  }
+
   @Override
   public void close() {
     if (channelFuture != null) {

http://git-wip-us.apache.org/repos/asf/spark/blob/445f1790/common/network-common/src/main/java/org/apache/spark/network/util/NettyMemoryMetrics.java
----------------------------------------------------------------------
diff --git a/common/network-common/src/main/java/org/apache/spark/network/util/NettyMemoryMetrics.java b/common/network-common/src/main/java/org/apache/spark/network/util/NettyMemoryMetrics.java
new file mode 100644
index 0000000..969d9f6
--- /dev/null
+++ b/common/network-common/src/main/java/org/apache/spark/network/util/NettyMemoryMetrics.java
@@ -0,0 +1,145 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.spark.network.util;
+
+import java.lang.reflect.Method;
+import java.lang.reflect.Modifier;
+import java.util.*;
+
+import com.codahale.metrics.Gauge;
+import com.codahale.metrics.Metric;
+import com.codahale.metrics.MetricRegistry;
+import com.codahale.metrics.MetricSet;
+import com.google.common.annotations.VisibleForTesting;
+import io.netty.buffer.PoolArenaMetric;
+import io.netty.buffer.PooledByteBufAllocator;
+import io.netty.buffer.PooledByteBufAllocatorMetric;
+
+/**
+ * A Netty memory metrics class to collect metrics from Netty PooledByteBufAllocator.
+ */
+public class NettyMemoryMetrics implements MetricSet {
+
+  private final PooledByteBufAllocator pooledAllocator;
+
+  private final boolean verboseMetricsEnabled;
+
+  private final Map<String, Metric> allMetrics;
+
+  private final String metricPrefix;
+
+  @VisibleForTesting
+  final static Set<String> VERBOSE_METRICS = new HashSet<>();
+  static {
+    VERBOSE_METRICS.addAll(Arrays.asList(
+      "numAllocations",
+      "numTinyAllocations",
+      "numSmallAllocations",
+      "numNormalAllocations",
+      "numHugeAllocations",
+      "numDeallocations",
+      "numTinyDeallocations",
+      "numSmallDeallocations",
+      "numNormalDeallocations",
+      "numHugeDeallocations",
+      "numActiveAllocations",
+      "numActiveTinyAllocations",
+      "numActiveSmallAllocations",
+      "numActiveNormalAllocations",
+      "numActiveHugeAllocations",
+      "numActiveBytes"));
+  }
+
+  public NettyMemoryMetrics(PooledByteBufAllocator pooledAllocator,
+      String metricPrefix,
+      TransportConf conf) {
+    this.pooledAllocator = pooledAllocator;
+    this.allMetrics = new HashMap<>();
+    this.metricPrefix = metricPrefix;
+    this.verboseMetricsEnabled = conf.verboseMetrics();
+
+    registerMetrics(this.pooledAllocator);
+  }
+
+  private void registerMetrics(PooledByteBufAllocator allocator) {
+    PooledByteBufAllocatorMetric pooledAllocatorMetric = allocator.metric();
+
+    // Register general metrics.
+    allMetrics.put(MetricRegistry.name(metricPrefix, "usedHeapMemory"),
+      (Gauge<Long>) () -> pooledAllocatorMetric.usedHeapMemory());
+    allMetrics.put(MetricRegistry.name(metricPrefix, "usedDirectMemory"),
+      (Gauge<Long>) () -> pooledAllocatorMetric.usedDirectMemory());
+
+    if (verboseMetricsEnabled) {
+      int directArenaIndex = 0;
+      for (PoolArenaMetric metric : pooledAllocatorMetric.directArenas()) {
+        registerArenaMetric(metric, "directArena" + directArenaIndex);
+        directArenaIndex++;
+      }
+
+      int heapArenaIndex = 0;
+      for (PoolArenaMetric metric : pooledAllocatorMetric.heapArenas()) {
+        registerArenaMetric(metric, "heapArena" + heapArenaIndex);
+        heapArenaIndex++;
+      }
+    }
+  }
+
+  private void registerArenaMetric(PoolArenaMetric arenaMetric, String arenaName) {
+    for (String methodName : VERBOSE_METRICS) {
+      Method m;
+      try {
+        m = PoolArenaMetric.class.getMethod(methodName);
+      } catch (Exception e) {
+        // Failed to find metric related method, ignore this metric.
+        continue;
+      }
+
+      if (!Modifier.isPublic(m.getModifiers())) {
+        // Ignore non-public methods.
+        continue;
+      }
+
+      Class<?> returnType = m.getReturnType();
+      String metricName = MetricRegistry.name(metricPrefix, arenaName, m.getName());
+      if (returnType.equals(int.class)) {
+        allMetrics.put(metricName, (Gauge<Integer>) () -> {
+          try {
+            return (Integer) m.invoke(arenaMetric);
+          } catch (Exception e) {
+            return -1; // Swallow the exceptions.
+          }
+        });
+
+      } else if (returnType.equals(long.class)) {
+        allMetrics.put(metricName, (Gauge<Long>) () -> {
+          try {
+            return (Long) m.invoke(arenaMetric);
+          } catch (Exception e) {
+            return -1L; // Swallow the exceptions.
+          }
+        });
+      }
+    }
+  }
+
+  @Override
+  public Map<String, Metric> getMetrics() {
+    return Collections.unmodifiableMap(allMetrics);
+  }
+}
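
For illustration (not part of the patch): once constructed, the class's gauges can be read directly. Names follow the MetricRegistry.name(prefix, ...) scheme above, e.g. "shuffle-server.usedDirectMemory" or, with verbose metrics enabled, per-arena names such as "shuffle-server.directArena0.numActiveBytes". A minimal sketch, assuming a TransportConf `conf` is already in scope:

    import java.util.Map;

    import com.codahale.metrics.Gauge;
    import com.codahale.metrics.Metric;
    import io.netty.buffer.PooledByteBufAllocator;

    // Sketch only: Netty's shared default allocator stands in for the one
    // created by TransportClientFactory/TransportServer.
    PooledByteBufAllocator allocator = PooledByteBufAllocator.DEFAULT;
    NettyMemoryMetrics metrics = new NettyMemoryMetrics(allocator, "shuffle-server", conf);
    for (Map.Entry<String, Metric> e : metrics.getMetrics().entrySet()) {
      System.out.println(e.getKey() + " = " + ((Gauge<?>) e.getValue()).getValue());
    }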

http://git-wip-us.apache.org/repos/asf/spark/blob/445f1790/common/network-common/src/main/java/org/apache/spark/network/util/TransportConf.java
----------------------------------------------------------------------
diff --git a/common/network-common/src/main/java/org/apache/spark/network/util/TransportConf.java b/common/network-common/src/main/java/org/apache/spark/network/util/TransportConf.java
index fa2ff42..91497b9 100644
--- a/common/network-common/src/main/java/org/apache/spark/network/util/TransportConf.java
+++ b/common/network-common/src/main/java/org/apache/spark/network/util/TransportConf.java
@@ -40,6 +40,7 @@ public class TransportConf {
   private final String SPARK_NETWORK_IO_MAXRETRIES_KEY;
   private final String SPARK_NETWORK_IO_RETRYWAIT_KEY;
   private final String SPARK_NETWORK_IO_LAZYFD_KEY;
+  private final String SPARK_NETWORK_VERBOSE_METRICS;
 
   private final ConfigProvider conf;
 
@@ -61,6 +62,7 @@ public class TransportConf {
     SPARK_NETWORK_IO_MAXRETRIES_KEY = getConfKey("io.maxRetries");
     SPARK_NETWORK_IO_RETRYWAIT_KEY = getConfKey("io.retryWait");
     SPARK_NETWORK_IO_LAZYFD_KEY = getConfKey("io.lazyFD");
+    SPARK_NETWORK_VERBOSE_METRICS = getConfKey("io.enableVerboseMetrics");
   }
 
   public int getInt(String name, int defaultValue) {
@@ -163,6 +165,14 @@ public class TransportConf {
   }
 
   /**
+   * Whether to track detailed Netty memory metrics. If true, the detailed metrics of Netty's
+   * PooledByteBufAllocator will be collected; otherwise only general memory usage will be tracked.
+   */
+  public boolean verboseMetrics() {
+    return conf.getBoolean(SPARK_NETWORK_VERBOSE_METRICS, false);
+  }
+
+  /**
    * Maximum number of retries when binding to a port before giving up.
    */
   public int portMaxRetries() {
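
For reference (not part of the patch): getConfKey prepends the module namespace, so for the "shuffle" module the new key is "spark.shuffle.io.enableVerboseMetrics". A minimal sketch of enabling it, mirroring the test suite below:

    import java.util.HashMap;

    HashMap<String, String> config = new HashMap<>();
    config.put("spark.shuffle.io.enableVerboseMetrics", "true");
    TransportConf conf = new TransportConf("shuffle", new MapConfigProvider(config));
    assert conf.verboseMetrics();  // defaults to false when the key is unset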

http://git-wip-us.apache.org/repos/asf/spark/blob/445f1790/common/network-common/src/test/java/org/apache/spark/network/util/NettyMemoryMetricsSuite.java
----------------------------------------------------------------------
diff --git a/common/network-common/src/test/java/org/apache/spark/network/util/NettyMemoryMetricsSuite.java b/common/network-common/src/test/java/org/apache/spark/network/util/NettyMemoryMetricsSuite.java
new file mode 100644
index 0000000..400b385
--- /dev/null
+++ b/common/network-common/src/test/java/org/apache/spark/network/util/NettyMemoryMetricsSuite.java
@@ -0,0 +1,171 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.spark.network.util;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+import com.codahale.metrics.Gauge;
+import com.codahale.metrics.Metric;
+import com.codahale.metrics.MetricRegistry;
+import com.codahale.metrics.MetricSet;
+import org.apache.spark.network.TestUtils;
+import org.apache.spark.network.client.TransportClient;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Test;
+
+import org.apache.spark.network.TransportContext;
+import org.apache.spark.network.client.TransportClientFactory;
+import org.apache.spark.network.server.NoOpRpcHandler;
+import org.apache.spark.network.server.RpcHandler;
+import org.apache.spark.network.server.TransportServer;
+
+public class NettyMemoryMetricsSuite {
+
+  private TransportConf conf;
+  private TransportContext context;
+  private TransportServer server;
+  private TransportClientFactory clientFactory;
+
+  private void setUp(boolean enableVerboseMetrics) {
+    HashMap<String, String> configMap = new HashMap<>();
+    configMap.put("spark.shuffle.io.enableVerboseMetrics", String.valueOf(enableVerboseMetrics));
+    conf = new TransportConf("shuffle", new MapConfigProvider(configMap));
+    RpcHandler rpcHandler = new NoOpRpcHandler();
+    context = new TransportContext(conf, rpcHandler);
+    server = context.createServer();
+    clientFactory = context.createClientFactory();
+  }
+
+  @After
+  public void tearDown() {
+    if (clientFactory != null) {
+      JavaUtils.closeQuietly(clientFactory);
+      clientFactory = null;
+    }
+
+    if (server != null) {
+      JavaUtils.closeQuietly(server);
+      server = null;
+    }
+  }
+
+  @Test
+  @SuppressWarnings("unchecked")
+  public void testGeneralNettyMemoryMetrics() throws IOException, InterruptedException {
+    setUp(false);
+
+    MetricSet serverMetrics = server.getAllMetrics();
+    Assert.assertNotNull(serverMetrics);
+    Assert.assertNotNull(serverMetrics.getMetrics());
+    Assert.assertNotEquals(serverMetrics.getMetrics().size(), 0);
+
+    Map<String, Metric> serverMetricMap = serverMetrics.getMetrics();
+    serverMetricMap.forEach((name, metric) ->
+      Assert.assertTrue(name.startsWith("shuffle-server"))
+    );
+
+    MetricSet clientMetrics = clientFactory.getAllMetrics();
+    Assert.assertNotNull(clientMetrics);
+    Assert.assertNotNull(clientMetrics.getMetrics());
+    Assert.assertNotEquals(clientMetrics.getMetrics().size(), 0);
+
+    Map<String, Metric> clientMetricMap = clientMetrics.getMetrics();
+    clientMetricMap.forEach((name, metrics) ->
+      Assert.assertTrue(name.startsWith("shuffle-client"))
+    );
+
+    // Make sure the general metrics exist.
+    String heapMemoryMetric = "usedHeapMemory";
+    String directMemoryMetric = "usedDirectMemory";
+    Assert.assertNotNull(serverMetricMap.get(
+      MetricRegistry.name("shuffle-server", heapMemoryMetric)));
+    Assert.assertNotNull(serverMetricMap.get(
+      MetricRegistry.name("shuffle-server", directMemoryMetric)));
+
+    Assert.assertNotNull(clientMetricMap.get(
+      MetricRegistry.name("shuffle-client", heapMemoryMetric)));
+    Assert.assertNotNull(clientMetricMap.get(
+      MetricRegistry.name("shuffle-client", directMemoryMetric)));
+
+    TransportClient client = null;
+    try {
+      client = clientFactory.createClient(TestUtils.getLocalHost(), server.getPort());
+      Assert.assertTrue(client.isActive());
+
+      Assert.assertTrue(((Gauge<Long>)serverMetricMap.get(
+        MetricRegistry.name("shuffle-server", heapMemoryMetric))).getValue() >= 0L);
+      Assert.assertTrue(((Gauge<Long>)serverMetricMap.get(
+        MetricRegistry.name("shuffle-server", directMemoryMetric))).getValue() >= 0L);
+
+      Assert.assertTrue(((Gauge<Long>)clientMetricMap.get(
+        MetricRegistry.name("shuffle-client", heapMemoryMetric))).getValue() >= 0L);
+      Assert.assertTrue(((Gauge<Long>)clientMetricMap.get(
+        MetricRegistry.name("shuffle-client", directMemoryMetric))).getValue() >= 0L);
+
+    } finally {
+      if (client != null) {
+        client.close();
+      }
+    }
+  }
+
+  @Test
+  @SuppressWarnings("unchecked")
+  public void testAdditionalMetrics() throws IOException, InterruptedException {
+    setUp(true);
+
+    // Make sure additional metrics are added.
+    Map<String, Metric> serverMetricMap = server.getAllMetrics().getMetrics();
+    serverMetricMap.forEach((name, metric) -> {
+      Assert.assertTrue(name.startsWith("shuffle-server"));
+      String metricName = name.substring(name.lastIndexOf(".") + 1);
+      Assert.assertTrue(metricName.equals("usedDirectMemory")
+        || metricName.equals("usedHeapMemory")
+        || NettyMemoryMetrics.VERBOSE_METRICS.contains(metricName));
+    });
+
+    Map<String, Metric> clientMetricMap = clientFactory.getAllMetrics().getMetrics();
+    clientMetricMap.forEach((name, metric) -> {
+      Assert.assertTrue(name.startsWith("shuffle-client"));
+      String metricName = name.substring(name.lastIndexOf(".") + 1);
+      Assert.assertTrue(metricName.equals("usedDirectMemory")
+        || metricName.equals("usedHeapMemory")
+        || NettyMemoryMetrics.VERBOSE_METRICS.contains(metricName));
+    });
+
+    TransportClient client = null;
+    try {
+      client = clientFactory.createClient(TestUtils.getLocalHost(), server.getPort());
+      Assert.assertTrue(client.isActive());
+
+      String activeBytesMetric = "numActiveBytes";
+      Assert.assertTrue(((Gauge<Long>) serverMetricMap.get(MetricRegistry.name("shuffle-server",
+        "directArena0", activeBytesMetric))).getValue() >= 0L);
+
+      Assert.assertTrue(((Gauge<Long>) clientMetricMap.get(MetricRegistry.name("shuffle-client",
+        "directArena0", activeBytesMetric))).getValue() >= 0L);
+    } finally {
+      if (client != null) {
+        client.close();
+      }
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/spark/blob/445f1790/dev/deps/spark-deps-hadoop-2.6
----------------------------------------------------------------------
diff --git a/dev/deps/spark-deps-hadoop-2.6 b/dev/deps/spark-deps-hadoop-2.6
index e3b9ce0..9ac7538 100644
--- a/dev/deps/spark-deps-hadoop-2.6
+++ b/dev/deps/spark-deps-hadoop-2.6
@@ -146,7 +146,7 @@ metrics-jvm-3.1.2.jar
 minlog-1.3.0.jar
 mx4j-3.0.2.jar
 netty-3.9.9.Final.jar
-netty-all-4.0.43.Final.jar
+netty-all-4.0.47.Final.jar
 objenesis-2.1.jar
 opencsv-2.3.jar
 orc-core-1.4.0-nohive.jar

http://git-wip-us.apache.org/repos/asf/spark/blob/445f1790/dev/deps/spark-deps-hadoop-2.7
----------------------------------------------------------------------
diff --git a/dev/deps/spark-deps-hadoop-2.7 b/dev/deps/spark-deps-hadoop-2.7
index a3f3f32..d39747e 100644
--- a/dev/deps/spark-deps-hadoop-2.7
+++ b/dev/deps/spark-deps-hadoop-2.7
@@ -147,7 +147,7 @@ metrics-jvm-3.1.2.jar
 minlog-1.3.0.jar
 mx4j-3.0.2.jar
 netty-3.9.9.Final.jar
-netty-all-4.0.43.Final.jar
+netty-all-4.0.47.Final.jar
 objenesis-2.1.jar
 opencsv-2.3.jar
 orc-core-1.4.0-nohive.jar

http://git-wip-us.apache.org/repos/asf/spark/blob/445f1790/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index eab8056..09794c1 100644
--- a/pom.xml
+++ b/pom.xml
@@ -574,7 +574,7 @@
       <dependency>
         <groupId>io.netty</groupId>
         <artifactId>netty-all</artifactId>
-        <version>4.0.43.Final</version>
+        <version>4.0.47.Final</version>
       </dependency>
       <dependency>
         <groupId>io.netty</groupId>


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@spark.apache.org
For additional commands, e-mail: commits-help@spark.apache.org