Posted to commits@hbase.apache.org by ec...@apache.org on 2015/09/17 00:31:08 UTC

hbase git commit: HBASE-14334 Move Memcached block cache into its own optional module.

Repository: hbase
Updated Branches:
  refs/heads/master 08eabb89f -> 7b08f4c8b


HBASE-14334 Move Memcached block cache into its own optional module.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7b08f4c8
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7b08f4c8
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7b08f4c8

Branch: refs/heads/master
Commit: 7b08f4c8be60582cd02ba31161be214c9c9d40f9
Parents: 08eabb8
Author: Elliott Clark <ec...@apache.org>
Authored: Fri Aug 28 16:13:36 2015 -0700
Committer: Elliott Clark <ec...@apache.org>
Committed: Wed Sep 16 15:26:38 2015 -0700

----------------------------------------------------------------------
 hbase-assembly/pom.xml                          |   5 +
 .../src/main/assembly/hadoop-two-compat.xml     |   1 +
 hbase-external-blockcache/pom.xml               | 382 +++++++++++++++++++
 .../hbase/io/hfile/MemcachedBlockCache.java     | 282 ++++++++++++++
 hbase-server/pom.xml                            |   6 +-
 .../hadoop/hbase/io/hfile/CacheConfig.java      |  16 +-
 .../hbase/io/hfile/MemcachedBlockCache.java     | 282 --------------
 pom.xml                                         |   6 +
 8 files changed, 691 insertions(+), 289 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/7b08f4c8/hbase-assembly/pom.xml
----------------------------------------------------------------------
diff --git a/hbase-assembly/pom.xml b/hbase-assembly/pom.xml
index 69c4989..4851391 100644
--- a/hbase-assembly/pom.xml
+++ b/hbase-assembly/pom.xml
@@ -186,6 +186,11 @@
       <version>${project.version}</version>
     </dependency>
     <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-external-blockcache</artifactId>
+      <version>${project.version}</version>
+    </dependency>
+    <dependency>
        <groupId>org.apache.hbase</groupId>
        <artifactId>hbase-testing-util</artifactId>
        <version>${project.version}</version>

http://git-wip-us.apache.org/repos/asf/hbase/blob/7b08f4c8/hbase-assembly/src/main/assembly/hadoop-two-compat.xml
----------------------------------------------------------------------
diff --git a/hbase-assembly/src/main/assembly/hadoop-two-compat.xml b/hbase-assembly/src/main/assembly/hadoop-two-compat.xml
index ba28251..9ef624c 100644
--- a/hbase-assembly/src/main/assembly/hadoop-two-compat.xml
+++ b/hbase-assembly/src/main/assembly/hadoop-two-compat.xml
@@ -46,6 +46,7 @@
         <include>org.apache.hbase:hbase-server</include>
         <include>org.apache.hbase:hbase-shell</include>
         <include>org.apache.hbase:hbase-thrift</include>
+        <include>org.apache.hbase:hbase-external-blockcache</include>
       </includes>
       <!-- Binaries for the dependencies also go in the hbase-jars directory -->
       <binaries>

http://git-wip-us.apache.org/repos/asf/hbase/blob/7b08f4c8/hbase-external-blockcache/pom.xml
----------------------------------------------------------------------
diff --git a/hbase-external-blockcache/pom.xml b/hbase-external-blockcache/pom.xml
new file mode 100644
index 0000000..a46f1a5
--- /dev/null
+++ b/hbase-external-blockcache/pom.xml
@@ -0,0 +1,382 @@
+<?xml version="1.0"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <artifactId>hbase</artifactId>
+    <groupId>org.apache.hbase</groupId>
+    <version>2.0.0-SNAPSHOT</version>
+    <relativePath>..</relativePath>
+  </parent>
+  <artifactId>hbase-external-blockcache</artifactId>
+  <name>Apache HBase - External Block Cache</name>
+  <description>
+    HBase module that provides an out-of-process block cache.
+    Memcached is currently the reference implementation for the external block cache.
+
+    External block caches let HBase take advantage of other, more sophisticated caches that can
+    live longer than the HBase regionserver process and are not necessarily tied to the lifetime
+    of a single machine. However, external block caches add extra operational overhead.
+  </description>
+
+  <build>
+    <resources>
+      <resource>
+        <directory>src/main/resources/</directory>
+        <includes>
+          <include>hbase-default.xml</include>
+        </includes>
+      </resource>
+    </resources>
+    <testResources>
+      <testResource>
+        <directory>src/test/resources/META-INF/</directory>
+        <targetPath>META-INF/</targetPath>
+        <includes>
+          <include>NOTICE</include>
+        </includes>
+        <filtering>true</filtering>
+      </testResource>
+    </testResources>
+    <plugins>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-site-plugin</artifactId>
+        <configuration>
+          <skip>true</skip>
+        </configuration>
+      </plugin>
+      <plugin>
+        <!--Make it so assembly:single does nothing in here-->
+        <artifactId>maven-assembly-plugin</artifactId>
+        <configuration>
+          <skipAssembly>true</skipAssembly>
+        </configuration>
+      </plugin>
+      <plugin>
+        <groupId>org.codehaus.mojo</groupId>
+        <artifactId>build-helper-maven-plugin</artifactId>
+        <executions>
+          <!-- Add the generated sources -->
+          <execution>
+            <id>versionInfo-source</id>
+            <phase>generate-sources</phase>
+            <goals>
+              <goal>add-source</goal>
+            </goals>
+            <configuration>
+              <sources>
+                <source>${project.build.directory}/generated-sources/java</source>
+              </sources>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <artifactId>maven-surefire-plugin</artifactId>
+        <configuration>
+          <properties>
+            <property>
+              <name>listener</name>
+              <value>org.apache.hadoop.hbase.ResourceCheckerJUnitListener</value>
+            </property>
+          </properties>
+        </configuration>
+        <!-- Always skip the second part executions, since we only run
+        simple unit tests in this module -->
+        <executions>
+          <execution>
+            <id>secondPartTestsExecution</id>
+            <phase>test</phase>
+            <goals>
+              <goal>test</goal>
+            </goals>
+            <configuration>
+              <skip>true</skip>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+      <!-- Make a jar and put the sources in the jar -->
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-source-plugin</artifactId>
+        <configuration>
+          <excludeResources>true</excludeResources>
+          <includes>
+            <include>src/main/java</include>
+            <include>${project.build.outputDirectory}/META-INF</include>
+          </includes>
+        </configuration>
+      </plugin>
+    </plugins>
+    <pluginManagement>
+      <plugins>
+        <!--This plugin's configuration is used to store Eclipse m2e settings
+             only. It has no influence on the Maven build itself. -->
+        <plugin>
+          <groupId>org.eclipse.m2e</groupId>
+          <artifactId>lifecycle-mapping</artifactId>
+          <version>1.0.0</version>
+          <configuration>
+            <lifecycleMappingMetadata>
+              <pluginExecutions>
+                <pluginExecution>
+                  <pluginExecutionFilter>
+                    <groupId>org.apache.maven.plugins</groupId>
+                    <artifactId>maven-antrun-plugin</artifactId>
+                    <versionRange>[${maven.antrun.version}]</versionRange>
+                    <goals>
+                      <goal>run</goal>
+                    </goals>
+                  </pluginExecutionFilter>
+                  <action>
+                    <execute/>
+                  </action>
+                </pluginExecution>
+                <pluginExecution>
+                  <pluginExecutionFilter>
+                    <groupId>org.apache.maven.plugins</groupId>
+                    <artifactId>maven-dependency-plugin</artifactId>
+                    <versionRange>[2.8,)</versionRange>
+                    <goals>
+                      <goal>build-classpath</goal>
+                    </goals>
+                  </pluginExecutionFilter>
+                  <action>
+                    <ignore></ignore>
+                  </action>
+                </pluginExecution>
+                <pluginExecution>
+                  <pluginExecutionFilter>
+                    <groupId>org.apache.maven.plugins</groupId>
+                    <artifactId>maven-compiler-plugin</artifactId>
+                    <versionRange>[3.2,)</versionRange>
+                    <goals>
+                      <goal>compile</goal>
+                    </goals>
+                  </pluginExecutionFilter>
+                  <action>
+                    <ignore></ignore>
+                  </action>
+                </pluginExecution>
+              </pluginExecutions>
+            </lifecycleMappingMetadata>
+          </configuration>
+        </plugin>
+      </plugins>
+    </pluginManagement>
+  </build>
+
+  <dependencies>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-server</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>net.spy</groupId>
+      <artifactId>spymemcached</artifactId>
+      <optional>true</optional>
+    </dependency>
+  </dependencies>
+
+  <profiles>
+    <!-- This matches the apache-release profile in the Apache parent pom -->
+    <profile>
+      <id>apache-release</id>
+      <build>
+        <plugins>
+          <plugin>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-resources-plugin</artifactId>
+            <executions>
+              <execution>
+                <id>license-javadocs</id>
+                <phase>prepare-package</phase>
+                <goals>
+                  <goal>copy-resources</goal>
+                </goals>
+                <configuration>
+                  <outputDirectory>${project.build.directory}/apidocs</outputDirectory>
+                  <resources>
+                    <resource>
+                      <directory>src/main/javadoc/META-INF/</directory>
+                      <targetPath>META-INF/</targetPath>
+                      <includes>
+                        <include>NOTICE</include>
+                      </includes>
+                      <filtering>true</filtering>
+                    </resource>
+                  </resources>
+                </configuration>
+              </execution>
+            </executions>
+          </plugin>
+        </plugins>
+      </build>
+    </profile>
+    <!-- Skip the tests in this module -->
+    <profile>
+      <id>skipCommonTests</id>
+      <activation>
+        <property>
+          <name>skipCommonTests</name>
+        </property>
+      </activation>
+      <properties>
+        <surefire.skipFirstPart>true</surefire.skipFirstPart>
+      </properties>
+    </profile>
+
+    <!-- profile against Hadoop 1.1.x: activate with mvn -Dhadoop.profile=1.1. It has to
+    have the same activation property as the parent Hadoop 1.1.x profile to make sure it
+    gets run at the same time. -->
+    <profile>
+      <id>hadoop-1.1</id>
+      <activation>
+        <property>
+          <!--Below formatting for dev-support/generate-hadoopX-poms.sh-->
+          <!--h1--><name>hadoop.profile</name><value>1.1</value>
+        </property>
+      </activation>
+      <dependencies>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-core</artifactId>
+        </dependency>
+      </dependencies>
+    </profile>
+
+    <!-- profile against Hadoop 1.0.x:
+          mvn -Dhadoop.profile=1.0
+    -->
+    <profile>
+      <id>hadoop-1.0</id>
+      <activation>
+        <property>
+          <name>hadoop.profile</name>
+          <value>1.0</value>
+        </property>
+      </activation>
+      <dependencies>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-core</artifactId>
+        </dependency>
+        <dependency>
+          <groupId>commons-io</groupId>
+          <artifactId>commons-io</artifactId>
+        </dependency>
+      </dependencies>
+    </profile>
+
+    <!--
+      profile for building against Hadoop 2.0.0-alpha. Activate using:
+       mvn -Dhadoop.profile=2.0
+    -->
+    <profile>
+      <id>hadoop-2.0</id>
+      <activation>
+        <property>
+          <!--Below formatting for dev-support/generate-hadoopX-poms.sh-->
+          <!--h2--><name>!hadoop.profile</name>
+        </property>
+      </activation>
+      <dependencies>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-common</artifactId>
+        </dependency>
+      </dependencies>
+      <build>
+        <plugins>
+          <plugin>
+            <artifactId>maven-dependency-plugin</artifactId>
+            <executions>
+              <execution>
+                <id>create-mrapp-generated-classpath</id>
+                <phase>generate-test-resources</phase>
+                <goals>
+                  <goal>build-classpath</goal>
+                </goals>
+                <configuration>
+                  <!-- needed to run the unit test for DS: this generates the
+                  classpath required in the environment of the launch container
+                  in the mini MR/YARN cluster
+                  -->
+                  <outputFile>${project.build.directory}/test-classes/mrapp-generated-classpath</outputFile>
+                </configuration>
+              </execution>
+            </executions>
+          </plugin>
+        </plugins>
+      </build>
+    </profile>
+
+    <!--
+      profile for building against Hadoop 3.0.x. Activate using:
+       mvn -Dhadoop.profile=3.0
+    -->
+    <profile>
+      <id>hadoop-3.0</id>
+      <activation>
+        <property>
+          <name>hadoop.profile</name>
+          <value>3.0</value>
+        </property>
+      </activation>
+      <properties>
+        <hadoop.version>3.0-SNAPSHOT</hadoop.version>
+      </properties>
+      <dependencies>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-common</artifactId>
+        </dependency>
+      </dependencies>
+      <build>
+        <plugins>
+          <plugin>
+            <artifactId>maven-dependency-plugin</artifactId>
+            <executions>
+              <execution>
+                <id>create-mrapp-generated-classpath</id>
+                <phase>generate-test-resources</phase>
+                <goals>
+                  <goal>build-classpath</goal>
+                </goals>
+                <configuration>
+                  <!-- needed to run the unit test for DS: this generates the
+                  classpath required in the environment of the launch container
+                  in the mini MR/YARN cluster
+                  -->
+                  <outputFile>${project.build.directory}/test-classes/mrapp-generated-classpath</outputFile>
+                </configuration>
+              </execution>
+            </executions>
+          </plugin>
+        </plugins>
+      </build>
+    </profile>
+  </profiles>
+</project>
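
A note on the dependencies above: spymemcached stays <optional>true</optional>, so projects
that depend on hbase-external-blockcache do not get the memcached client transitively, and
deployments that enable the external block cache must put it on the classpath themselves. A
minimal, hypothetical classpath probe (not part of this commit; the helper class below is
invented for illustration) could look like:

  import org.apache.commons.logging.Log;
  import org.apache.commons.logging.LogFactory;

  public class SpyClientPresenceCheck {
    private static final Log LOG = LogFactory.getLog(SpyClientPresenceCheck.class);

    /** Returns true when the spymemcached client is on the classpath. */
    public static boolean spyClientAvailable() {
      try {
        // Resolve the client class reflectively; no compile-time dependency needed.
        Class.forName("net.spy.memcached.MemcachedClient");
        return true;
      } catch (ClassNotFoundException e) {
        // spymemcached is an <optional> Maven dependency, so it has to be shipped
        // with the deployment when the external block cache is enabled.
        LOG.warn("spymemcached not found; MemcachedBlockCache will be unusable", e);
        return false;
      }
    }
  }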

http://git-wip-us.apache.org/repos/asf/hbase/blob/7b08f4c8/hbase-external-blockcache/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java
----------------------------------------------------------------------
diff --git a/hbase-external-blockcache/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java b/hbase-external-blockcache/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java
new file mode 100644
index 0000000..f820193
--- /dev/null
+++ b/hbase-external-blockcache/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java
@@ -0,0 +1,282 @@
+/**
+ * Copyright The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hadoop.hbase.io.hfile;
+
+import net.spy.memcached.CachedData;
+import net.spy.memcached.ConnectionFactoryBuilder;
+import net.spy.memcached.FailureMode;
+import net.spy.memcached.MemcachedClient;
+import net.spy.memcached.transcoders.Transcoder;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.io.hfile.Cacheable.MemoryType;
+import org.apache.hadoop.hbase.nio.ByteBuff;
+import org.apache.hadoop.hbase.nio.SingleByteBuff;
+import org.apache.hadoop.hbase.util.Addressing;
+import org.apache.htrace.Trace;
+import org.apache.htrace.TraceScope;
+
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import java.util.NoSuchElementException;
+import java.util.concurrent.ExecutionException;
+
+/**
+ * Class to store blocks in memcached.
+ * This should only be used on a cluster of Memcached daemons that are tuned well and have a
+ * good network connection to the HBase regionservers. Any other use will likely slow down HBase
+ * greatly.
+ */
+@InterfaceAudience.Private
+public class MemcachedBlockCache implements BlockCache {
+  private static final Log LOG = LogFactory.getLog(MemcachedBlockCache.class.getName());
+
+  // Some memcached versions won't take values larger than 1024 * 1024 bytes, so set the
+  // limit below that just in case this client is used with those versions.
+  public static final int MAX_SIZE = 1020 * 1024;
+
+  // Config key for which memcached servers to use.
+  // They should be specified as a comma-separated list of host:port pairs,
+  // like:
+  //
+  // host1:11211,host3:8080,host4:11211
+  public static final String MEMCACHED_CONFIG_KEY = "hbase.cache.memcached.servers";
+  public static final String MEMCACHED_TIMEOUT_KEY = "hbase.cache.memcached.timeout";
+  public static final String MEMCACHED_OPTIMEOUT_KEY = "hbase.cache.memcached.optimeout";
+  public static final long MEMCACHED_DEFAULT_TIMEOUT = 500;
+
+  private final MemcachedClient client;
+  private final HFileBlockTranscoder tc = new HFileBlockTranscoder();
+  private final CacheStats cacheStats = new CacheStats("MemcachedBlockCache");
+
+  public MemcachedBlockCache(Configuration c) throws IOException {
+    LOG.info("Creating MemcachedBlockCache");
+
+    long opTimeout = c.getLong(MEMCACHED_OPTIMEOUT_KEY, MEMCACHED_DEFAULT_TIMEOUT);
+    long queueTimeout = c.getLong(MEMCACHED_TIMEOUT_KEY, opTimeout + MEMCACHED_DEFAULT_TIMEOUT);
+
+    ConnectionFactoryBuilder builder = new ConnectionFactoryBuilder()
+        .setOpTimeout(opTimeout)
+        .setOpQueueMaxBlockTime(queueTimeout) // Cap the max time before anything times out
+        .setFailureMode(FailureMode.Redistribute)
+        .setShouldOptimize(true)              // When regions move lots of reads happen together
+                                              // So combining them into single requests is nice.
+        .setDaemon(true)                      // Don't keep threads around past the end of days.
+        .setUseNagleAlgorithm(false)          // Ain't nobody got time for that
+        .setReadBufferSize(HConstants.DEFAULT_BLOCKSIZE * 4 * 1024);  // 4 times larger than the
+                                                                      // default block just in case
+
+
+    // Assume only the localhost is serving memcached,
+    // a la mcrouter or co-locating memcached with split regionservers.
+    //
+    // If this config is a pool of memcached servers, they will all be used according to the
+    // default hashing scheme defined by the memcached client (the Spy Memcached client in
+    // this case).
+    String serverListString = c.get(MEMCACHED_CONFIG_KEY,"localhost:11211");
+    String[] servers = serverListString.split(",");
+    List<InetSocketAddress> serverAddresses = new ArrayList<InetSocketAddress>(servers.length);
+    for (String s:servers) {
+      serverAddresses.add(Addressing.createInetSocketAddressFromHostAndPortStr(s));
+    }
+
+    client = new MemcachedClient(builder.build(), serverAddresses);
+  }
+
+  @Override
+  public void cacheBlock(BlockCacheKey cacheKey,
+                         Cacheable buf,
+                         boolean inMemory,
+                         boolean cacheDataInL1) {
+    cacheBlock(cacheKey, buf);
+  }
+
+  @Override
+  public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf) {
+    if (buf instanceof HFileBlock) {
+      client.add(cacheKey.toString(), MAX_SIZE, (HFileBlock) buf, tc);
+    } else {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("MemcachedBlockCache cannot cache Cacheables of type "
+            + buf.getClass().toString());
+      }
+    }
+  }
+
+  @Override
+  public Cacheable getBlock(BlockCacheKey cacheKey, boolean caching,
+                            boolean repeat, boolean updateCacheMetrics) {
+    // Assume that nothing is in the block cache.
+    HFileBlock result = null;
+
+    try (TraceScope traceScope = Trace.startSpan("MemcachedBlockCache.getBlock")) {
+      result = client.get(cacheKey.toString(), tc);
+    } catch (Exception e) {
+      // Catch a pretty broad set of exceptions to keep any changes in the memcached client,
+      // and in how it handles failures, from leaking into the read path.
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Exception pulling from memcached [ "
+            + cacheKey.toString()
+            + " ]. Treating as a miss.", e);
+      }
+      result = null;
+    } finally {
+      // Update stats unless this request has metrics updates turned off.
+      if (updateCacheMetrics) {
+        if (result == null) {
+          cacheStats.miss(caching, cacheKey.isPrimary());
+        } else {
+          cacheStats.hit(caching, cacheKey.isPrimary());
+        }
+      }
+    }
+
+
+    return result;
+  }
+
+  @Override
+  public boolean evictBlock(BlockCacheKey cacheKey) {
+    try {
+      cacheStats.evict();
+      return client.delete(cacheKey.toString()).get();
+    } catch (InterruptedException e) {
+      LOG.warn("Error deleting " + cacheKey.toString(), e);
+      Thread.currentThread().interrupt();
+    } catch (ExecutionException e) {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Error deleting " + cacheKey.toString(), e);
+      }
+    }
+    return false;
+  }
+
+  /**
+   * This method does nothing so that memcached can handle all evictions.
+   */
+  @Override
+  public int evictBlocksByHfileName(String hfileName) {
+    return 0;
+  }
+
+  @Override
+  public CacheStats getStats() {
+    return cacheStats;
+  }
+
+  @Override
+  public void shutdown() {
+    client.shutdown();
+  }
+
+  @Override
+  public long size() {
+    return 0;
+  }
+
+  @Override
+  public long getFreeSize() {
+    return 0;
+  }
+
+  @Override
+  public long getCurrentSize() {
+    return 0;
+  }
+
+  @Override
+  public long getBlockCount() {
+    return 0;
+  }
+
+  @Override
+  public Iterator<CachedBlock> iterator() {
+    return new Iterator<CachedBlock>() {
+      @Override
+      public boolean hasNext() {
+        return false;
+      }
+
+      @Override
+      public CachedBlock next() {
+        throw new NoSuchElementException("MemcachedBlockCache can't iterate over blocks.");
+      }
+
+      @Override
+      public void remove() {
+
+      }
+    };
+  }
+
+  @Override
+  public BlockCache[] getBlockCaches() {
+    return null;
+  }
+
+  /**
+   * Class to encode and decode an HFileBlock to and from the byte arrays memcached stores.
+   */
+  private static class HFileBlockTranscoder implements Transcoder<HFileBlock> {
+
+    @Override
+    public boolean asyncDecode(CachedData d) {
+      return false;
+    }
+
+    @Override
+    public CachedData encode(HFileBlock block) {
+      ByteBuffer bb = ByteBuffer.allocate(block.getSerializedLength());
+      block.serialize(bb);
+      return new CachedData(0, bb.array(), CachedData.MAX_SIZE);
+    }
+
+    @Override
+    public HFileBlock decode(CachedData d) {
+      try {
+        ByteBuff buf = new SingleByteBuff(ByteBuffer.wrap(d.getData()));
+        return (HFileBlock) HFileBlock.blockDeserializer.deserialize(buf, true,
+          MemoryType.EXCLUSIVE);
+      } catch (IOException e) {
+        LOG.warn("Error deserializing data from memcached",e);
+      }
+      return null;
+    }
+
+    @Override
+    public int getMaxSize() {
+      return MAX_SIZE;
+    }
+  }
+
+  @Override
+  public void returnBlock(BlockCacheKey cacheKey, Cacheable block) {
+    // Not doing reference counting. All blocks here are EXCLUSIVE
+  }
+
+}
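
For reference, here is a minimal sketch of wiring the class above up by hand, using only the
configuration keys it defines. In a normal deployment CacheConfig constructs the instance
when the external block cache is enabled; the host names and timeout values below are
placeholders.

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.io.hfile.BlockCache;
  import org.apache.hadoop.hbase.io.hfile.MemcachedBlockCache;

  public class MemcachedBlockCacheDemo {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      // Comma-separated host:port pairs; defaults to localhost:11211 when unset.
      conf.set(MemcachedBlockCache.MEMCACHED_CONFIG_KEY, "host1:11211,host2:11211");
      // Per-operation timeout in milliseconds (defaults to 500).
      conf.setLong(MemcachedBlockCache.MEMCACHED_OPTIMEOUT_KEY, 500L);
      // Cap on how long an operation can sit in the client's queue.
      conf.setLong(MemcachedBlockCache.MEMCACHED_TIMEOUT_KEY, 1000L);

      BlockCache cache = new MemcachedBlockCache(conf);
      // cacheBlock()/getBlock() behave as shown above; size() and getBlockCount()
      // report 0 because memcached owns all of the accounting.
      cache.shutdown();
    }
  }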

http://git-wip-us.apache.org/repos/asf/hbase/blob/7b08f4c8/hbase-server/pom.xml
----------------------------------------------------------------------
diff --git a/hbase-server/pom.xml b/hbase-server/pom.xml
index 412582c..2152c19 100644
--- a/hbase-server/pom.xml
+++ b/hbase-server/pom.xml
@@ -535,11 +535,7 @@
       <groupId>io.netty</groupId>
       <artifactId>netty-all</artifactId>
     </dependency>
-    <dependency>
-      <groupId>net.spy</groupId>
-      <artifactId>spymemcached</artifactId>
-      <optional>true</optional>
-    </dependency>
+
     <!-- tracing Dependencies -->
     <dependency>
       <groupId>org.apache.htrace</groupId>

http://git-wip-us.apache.org/repos/asf/hbase/blob/7b08f4c8/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java
index 7b4f530..d6bdec0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java
@@ -139,9 +139,16 @@ public class CacheConfig {
    * This is used for config.
    */
   private static enum ExternalBlockCaches {
-    memcached(MemcachedBlockCache.class);
+    memcached("org.apache.hadoop.hbase.io.hfile.MemcachedBlockCache");
     // TODO(eclark): Consider more. Redis, etc.
     Class<? extends BlockCache> clazz;
+    ExternalBlockCaches(String clazzName) {
+      try {
+        clazz = (Class<? extends BlockCache>) Class.forName(clazzName);
+      } catch (ClassNotFoundException cnef) {
+        clazz = null;
+      }
+    }
     ExternalBlockCaches(Class<? extends BlockCache> clazz) {
       this.clazz = clazz;
     }
@@ -572,7 +579,12 @@ public class CacheConfig {
     try {
       klass = ExternalBlockCaches.valueOf(c.get(EXTERNAL_BLOCKCACHE_CLASS_KEY, "memcache")).clazz;
     } catch (IllegalArgumentException exception) {
-      klass = c.getClass(EXTERNAL_BLOCKCACHE_CLASS_KEY, MemcachedBlockCache.class);
+      try {
+        klass = c.getClass(EXTERNAL_BLOCKCACHE_CLASS_KEY, Class.forName(
+            "org.apache.hadoop.hbase.io.hfile.MemcachedBlockCache"));
+      } catch (ClassNotFoundException e) {
+        return null;
+      }
     }
 
     // Now try and create an instance of the block cache.
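
The CacheConfig hunk above is the crux of the module split: MemcachedBlockCache is now
resolved by name via Class.forName rather than referenced directly, so hbase-server compiles
and runs without hbase-external-blockcache on its classpath. The instantiation step is
outside this hunk; a sketch of what it amounts to, assuming the Configuration-argument
constructor the class exposes:

  // Sketch only: klass comes from the lookup above and is null when neither the
  // bundled module nor a user-supplied implementation can be found.
  static BlockCache createExternalCache(Class<? extends BlockCache> klass, Configuration c) {
    if (klass == null) {
      return null; // no class available, so the external cache stays disabled
    }
    try {
      return klass.getConstructor(Configuration.class).newInstance(c);
    } catch (ReflectiveOperationException e) {
      return null; // treat an uninstantiable cache the same as a missing one
    }
  }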

http://git-wip-us.apache.org/repos/asf/hbase/blob/7b08f4c8/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java
deleted file mode 100644
index f820193..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java
+++ /dev/null
@@ -1,282 +0,0 @@
-/**
- * Copyright The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.hadoop.hbase.io.hfile;
-
-import net.spy.memcached.CachedData;
-import net.spy.memcached.ConnectionFactoryBuilder;
-import net.spy.memcached.FailureMode;
-import net.spy.memcached.MemcachedClient;
-import net.spy.memcached.transcoders.Transcoder;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.io.hfile.Cacheable.MemoryType;
-import org.apache.hadoop.hbase.nio.ByteBuff;
-import org.apache.hadoop.hbase.nio.SingleByteBuff;
-import org.apache.hadoop.hbase.util.Addressing;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
-
-
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.nio.ByteBuffer;
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.List;
-import java.util.NoSuchElementException;
-import java.util.concurrent.ExecutionException;
-
-/**
- * Class to store blocks in memcached.
- * This should only be used on a cluster of Memcached daemons that are tuned well and have a
- * good network connection to the HBase regionservers. Any other use will likely slow down HBase
- * greatly.
- */
-@InterfaceAudience.Private
-public class MemcachedBlockCache implements BlockCache {
-  private static final Log LOG = LogFactory.getLog(MemcachedBlockCache.class.getName());
-
-  // Some memcached versions won't take values larger than 1024 * 1024 bytes, so set the
-  // limit below that just in case this client is used with those versions.
-  public static final int MAX_SIZE = 1020 * 1024;
-
-  // Config key for which memcached servers to use.
-  // They should be specified as a comma-separated list of host:port pairs,
-  // like:
-  //
-  // host1:11211,host3:8080,host4:11211
-  public static final String MEMCACHED_CONFIG_KEY = "hbase.cache.memcached.servers";
-  public static final String MEMCACHED_TIMEOUT_KEY = "hbase.cache.memcached.timeout";
-  public static final String MEMCACHED_OPTIMEOUT_KEY = "hbase.cache.memcached.optimeout";
-  public static final long MEMCACHED_DEFAULT_TIMEOUT = 500;
-
-  private final MemcachedClient client;
-  private final HFileBlockTranscoder tc = new HFileBlockTranscoder();
-  private final CacheStats cacheStats = new CacheStats("MemcachedBlockCache");
-
-  public MemcachedBlockCache(Configuration c) throws IOException {
-    LOG.info("Creating MemcachedBlockCache");
-
-    long opTimeout = c.getLong(MEMCACHED_OPTIMEOUT_KEY, MEMCACHED_DEFAULT_TIMEOUT);
-    long queueTimeout = c.getLong(MEMCACHED_TIMEOUT_KEY, opTimeout + MEMCACHED_DEFAULT_TIMEOUT);
-
-    ConnectionFactoryBuilder builder = new ConnectionFactoryBuilder()
-        .setOpTimeout(opTimeout)
-        .setOpQueueMaxBlockTime(queueTimeout) // Cap the max time before anything times out
-        .setFailureMode(FailureMode.Redistribute)
-        .setShouldOptimize(true)              // When regions move lots of reads happen together
-                                              // So combining them into single requests is nice.
-        .setDaemon(true)                      // Don't keep threads around past the end of days.
-        .setUseNagleAlgorithm(false)          // Ain't nobody got time for that
-        .setReadBufferSize(HConstants.DEFAULT_BLOCKSIZE * 4 * 1024);  // 4 times larger than the
-                                                                      // default block just in case
-
-
-    // Assume only the localhost is serving memcached,
-    // a la mcrouter or co-locating memcached with split regionservers.
-    //
-    // If this config is a pool of memcached servers, they will all be used according to the
-    // default hashing scheme defined by the memcached client (the Spy Memcached client in
-    // this case).
-    String serverListString = c.get(MEMCACHED_CONFIG_KEY,"localhost:11211");
-    String[] servers = serverListString.split(",");
-    List<InetSocketAddress> serverAddresses = new ArrayList<InetSocketAddress>(servers.length);
-    for (String s:servers) {
-      serverAddresses.add(Addressing.createInetSocketAddressFromHostAndPortStr(s));
-    }
-
-    client = new MemcachedClient(builder.build(), serverAddresses);
-  }
-
-  @Override
-  public void cacheBlock(BlockCacheKey cacheKey,
-                         Cacheable buf,
-                         boolean inMemory,
-                         boolean cacheDataInL1) {
-    cacheBlock(cacheKey, buf);
-  }
-
-  @Override
-  public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf) {
-    if (buf instanceof HFileBlock) {
-      client.add(cacheKey.toString(), MAX_SIZE, (HFileBlock) buf, tc);
-    } else {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("MemcachedBlockCache cannot cache Cacheables of type "
-            + buf.getClass().toString());
-      }
-    }
-  }
-
-  @Override
-  public Cacheable getBlock(BlockCacheKey cacheKey, boolean caching,
-                            boolean repeat, boolean updateCacheMetrics) {
-    // Assume that nothing is in the block cache.
-    HFileBlock result = null;
-
-    try (TraceScope traceScope = Trace.startSpan("MemcachedBlockCache.getBlock")) {
-      result = client.get(cacheKey.toString(), tc);
-    } catch (Exception e) {
-      // Catch a pretty broad set of exceptions to keep any changes in the memcached client,
-      // and in how it handles failures, from leaking into the read path.
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Exception pulling from memcached [ "
-            + cacheKey.toString()
-            + " ]. Treating as a miss.", e);
-      }
-      result = null;
-    } finally {
-      // Update stats unless this request has metrics updates turned off.
-      if (updateCacheMetrics) {
-        if (result == null) {
-          cacheStats.miss(caching, cacheKey.isPrimary());
-        } else {
-          cacheStats.hit(caching, cacheKey.isPrimary());
-        }
-      }
-    }
-
-
-    return result;
-  }
-
-  @Override
-  public boolean evictBlock(BlockCacheKey cacheKey) {
-    try {
-      cacheStats.evict();
-      return client.delete(cacheKey.toString()).get();
-    } catch (InterruptedException e) {
-      LOG.warn("Error deleting " + cacheKey.toString(), e);
-      Thread.currentThread().interrupt();
-    } catch (ExecutionException e) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Error deleting " + cacheKey.toString(), e);
-      }
-    }
-    return false;
-  }
-
-  /**
-   * This method does nothing so that memcached can handle all evictions.
-   */
-  @Override
-  public int evictBlocksByHfileName(String hfileName) {
-    return 0;
-  }
-
-  @Override
-  public CacheStats getStats() {
-    return cacheStats;
-  }
-
-  @Override
-  public void shutdown() {
-    client.shutdown();
-  }
-
-  @Override
-  public long size() {
-    return 0;
-  }
-
-  @Override
-  public long getFreeSize() {
-    return 0;
-  }
-
-  @Override
-  public long getCurrentSize() {
-    return 0;
-  }
-
-  @Override
-  public long getBlockCount() {
-    return 0;
-  }
-
-  @Override
-  public Iterator<CachedBlock> iterator() {
-    return new Iterator<CachedBlock>() {
-      @Override
-      public boolean hasNext() {
-        return false;
-      }
-
-      @Override
-      public CachedBlock next() {
-        throw new NoSuchElementException("MemcachedBlockCache can't iterate over blocks.");
-      }
-
-      @Override
-      public void remove() {
-
-      }
-    };
-  }
-
-  @Override
-  public BlockCache[] getBlockCaches() {
-    return null;
-  }
-
-  /**
-   * Class to encode and decode an HFileBlock to and from the byte arrays memcached stores.
-   */
-  private static class HFileBlockTranscoder implements Transcoder<HFileBlock> {
-
-    @Override
-    public boolean asyncDecode(CachedData d) {
-      return false;
-    }
-
-    @Override
-    public CachedData encode(HFileBlock block) {
-      ByteBuffer bb = ByteBuffer.allocate(block.getSerializedLength());
-      block.serialize(bb);
-      return new CachedData(0, bb.array(), CachedData.MAX_SIZE);
-    }
-
-    @Override
-    public HFileBlock decode(CachedData d) {
-      try {
-        ByteBuff buf = new SingleByteBuff(ByteBuffer.wrap(d.getData()));
-        return (HFileBlock) HFileBlock.blockDeserializer.deserialize(buf, true,
-          MemoryType.EXCLUSIVE);
-      } catch (IOException e) {
-        LOG.warn("Error deserializing data from memcached",e);
-      }
-      return null;
-    }
-
-    @Override
-    public int getMaxSize() {
-      return MAX_SIZE;
-    }
-  }
-
-  @Override
-  public void returnBlock(BlockCacheKey cacheKey, Cacheable block) {
-    // Not doing reference counting. All blocks here are EXCLUSIVE
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/7b08f4c8/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index de1f5e7..46cd59e 100644
--- a/pom.xml
+++ b/pom.xml
@@ -67,6 +67,7 @@
     <module>hbase-annotations</module>
     <module>hbase-rest</module>
     <module>hbase-checkstyle</module>
+    <module>hbase-external-blockcache</module>
     <module>hbase-shaded</module>
     <module>hbase-spark</module>
   </modules>
@@ -1435,6 +1436,11 @@
         <version>${project.version}</version>
       </dependency>
       <dependency>
+        <groupId>org.apache.hbase</groupId>
+        <artifactId>hbase-external-blockcache</artifactId>
+        <version>${project.version}</version>
+      </dependency>
+      <dependency>
         <artifactId>hbase-it</artifactId>
         <groupId>org.apache.hbase</groupId>
         <version>${project.version}</version>