Posted to commits@hbase.apache.org by nd...@apache.org on 2021/04/15 16:08:19 UTC
[hbase] branch master updated: HBASE-25770 Http InfoServers should honor gzip encoding when requested (#3159)
This is an automated email from the ASF dual-hosted git repository.
ndimiduk pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git
The following commit(s) were added to refs/heads/master by this push:
new bc52bca HBASE-25770 Http InfoServers should honor gzip encoding when requested (#3159)
bc52bca is described below
commit bc52bca7413a1a85b5f0ebd5a3b5db5d67ab37de
Author: Nick Dimiduk <nd...@apache.org>
AuthorDate: Thu Apr 15 09:07:13 2021 -0700
HBASE-25770 Http InfoServers should honor gzip encoding when requested (#3159)
Signed-off-by: Duo Zhang <zh...@apache.org>
Signed-off-by: Josh Elser <el...@apache.org>
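
For illustration only, not part of the commit: once this change is in place, any
HTTP client that advertises gzip support should receive a compressed response from
the info server. A minimal probe, assuming a master info server on the default
port 16010 serving the standard /master-status page:

    import java.io.BufferedReader;
    import java.io.InputStream;
    import java.io.InputStreamReader;
    import java.net.HttpURLConnection;
    import java.net.URL;
    import java.util.zip.GZIPInputStream;

    public class GzipProbe {
      public static void main(String[] args) throws Exception {
        // Hypothetical target: a master info server on the default port 16010.
        final HttpURLConnection conn = (HttpURLConnection)
            new URL("http://localhost:16010/master-status").openConnection();
        conn.setRequestProperty("Accept-Encoding", "gzip");
        // With this patch applied, the header below should read "gzip".
        final String encoding = conn.getContentEncoding();
        System.out.println("Content-Encoding: " + encoding);
        final InputStream body = "gzip".equals(encoding)
            ? new GZIPInputStream(conn.getInputStream()) : conn.getInputStream();
        try (BufferedReader reader = new BufferedReader(new InputStreamReader(body))) {
          System.out.println("First line: " + reader.readLine());
        }
      }
    }
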
---
hbase-http/pom.xml | 5 ++
.../org/apache/hadoop/hbase/http/HttpServer.java | 20 ++++++-
.../apache/hadoop/hbase/http/TestHttpServer.java | 69 +++++++++++++++++++++-
.../org/apache/hadoop/hbase/master/HMaster.java | 9 ++-
4 files changed, 95 insertions(+), 8 deletions(-)
diff --git a/hbase-http/pom.xml b/hbase-http/pom.xml
index 8e3251d..30204bb 100644
--- a/hbase-http/pom.xml
+++ b/hbase-http/pom.xml
@@ -221,6 +221,11 @@
<scope>test</scope>
</dependency>
<dependency>
+ <groupId>org.hamcrest</groupId>
+ <artifactId>hamcrest-library</artifactId>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
<groupId>org.mockito</groupId>
<artifactId>mockito-core</artifactId>
<scope>test</scope>
diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java
index 41844e7..1d51694 100644
--- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java
+++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java
@@ -66,7 +66,6 @@ import org.apache.yetus.audience.InterfaceAudience;
import org.apache.yetus.audience.InterfaceStability;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-
import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
import org.apache.hbase.thirdparty.org.eclipse.jetty.http.HttpVersion;
@@ -81,6 +80,7 @@ import org.apache.hbase.thirdparty.org.eclipse.jetty.server.SslConnectionFactory
import org.apache.hbase.thirdparty.org.eclipse.jetty.server.handler.ContextHandlerCollection;
import org.apache.hbase.thirdparty.org.eclipse.jetty.server.handler.HandlerCollection;
import org.apache.hbase.thirdparty.org.eclipse.jetty.server.handler.RequestLogHandler;
+import org.apache.hbase.thirdparty.org.eclipse.jetty.server.handler.gzip.GzipHandler;
import org.apache.hbase.thirdparty.org.eclipse.jetty.servlet.DefaultServlet;
import org.apache.hbase.thirdparty.org.eclipse.jetty.servlet.FilterHolder;
import org.apache.hbase.thirdparty.org.eclipse.jetty.servlet.FilterMapping;
@@ -575,6 +575,7 @@ public class HttpServer implements FilterContainer {
this.findPort = b.findPort;
this.authenticationEnabled = b.securityEnabled;
initializeWebServer(b.name, b.hostName, b.conf, b.pathSpecs, b);
+ this.webServer.setHandler(buildGzipHandler(this.webServer.getHandler()));
}
private void initializeWebServer(String name, String hostName,
@@ -662,6 +663,23 @@ public class HttpServer implements FilterContainer {
return ctx;
}
+ /**
+ * Construct and configure an instance of {@link GzipHandler}. With complex
+ * multi-{@link WebAppContext} configurations, it's easiest to apply this handler directly to the
+ * instance of {@link Server} near the end of its configuration, something like
+ * <pre>
+ * Server server = new Server();
+ * //...
+ * server.setHandler(buildGzipHandler(server.getHandler()));
+ * server.start();
+ * </pre>
+ */
+ public static GzipHandler buildGzipHandler(final Handler wrapped) {
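+ // Wraps the supplied handler; Jetty applies gzip only when the request's
+ // Accept-Encoding header allows it, so clients that don't ask are unaffected.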
+ final GzipHandler gzipHandler = new GzipHandler();
+ gzipHandler.setHandler(wrapped);
+ return gzipHandler;
+ }
+
private static void addNoCacheFilter(WebAppContext ctxt) {
defineFilter(ctxt, NO_CACHE_FILTER, NoCacheFilter.class.getName(),
Collections.<String, String> emptyMap(), new String[] { "/*" });
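
Aside, not part of the patch: buildGzipHandler keeps Jetty's defaults for which
methods, mime types, and response sizes get compressed. Should tuning ever prove
necessary, Jetty's GzipHandler exposes setters for these knobs; a sketch, with
illustrative values only:

    public static GzipHandler buildTunedGzipHandler(final Handler wrapped) {
      final GzipHandler gzipHandler = new GzipHandler();
      gzipHandler.setMinGzipSize(1024); // skip compressing small bodies
      gzipHandler.setIncludedMimeTypes("text/html", "text/css", "application/json");
      gzipHandler.setHandler(wrapped);
      return gzipHandler;
    }
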
diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestHttpServer.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestHttpServer.java
index acd2735..0d51870 100644
--- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestHttpServer.java
+++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestHttpServer.java
@@ -1,4 +1,4 @@
-/**
+/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -17,11 +17,16 @@
*/
package org.apache.hadoop.hbase.http;
+import static org.hamcrest.Matchers.greaterThan;
+import java.io.BufferedReader;
import java.io.IOException;
+import java.io.InputStream;
+import java.io.InputStreamReader;
import java.io.PrintWriter;
import java.net.HttpURLConnection;
import java.net.URI;
import java.net.URL;
+import java.nio.CharBuffer;
import java.util.Arrays;
import java.util.Collections;
import java.util.Enumeration;
@@ -56,6 +61,13 @@ import org.apache.hadoop.security.Groups;
import org.apache.hadoop.security.ShellBasedUnixGroupsMapping;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AccessControlList;
+import org.apache.http.HttpEntity;
+import org.apache.http.HttpHeaders;
+import org.apache.http.client.methods.CloseableHttpResponse;
+import org.apache.http.client.methods.HttpGet;
+import org.apache.http.impl.client.CloseableHttpClient;
+import org.apache.http.impl.client.HttpClients;
+import org.hamcrest.MatcherAssert;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
@@ -66,7 +78,6 @@ import org.junit.experimental.categories.Category;
import org.mockito.Mockito;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-
import org.apache.hbase.thirdparty.org.eclipse.jetty.server.ServerConnector;
import org.apache.hbase.thirdparty.org.eclipse.jetty.util.ajax.JSON;
@@ -269,6 +280,60 @@ public class TestHttpServer extends HttpServerFunctionalTest {
// assertEquals("text/html; charset=utf-8", conn.getContentType());
}
+ @Test
+ public void testNegotiatesEncodingGzip() throws IOException {
+ final InputStream stream = ClassLoader.getSystemResourceAsStream("webapps/static/test.css");
+ assertNotNull(stream);
+ final String sourceContent = readFully(stream);
+
+ try (final CloseableHttpClient client = HttpClients.createMinimal()) {
+ final HttpGet request = new HttpGet(new URL(baseUrl, "/static/test.css").toString());
+
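+ // Baseline request: explicitly blank out Accept-Encoding so the body comes back unencoded.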
+ request.setHeader(HttpHeaders.ACCEPT_ENCODING, null);
+ final long unencodedContentLength;
+ try (final CloseableHttpResponse response = client.execute(request)) {
+ final HttpEntity entity = response.getEntity();
+ assertNotNull(entity);
+ assertNull(entity.getContentEncoding());
+ unencodedContentLength = entity.getContentLength();
+ MatcherAssert.assertThat(unencodedContentLength, greaterThan(0L));
+ final String unencodedEntityBody = readFully(entity.getContent());
+ assertEquals(sourceContent, unencodedEntityBody);
+ }
+
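+ // Second pass: advertise gzip and expect an encoded, strictly smaller entity.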
+ request.setHeader(HttpHeaders.ACCEPT_ENCODING, "gzip");
+ final long encodedContentLength;
+ try (final CloseableHttpResponse response = client.execute(request)) {
+ final HttpEntity entity = response.getEntity();
+ assertNotNull(entity);
+ assertNotNull(entity.getContentEncoding());
+ assertEquals("gzip", entity.getContentEncoding().getValue());
+ encodedContentLength = entity.getContentLength();
+ MatcherAssert.assertThat(encodedContentLength, greaterThan(0L));
+ final String encodedEntityBody = readFully(entity.getContent());
+ // the encoding/decoding process, as implemented in this specific combination of dependency
+ // versions, does not perfectly preserve trailing whitespace. thus, `trim()`.
+ assertEquals(sourceContent.trim(), encodedEntityBody.trim());
+ }
+ MatcherAssert.assertThat(unencodedContentLength, greaterThan(encodedContentLength));
+ }
+ }
+
+ private static String readFully(final InputStream input) throws IOException {
+ // TODO: when the time comes, delete me and replace with a JDK11 IO helper API.
+ try (final BufferedReader reader = new BufferedReader(new InputStreamReader(input))) {
+ final StringBuilder sb = new StringBuilder();
+ final CharBuffer buffer = CharBuffer.allocate(1024 * 2);
+ while (reader.read(buffer) > 0) {
+ buffer.flip(); // switch from fill to drain mode; without this, append would see the unread tail
+ sb.append(buffer);
+ buffer.clear();
+ }
+ return sb.toString();
+ } finally {
+ input.close();
+ }
+ }
+
/**
* Dummy filter that mimics an authentication filter. Obtains user identity
* from the request parameter user.name. Wraps around the request so that
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 4c3ff89..fe4ada4c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -21,7 +21,6 @@ import static org.apache.hadoop.hbase.HConstants.DEFAULT_HBASE_SPLIT_COORDINATED
import static org.apache.hadoop.hbase.HConstants.HBASE_MASTER_LOGCLEANER_PLUGINS;
import static org.apache.hadoop.hbase.HConstants.HBASE_SPLIT_WAL_COORDINATED_BY_ZK;
import static org.apache.hadoop.hbase.util.DNS.MASTER_HOSTNAME_KEY;
-
import java.io.IOException;
import java.io.InterruptedIOException;
import java.lang.reflect.Constructor;
@@ -88,9 +87,9 @@ import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableState;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.exceptions.MasterStoppedException;
-import org.apache.hadoop.hbase.executor.ExecutorService.ExecutorConfig;
import org.apache.hadoop.hbase.executor.ExecutorType;
import org.apache.hadoop.hbase.favored.FavoredNodesManager;
+import org.apache.hadoop.hbase.http.HttpServer;
import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
import org.apache.hadoop.hbase.ipc.RpcServer;
import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
@@ -216,7 +215,6 @@ import org.apache.yetus.audience.InterfaceAudience;
import org.apache.zookeeper.KeeperException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-
import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
import org.apache.hbase.thirdparty.com.google.common.collect.Maps;
import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
@@ -227,7 +225,6 @@ import org.apache.hbase.thirdparty.org.eclipse.jetty.server.Server;
import org.apache.hbase.thirdparty.org.eclipse.jetty.server.ServerConnector;
import org.apache.hbase.thirdparty.org.eclipse.jetty.servlet.ServletHolder;
import org.apache.hbase.thirdparty.org.eclipse.jetty.webapp.WebAppContext;
-
import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
@@ -552,7 +549,8 @@ public class HMaster extends HRegionServer implements MasterServices {
if (infoPort < 0 || infoServer == null) {
return -1;
}
- if(infoPort == infoServer.getPort()) {
+ if (infoPort == infoServer.getPort()) {
+ // server is already running
return infoPort;
}
final String addr = conf.get("hbase.master.info.bindAddress", "0.0.0.0");
@@ -574,6 +572,7 @@ public class HMaster extends HRegionServer implements MasterServices {
connector.setPort(infoPort);
masterJettyServer.addConnector(connector);
masterJettyServer.setStopAtShutdown(true);
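+ // Wrap the standalone redirect server's handler as well, so it honors gzip too.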
+ masterJettyServer.setHandler(HttpServer.buildGzipHandler(masterJettyServer.getHandler()));
final String redirectHostname =
StringUtils.isBlank(useThisHostnameInstead) ? null : useThisHostnameInstead;
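
For anyone wiring the same pattern outside HMaster, the point from the javadoc
bears repeating: wrap the fully-configured handler tree last, just before
start(). A self-contained sketch using the relocated Jetty classes this codebase
already imports; the port and the stand-in handler are placeholders, not
anything the commit prescribes:

    import org.apache.hadoop.hbase.http.HttpServer;
    import org.apache.hbase.thirdparty.org.eclipse.jetty.server.Server;
    import org.apache.hbase.thirdparty.org.eclipse.jetty.server.ServerConnector;
    import org.apache.hbase.thirdparty.org.eclipse.jetty.servlet.ServletContextHandler;

    public class GzipWiringSketch {
      public static void main(String[] args) throws Exception {
        final Server server = new Server();
        final ServerConnector connector = new ServerConnector(server);
        connector.setPort(8080); // arbitrary port for the sketch
        server.addConnector(connector);
        server.setHandler(new ServletContextHandler()); // stand-in handler tree
        // Wrap whatever handler tree is configured, as the javadoc advises.
        server.setHandler(HttpServer.buildGzipHandler(server.getHandler()));
        server.start();
        server.join();
      }
    }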