You are viewing a plain text version of this content. The canonical link for it is here.
Posted to hdfs-commits@hadoop.apache.org by sz...@apache.org on 2013/01/24 03:45:57 UTC
svn commit: r1437843 [1/2] - in
/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project: ./ hadoop-hdfs-httpfs/
hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/
hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/servlet/
hadoop-hdfs/ h...
Author: szetszwo
Date: Thu Jan 24 02:45:45 2013
New Revision: 1437843
URL: http://svn.apache.org/viewvc?rev=1437843&view=rev
Log:
Merge r1432789 through r1437840 from trunk.
Removed:
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolProtocolBuffers/overview.html
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/hdfs/server/datanode/DataXceiverAspects.aj
Modified:
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/ (props changed)
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/HostnameFilter.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/servlet/TestHostnameFilter.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/LICENSE.txt
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/pom.xml
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/config.h.cmake
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/hdfs_permissions_guide.xml
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/ (props changed)
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HDFSPolicyProvider.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtocol.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Receiver.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Sender.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/JournalProtocolServerSideTranslatorPB.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolTranslatorPB.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/RefreshAuthorizationPolicyProtocolClientSideTranslatorPB.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/RefreshAuthorizationPolicyProtocolServerSideTranslatorPB.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/RefreshUserMappingsProtocolClientSideTranslatorPB.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/RefreshUserMappingsProtocolServerSideTranslatorPB.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolServerSideTranslatorPB.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupState.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/ (props changed)
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/NamenodeProtocol.proto
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/ (props changed)
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/ (props changed)
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/ (props changed)
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/ (props changed)
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelRead.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelReadUtil.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/FakeRenewer.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/pom.xml
Propchange: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:r1432789-1437840
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml?rev=1437843&r1=1437842&r2=1437843&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml Thu Jan 24 02:45:45 2013
@@ -359,6 +359,8 @@
<artifactId>apache-rat-plugin</artifactId>
<configuration>
<excludes>
+ <exclude>src/test/resources/classutils.txt</exclude>
+ <exclude>src/main/conf/httpfs-signature.secret</exclude>
</excludes>
</configuration>
</plugin>
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/HostnameFilter.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/HostnameFilter.java?rev=1437843&r1=1437842&r2=1437843&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/HostnameFilter.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/HostnameFilter.java Thu Jan 24 02:45:45 2013
@@ -29,6 +29,9 @@ import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import java.io.IOException;
import java.net.InetAddress;
+import java.net.UnknownHostException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* Filter that resolves the requester hostname.
@@ -36,6 +39,7 @@ import java.net.InetAddress;
@InterfaceAudience.Private
public class HostnameFilter implements Filter {
static final ThreadLocal<String> HOSTNAME_TL = new ThreadLocal<String>();
+ private static final Logger log = LoggerFactory.getLogger(HostnameFilter.class);
/**
* Initializes the filter.
@@ -66,7 +70,19 @@ public class HostnameFilter implements F
public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain)
throws IOException, ServletException {
try {
- String hostname = InetAddress.getByName(request.getRemoteAddr()).getCanonicalHostName();
+ String hostname;
+ try {
+ String address = request.getRemoteAddr();
+ if (address != null) {
+ hostname = InetAddress.getByName(address).getCanonicalHostName();
+ } else {
+ log.warn("Request remote address is NULL");
+ hostname = "???";
+ }
+ } catch (UnknownHostException ex) {
+ log.warn("Request remote address could not be resolved, {0}", ex.toString(), ex);
+ hostname = "???";
+ }
HOSTNAME_TL.set(hostname);
chain.doFilter(request, response);
} finally {
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/servlet/TestHostnameFilter.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/servlet/TestHostnameFilter.java?rev=1437843&r1=1437842&r2=1437843&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/servlet/TestHostnameFilter.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/servlet/TestHostnameFilter.java Thu Jan 24 02:45:45 2013
@@ -64,4 +64,30 @@ public class TestHostnameFilter extends
filter.destroy();
}
+ @Test
+ public void testMissingHostname() throws Exception {
+ ServletRequest request = Mockito.mock(ServletRequest.class);
+ Mockito.when(request.getRemoteAddr()).thenReturn(null);
+
+ ServletResponse response = Mockito.mock(ServletResponse.class);
+
+ final AtomicBoolean invoked = new AtomicBoolean();
+
+ FilterChain chain = new FilterChain() {
+ @Override
+ public void doFilter(ServletRequest servletRequest, ServletResponse servletResponse)
+ throws IOException, ServletException {
+ assertTrue(HostnameFilter.get().contains("???"));
+ invoked.set(true);
+ }
+ };
+
+ Filter filter = new HostnameFilter();
+ filter.init(null);
+ assertNull(HostnameFilter.get());
+ filter.doFilter(request, response, chain);
+ assertTrue(invoked.get());
+ assertNull(HostnameFilter.get());
+ filter.destroy();
+ }
}
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1437843&r1=1437842&r2=1437843&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Thu Jan 24 02:45:45 2013
@@ -309,6 +309,11 @@ Release 2.0.3-alpha - Unreleased
HDFS-4367. GetDataEncryptionKeyResponseProto does not handle null
response. (suresh)
+ HDFS-4364. GetLinkTargetResponseProto does not handle null path. (suresh)
+
+ HDFS-4369. GetBlockKeysResponseProto does not handle null response.
+ (suresh)
+
NEW FEATURES
HDFS-2656. Add libwebhdfs, a pure C client based on WebHDFS.
@@ -480,8 +485,22 @@ Release 2.0.3-alpha - Unreleased
HDFS-4381. Document fsimage format details in FSImageFormat class javadoc.
(Jing Zhao via suresh)
+ HDFS-4375. Use token request messages defined in hadoop common.
+ (suresh)
+
+ HDFS-4392. Use NetUtils#getFreeSocketPort in MiniDFSCluster.
+ (Andrew Purtell via suresh)
+
+ HDFS-4393. Make empty request and responses in protocol translators
+ static final members. (Brandon Li via suresh)
+
+ HDFS-4403. DFSClient can infer checksum type when not provided by reading
+ first byte (todd)
+
OPTIMIZATIONS
+ HDFS-3429. DataNode reads checksums even if client does not need them (todd)
+
BUG FIXES
HDFS-3919. MiniDFSCluster:waitClusterUp can hang forever.
@@ -703,6 +722,12 @@ Release 2.0.3-alpha - Unreleased
HDFS-1245. Pluggable block id generation. (shv)
+ HDFS-4415. HostnameFilter should handle hostname resolution failures and
+ continue processing. (Robert Kanter via atm)
+
+ HDFS-4359. Slow RPC responses from NN can prevent metrics collection on
+ DNs. (liang xie via atm)
+
BREAKDOWN OF HDFS-3077 SUBTASKS
HDFS-3077. Quorum-based protocol for reading and writing edit logs.
@@ -805,9 +830,12 @@ Release 2.0.3-alpha - Unreleased
HDFS-4017. Unclosed FileInputStream in GetJournalEditServlet
(Chao Shi via todd)
- HDFS-4351. In BlockPlacementPolicyDefault.chooseTarget(..), numOfReplicas
+ HDFS-4351. In BlockPlacementPolicyDefault.chooseTarget(..), numOfReplicas
needs to be updated when avoiding stale nodes. (Andrew Wang via szetszwo)
+ HDFS-4399. Fix RAT warnings by excluding images sub-dir in docs. (Thomas
+ Graves via acmurthy)
+
Release 2.0.2-alpha - 2012-09-07
INCOMPATIBLE CHANGES
@@ -2185,6 +2213,18 @@ Release 2.0.0-alpha - 05-23-2012
HDFS-3039. Address findbugs and javadoc warnings on branch. (todd via atm)
+Release 0.23.7 - UNRELEASED
+
+ INCOMPATIBLE CHANGES
+
+ NEW FEATURES
+
+ IMPROVEMENTS
+
+ OPTIMIZATIONS
+
+ BUG FIXES
+
Release 0.23.6 - UNRELEASED
INCOMPATIBLE CHANGES
@@ -2202,7 +2242,12 @@ Release 0.23.6 - UNRELEASED
HDFS-4248. Renaming directories may incorrectly remove the paths in leases
under the tree. (daryn via szetszwo)
-Release 0.23.5 - UNRELEASED
+ HDFS-4385. Maven RAT plugin is not checking all source files (tgraves)
+
+ HDFS-4426. Secondary namenode shuts down immediately after startup.
+ (Arpit Agarwal via suresh)
+
+Release 0.23.5 - 2012-11-28
INCOMPATIBLE CHANGES
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/LICENSE.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/LICENSE.txt?rev=1437843&r1=1437842&r2=1437843&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/LICENSE.txt (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/LICENSE.txt Thu Jan 24 02:45:45 2013
@@ -242,3 +242,30 @@ For the org.apache.hadoop.util.bloom.* c
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
+
+For src/main/native/util/tree.h:
+
+/*-
+ * Copyright 2002 Niels Provos <pr...@citi.umich.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/pom.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/pom.xml?rev=1437843&r1=1437842&r2=1437843&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/pom.xml (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/pom.xml Thu Jan 24 02:45:45 2013
@@ -516,9 +516,12 @@ http://maven.apache.org/xsd/maven-4.0.0.
<exclude>src/test/resources/data*</exclude>
<exclude>src/test/resources/editsStored*</exclude>
<exclude>src/test/resources/empty-file</exclude>
+ <exclude>src/main/native/util/tree.h</exclude>
+ <exclude>src/test/aop/org/apache/hadoop/hdfs/server/datanode/DataXceiverAspects.aj</exclude>
<exclude>src/main/webapps/datanode/robots.txt</exclude>
<exclude>src/main/docs/releasenotes.html</exclude>
<exclude>src/contrib/**</exclude>
+ <exclude>src/site/resources/images/*</exclude>
</excludes>
</configuration>
</plugin>
@@ -563,6 +566,9 @@ http://maven.apache.org/xsd/maven-4.0.0.
<exec executable="make" dir="${project.build.directory}/native" failonerror="true">
<arg line="VERBOSE=1"/>
</exec>
+ <!-- The second make is a workaround for HADOOP-9215. It can
be removed when version 2.6 of cmake is no longer supported. -->
+ <exec executable="make" dir="${project.build.directory}/native" failonerror="true"></exec>
</target>
</configuration>
</execution>
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/config.h.cmake
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/config.h.cmake?rev=1437843&r1=1437842&r2=1437843&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/config.h.cmake (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/config.h.cmake Thu Jan 24 02:45:45 2013
@@ -1,3 +1,20 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
#ifndef CONFIG_H
#define CONFIG_H
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/hdfs_permissions_guide.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/hdfs_permissions_guide.xml?rev=1437843&r1=1437842&r2=1437843&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/hdfs_permissions_guide.xml (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/hdfs_permissions_guide.xml Thu Jan 24 02:45:45 2013
@@ -92,10 +92,11 @@ There is no provision within HDFS for cr
<section><title>Group Mapping</title>
<p>
-Once a username has been determined as described above, the list of groups is determined by a <em>group mapping
-service</em>, configured by the <code>hadoop.security.group.mapping</code> property.
-The default implementation, <code>org.apache.hadoop.security.ShellBasedUnixGroupsMapping</code>, will shell out
-to the Unix <code>bash -c groups</code> command to resolve a list of groups for a user.
+Once a username has been determined as described above, the list of groups is
+determined by a <em>group mapping service</em>, configured by the
+<code>hadoop.security.group.mapping</code> property. Refer to the
+core-default.xml for details of the <code>hadoop.security.group.mapping</code>
+implementation.
</p>
<p>
An alternate implementation, which connects directly to an LDAP server to resolve the list of groups, is available
Propchange: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:r1432789-1437840
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java?rev=1437843&r1=1437842&r2=1437843&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java Thu Jan 24 02:45:45 2013
@@ -152,6 +152,7 @@ import org.apache.hadoop.security.token.
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenRenewer;
import org.apache.hadoop.util.DataChecksum;
+import org.apache.hadoop.util.DataChecksum.Type;
import org.apache.hadoop.util.Progressable;
import org.apache.hadoop.util.Time;
@@ -1571,7 +1572,7 @@ public class DFSClient implements java.i
*/
public MD5MD5CRC32FileChecksum getFileChecksum(String src) throws IOException {
checkOpen();
- return getFileChecksum(src, namenode, socketFactory,
+ return getFileChecksum(src, clientName, namenode, socketFactory,
dfsClientConf.socketTimeout, getDataEncryptionKey(),
dfsClientConf.connectToDnViaHostname);
}
@@ -1614,9 +1615,16 @@ public class DFSClient implements java.i
/**
* Get the checksum of a file.
* @param src The file path
+ * @param clientName the name of the client requesting the checksum.
+ * @param namenode the RPC proxy for the namenode
+ * @param socketFactory to create sockets to connect to DNs
+ * @param socketTimeout timeout to use when connecting and waiting for a response
+ * @param encryptionKey the key needed to communicate with DNs in this cluster
+ * @param connectToDnViaHostname {@see #connectToDnViaHostname()}
* @return The checksum
*/
- public static MD5MD5CRC32FileChecksum getFileChecksum(String src,
+ static MD5MD5CRC32FileChecksum getFileChecksum(String src,
+ String clientName,
ClientProtocol namenode, SocketFactory socketFactory, int socketTimeout,
DataEncryptionKey encryptionKey, boolean connectToDnViaHostname)
throws IOException {
@@ -1651,32 +1659,16 @@ public class DFSClient implements java.i
final int timeout = 3000 * datanodes.length + socketTimeout;
boolean done = false;
for(int j = 0; !done && j < datanodes.length; j++) {
- Socket sock = null;
DataOutputStream out = null;
DataInputStream in = null;
try {
//connect to a datanode
- sock = socketFactory.createSocket();
- String dnAddr = datanodes[j].getXferAddr(connectToDnViaHostname);
- if (LOG.isDebugEnabled()) {
- LOG.debug("Connecting to datanode " + dnAddr);
- }
- NetUtils.connect(sock, NetUtils.createSocketAddr(dnAddr), timeout);
- sock.setSoTimeout(timeout);
-
- OutputStream unbufOut = NetUtils.getOutputStream(sock);
- InputStream unbufIn = NetUtils.getInputStream(sock);
- if (encryptionKey != null) {
- IOStreamPair encryptedStreams =
- DataTransferEncryptor.getEncryptedStreams(
- unbufOut, unbufIn, encryptionKey);
- unbufOut = encryptedStreams.out;
- unbufIn = encryptedStreams.in;
- }
- out = new DataOutputStream(new BufferedOutputStream(unbufOut,
+ IOStreamPair pair = connectToDN(socketFactory, connectToDnViaHostname,
+ encryptionKey, datanodes[j], timeout);
+ out = new DataOutputStream(new BufferedOutputStream(pair.out,
HdfsConstants.SMALL_BUFFER_SIZE));
- in = new DataInputStream(unbufIn);
+ in = new DataInputStream(pair.in);
if (LOG.isDebugEnabled()) {
LOG.debug("write to " + datanodes[j] + ": "
@@ -1689,19 +1681,8 @@ public class DFSClient implements java.i
BlockOpResponseProto.parseFrom(PBHelper.vintPrefixed(in));
if (reply.getStatus() != Status.SUCCESS) {
- if (reply.getStatus() == Status.ERROR_ACCESS_TOKEN
- && i > lastRetriedIndex) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("Got access token error in response to OP_BLOCK_CHECKSUM "
- + "for file " + src + " for block " + block
- + " from datanode " + datanodes[j]
- + ". Will retry the block once.");
- }
- lastRetriedIndex = i;
- done = true; // actually it's not done; but we'll retry
- i--; // repeat at i-th block
- refetchBlocks = true;
- break;
+ if (reply.getStatus() == Status.ERROR_ACCESS_TOKEN) {
+ throw new InvalidBlockTokenException();
} else {
throw new IOException("Bad response " + reply + " for block "
+ block + " from datanode " + datanodes[j]);
@@ -1733,8 +1714,18 @@ public class DFSClient implements java.i
md5.write(md5out);
// read crc-type
- final DataChecksum.Type ct = PBHelper.convert(checksumData
- .getCrcType());
+ final DataChecksum.Type ct;
+ if (checksumData.hasCrcType()) {
+ ct = PBHelper.convert(checksumData
+ .getCrcType());
+ } else {
+ LOG.debug("Retrieving checksum from an earlier-version DataNode: " +
+ "inferring checksum by reading first byte");
+ ct = inferChecksumTypeByReading(
+ clientName, socketFactory, socketTimeout, lb, datanodes[j],
+ encryptionKey, connectToDnViaHostname);
+ }
+
if (i == 0) { // first block
crcType = ct;
} else if (crcType != DataChecksum.Type.MIXED
@@ -1752,12 +1743,25 @@ public class DFSClient implements java.i
}
LOG.debug("got reply from " + datanodes[j] + ": md5=" + md5);
}
+ } catch (InvalidBlockTokenException ibte) {
+ if (i > lastRetriedIndex) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Got access token error in response to OP_BLOCK_CHECKSUM "
+ + "for file " + src + " for block " + block
+ + " from datanode " + datanodes[j]
+ + ". Will retry the block once.");
+ }
+ lastRetriedIndex = i;
+ done = true; // actually it's not done; but we'll retry
+ i--; // repeat at i-th block
+ refetchBlocks = true;
+ break;
+ }
} catch (IOException ie) {
LOG.warn("src=" + src + ", datanodes["+j+"]=" + datanodes[j], ie);
} finally {
IOUtils.closeStream(in);
IOUtils.closeStream(out);
- IOUtils.closeSocket(sock);
}
}
@@ -1790,6 +1794,90 @@ public class DFSClient implements java.i
}
/**
+ * Connect to the given datanode's datatransfer port, and return
+ * the resulting IOStreamPair. This includes encryption wrapping, etc.
+ */
+ private static IOStreamPair connectToDN(
+ SocketFactory socketFactory, boolean connectToDnViaHostname,
+ DataEncryptionKey encryptionKey, DatanodeInfo dn, int timeout)
+ throws IOException
+ {
+ boolean success = false;
+ Socket sock = null;
+ try {
+ sock = socketFactory.createSocket();
+ String dnAddr = dn.getXferAddr(connectToDnViaHostname);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Connecting to datanode " + dnAddr);
+ }
+ NetUtils.connect(sock, NetUtils.createSocketAddr(dnAddr), timeout);
+ sock.setSoTimeout(timeout);
+
+ OutputStream unbufOut = NetUtils.getOutputStream(sock);
+ InputStream unbufIn = NetUtils.getInputStream(sock);
+ IOStreamPair ret;
+ if (encryptionKey != null) {
+ ret = DataTransferEncryptor.getEncryptedStreams(
+ unbufOut, unbufIn, encryptionKey);
+ } else {
+ ret = new IOStreamPair(unbufIn, unbufOut);
+ }
+ success = true;
+ return ret;
+ } finally {
+ if (!success) {
+ IOUtils.closeSocket(sock);
+ }
+ }
+ }
+
+ /**
+ * Infer the checksum type for a replica by sending an OP_READ_BLOCK
+ * for the first byte of that replica. This is used for compatibility
+ * with older HDFS versions which did not include the checksum type in
+ * OpBlockChecksumResponseProto.
+ *
+ * @param in input stream from datanode
+ * @param out output stream to datanode
+ * @param lb the located block
+ * @param clientName the name of the DFSClient requesting the checksum
+ * @param dn the connected datanode
+ * @return the inferred checksum type
+ * @throws IOException if an error occurs
+ */
+ private static Type inferChecksumTypeByReading(
+ String clientName, SocketFactory socketFactory, int socketTimeout,
+ LocatedBlock lb, DatanodeInfo dn,
+ DataEncryptionKey encryptionKey, boolean connectToDnViaHostname)
+ throws IOException {
+ IOStreamPair pair = connectToDN(socketFactory, connectToDnViaHostname,
+ encryptionKey, dn, socketTimeout);
+
+ try {
+ DataOutputStream out = new DataOutputStream(new BufferedOutputStream(pair.out,
+ HdfsConstants.SMALL_BUFFER_SIZE));
+ DataInputStream in = new DataInputStream(pair.in);
+
+ new Sender(out).readBlock(lb.getBlock(), lb.getBlockToken(), clientName, 0, 1, true);
+ final BlockOpResponseProto reply =
+ BlockOpResponseProto.parseFrom(PBHelper.vintPrefixed(in));
+
+ if (reply.getStatus() != Status.SUCCESS) {
+ if (reply.getStatus() == Status.ERROR_ACCESS_TOKEN) {
+ throw new InvalidBlockTokenException();
+ } else {
+ throw new IOException("Bad response " + reply + " trying to read "
+ + lb.getBlock() + " from datanode " + dn);
+ }
+ }
+
+ return PBHelper.convert(reply.getReadOpChecksumInfo().getChecksum().getType());
+ } finally {
+ IOUtils.cleanup(null, pair.in, pair.out);
+ }
+ }
+
+ /**
* Set permissions to a file or directory.
* @param src path name.
* @param permission
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HDFSPolicyProvider.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HDFSPolicyProvider.java?rev=1437843&r1=1437842&r2=1437843&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HDFSPolicyProvider.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HDFSPolicyProvider.java Thu Jan 24 02:45:45 2013
@@ -40,14 +40,18 @@ import org.apache.hadoop.tools.GetUserMa
public class HDFSPolicyProvider extends PolicyProvider {
private static final Service[] hdfsServices =
new Service[] {
- new Service("security.client.protocol.acl", ClientProtocol.class),
- new Service("security.client.datanode.protocol.acl",
- ClientDatanodeProtocol.class),
- new Service("security.datanode.protocol.acl", DatanodeProtocol.class),
- new Service("security.inter.datanode.protocol.acl",
- InterDatanodeProtocol.class),
- new Service("security.namenode.protocol.acl", NamenodeProtocol.class),
- new Service("security.qjournal.service.protocol.acl", QJournalProtocol.class),
+ new Service(CommonConfigurationKeys.SECURITY_CLIENT_PROTOCOL_ACL,
+ ClientProtocol.class),
+ new Service(CommonConfigurationKeys.SECURITY_CLIENT_DATANODE_PROTOCOL_ACL,
+ ClientDatanodeProtocol.class),
+ new Service(CommonConfigurationKeys.SECURITY_DATANODE_PROTOCOL_ACL,
+ DatanodeProtocol.class),
+ new Service(CommonConfigurationKeys.SECURITY_INTER_DATANODE_PROTOCOL_ACL,
+ InterDatanodeProtocol.class),
+ new Service(CommonConfigurationKeys.SECURITY_NAMENODE_PROTOCOL_ACL,
+ NamenodeProtocol.class),
+ new Service(CommonConfigurationKeys.SECURITY_QJOURNAL_SERVICE_PROTOCOL_ACL,
+ QJournalProtocol.class),
new Service(CommonConfigurationKeys.SECURITY_HA_SERVICE_PROTOCOL_ACL,
HAServiceProtocol.class),
new Service(CommonConfigurationKeys.SECURITY_ZKFC_PROTOCOL_ACL,
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java?rev=1437843&r1=1437842&r2=1437843&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java Thu Jan 24 02:45:45 2013
@@ -380,7 +380,8 @@ public class RemoteBlockReader extends F
// in and out will be closed when sock is closed (by the caller)
final DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
NetUtils.getOutputStream(sock, HdfsServerConstants.WRITE_TIMEOUT)));
- new Sender(out).readBlock(block, blockToken, clientName, startOffset, len);
+ new Sender(out).readBlock(block, blockToken, clientName, startOffset, len,
+ verifyChecksum);
//
// Get bytes in block, set streams
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java?rev=1437843&r1=1437842&r2=1437843&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java Thu Jan 24 02:45:45 2013
@@ -392,7 +392,8 @@ public class RemoteBlockReader2 impleme
// in and out will be closed when sock is closed (by the caller)
final DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
ioStreams.out));
- new Sender(out).readBlock(block, blockToken, clientName, startOffset, len);
+ new Sender(out).readBlock(block, blockToken, clientName, startOffset, len,
+ verifyChecksum);
//
// Get bytes in block
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtocol.java?rev=1437843&r1=1437842&r2=1437843&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtocol.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtocol.java Thu Jan 24 02:45:45 2013
@@ -55,12 +55,15 @@ public interface DataTransferProtocol {
* @param clientName client's name.
* @param blockOffset offset of the block.
* @param length maximum number of bytes for this read.
+ * @param sendChecksum if false, the DN should skip reading and sending
+ * checksums
*/
public void readBlock(final ExtendedBlock blk,
final Token<BlockTokenIdentifier> blockToken,
final String clientName,
final long blockOffset,
- final long length) throws IOException;
+ final long length,
+ final boolean sendChecksum) throws IOException;
/**
* Write a block to a datanode pipeline.
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Receiver.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Receiver.java?rev=1437843&r1=1437842&r2=1437843&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Receiver.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Receiver.java Thu Jan 24 02:45:45 2013
@@ -88,7 +88,8 @@ public abstract class Receiver implement
PBHelper.convert(proto.getHeader().getBaseHeader().getToken()),
proto.getHeader().getClientName(),
proto.getOffset(),
- proto.getLen());
+ proto.getLen(),
+ proto.getSendChecksums());
}
/** Receive OP_WRITE_BLOCK */
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Sender.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Sender.java?rev=1437843&r1=1437842&r2=1437843&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Sender.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Sender.java Thu Jan 24 02:45:45 2013
@@ -62,6 +62,10 @@ public class Sender implements DataTrans
private static void send(final DataOutputStream out, final Op opcode,
final Message proto) throws IOException {
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("Sending DataTransferOp " + proto.getClass().getSimpleName()
+ + ": " + proto);
+ }
op(out, opcode);
proto.writeDelimitedTo(out);
out.flush();
@@ -72,12 +76,14 @@ public class Sender implements DataTrans
final Token<BlockTokenIdentifier> blockToken,
final String clientName,
final long blockOffset,
- final long length) throws IOException {
+ final long length,
+ final boolean sendChecksum) throws IOException {
OpReadBlockProto proto = OpReadBlockProto.newBuilder()
.setHeader(DataTransferProtoUtil.buildClientHeader(blk, clientName, blockToken))
.setOffset(blockOffset)
.setLen(length)
+ .setSendChecksums(sendChecksum)
.build();
send(out, Op.READ_BLOCK, proto);
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java?rev=1437843&r1=1437842&r2=1437843&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java Thu Jan 24 02:45:45 2013
@@ -77,7 +77,7 @@ public class ClientDatanodeProtocolTrans
/** RpcController is not used and hence is set to null */
private final static RpcController NULL_CONTROLLER = null;
private final ClientDatanodeProtocolPB rpcProxy;
- private final static RefreshNamenodesRequestProto REFRESH_NAMENODES =
+ private final static RefreshNamenodesRequestProto VOID_REFRESH_NAMENODES =
RefreshNamenodesRequestProto.newBuilder().build();
public ClientDatanodeProtocolTranslatorPB(DatanodeID datanodeid,
@@ -170,7 +170,7 @@ public class ClientDatanodeProtocolTrans
@Override
public void refreshNamenodes() throws IOException {
try {
- rpcProxy.refreshNamenodes(NULL_CONTROLLER, REFRESH_NAMENODES);
+ rpcProxy.refreshNamenodes(NULL_CONTROLLER, VOID_REFRESH_NAMENODES);
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java?rev=1437843&r1=1437842&r2=1437843&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java Thu Jan 24 02:45:45 2013
@@ -40,8 +40,6 @@ import org.apache.hadoop.hdfs.protocol.p
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto;
@@ -73,8 +71,6 @@ import org.apache.hadoop.hdfs.protocol.p
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto;
@@ -107,8 +103,6 @@ import org.apache.hadoop.hdfs.protocol.p
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotResponseProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto;
@@ -143,6 +137,12 @@ import org.apache.hadoop.hdfs.protocol.p
import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenRequestProto;
+import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenResponseProto;
+import org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenRequestProto;
+import org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenResponseProto;
+import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenRequestProto;
+import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenResponseProto;
import org.apache.hadoop.security.token.Token;
import com.google.protobuf.RpcController;
@@ -171,6 +171,78 @@ public class ClientNamenodeProtocolServe
static final DisallowSnapshotResponseProto VOID_DISALLOW_SNAPSHOT_RESPONSE =
DisallowSnapshotResponseProto.newBuilder().build();
+ private static final CreateResponseProto VOID_CREATE_RESPONSE =
+ CreateResponseProto.newBuilder().build();
+
+ private static final AppendResponseProto VOID_APPEND_RESPONSE =
+ AppendResponseProto.newBuilder().build();
+
+ private static final SetPermissionResponseProto VOID_SET_PERM_RESPONSE =
+ SetPermissionResponseProto.newBuilder().build();
+
+ private static final SetOwnerResponseProto VOID_SET_OWNER_RESPONSE =
+ SetOwnerResponseProto.newBuilder().build();
+
+ private static final AbandonBlockResponseProto VOID_ADD_BLOCK_RESPONSE =
+ AbandonBlockResponseProto.newBuilder().build();
+
+ private static final ReportBadBlocksResponseProto VOID_REP_BAD_BLOCK_RESPONSE =
+ ReportBadBlocksResponseProto.newBuilder().build();
+
+ private static final ConcatResponseProto VOID_CONCAT_RESPONSE =
+ ConcatResponseProto.newBuilder().build();
+
+ private static final Rename2ResponseProto VOID_RENAME2_RESPONSE =
+ Rename2ResponseProto.newBuilder().build();
+
+ private static final GetListingResponseProto VOID_GETLISTING_RESPONSE =
+ GetListingResponseProto.newBuilder().build();
+
+ private static final RenewLeaseResponseProto VOID_RENEWLEASE_RESPONSE =
+ RenewLeaseResponseProto.newBuilder().build();
+
+ private static final SaveNamespaceResponseProto VOID_SAVENAMESPACE_RESPONSE =
+ SaveNamespaceResponseProto.newBuilder().build();
+
+ private static final RefreshNodesResponseProto VOID_REFRESHNODES_RESPONSE =
+ RefreshNodesResponseProto.newBuilder().build();
+
+ private static final FinalizeUpgradeResponseProto VOID_FINALIZEUPGRADE_RESPONSE =
+ FinalizeUpgradeResponseProto.newBuilder().build();
+
+ private static final MetaSaveResponseProto VOID_METASAVE_RESPONSE =
+ MetaSaveResponseProto.newBuilder().build();
+
+ private static final GetFileInfoResponseProto VOID_GETFILEINFO_RESPONSE =
+ GetFileInfoResponseProto.newBuilder().build();
+
+ private static final GetFileLinkInfoResponseProto VOID_GETFILELINKINFO_RESPONSE =
+ GetFileLinkInfoResponseProto.newBuilder().build();
+
+ private static final SetQuotaResponseProto VOID_SETQUOTA_RESPONSE =
+ SetQuotaResponseProto.newBuilder().build();
+
+ private static final FsyncResponseProto VOID_FSYNC_RESPONSE =
+ FsyncResponseProto.newBuilder().build();
+
+ private static final SetTimesResponseProto VOID_SETTIMES_RESPONSE =
+ SetTimesResponseProto.newBuilder().build();
+
+ private static final CreateSymlinkResponseProto VOID_CREATESYMLINK_RESPONSE =
+ CreateSymlinkResponseProto.newBuilder().build();
+
+ private static final UpdatePipelineResponseProto
+ VOID_UPDATEPIPELINE_RESPONSE =
+ UpdatePipelineResponseProto.newBuilder().build();
+
+ private static final CancelDelegationTokenResponseProto
+ VOID_CANCELDELEGATIONTOKEN_RESPONSE =
+ CancelDelegationTokenResponseProto.newBuilder().build();
+
+ private static final SetBalancerBandwidthResponseProto
+ VOID_SETBALANCERBANDWIDTH_RESPONSE =
+ SetBalancerBandwidthResponseProto.newBuilder().build();
+
/**
* Constructor
*
@@ -215,9 +287,6 @@ public class ClientNamenodeProtocolServe
}
- static final CreateResponseProto VOID_CREATE_RESPONSE =
- CreateResponseProto.newBuilder().build();
-
@Override
public CreateResponseProto create(RpcController controller,
CreateRequestProto req) throws ServiceException {
@@ -232,9 +301,6 @@ public class ClientNamenodeProtocolServe
return VOID_CREATE_RESPONSE;
}
- static final AppendResponseProto NULL_APPEND_RESPONSE =
- AppendResponseProto.newBuilder().build();
-
@Override
public AppendResponseProto append(RpcController controller,
AppendRequestProto req) throws ServiceException {
@@ -244,7 +310,7 @@ public class ClientNamenodeProtocolServe
return AppendResponseProto.newBuilder()
.setBlock(PBHelper.convert(result)).build();
}
- return NULL_APPEND_RESPONSE;
+ return VOID_APPEND_RESPONSE;
} catch (IOException e) {
throw new ServiceException(e);
}
@@ -263,9 +329,6 @@ public class ClientNamenodeProtocolServe
}
- static final SetPermissionResponseProto VOID_SET_PERM_RESPONSE =
- SetPermissionResponseProto.newBuilder().build();
-
@Override
public SetPermissionResponseProto setPermission(RpcController controller,
SetPermissionRequestProto req) throws ServiceException {
@@ -277,9 +340,6 @@ public class ClientNamenodeProtocolServe
return VOID_SET_PERM_RESPONSE;
}
- static final SetOwnerResponseProto VOID_SET_OWNER_RESPONSE =
- SetOwnerResponseProto.newBuilder().build();
-
@Override
public SetOwnerResponseProto setOwner(RpcController controller,
SetOwnerRequestProto req) throws ServiceException {
@@ -293,9 +353,6 @@ public class ClientNamenodeProtocolServe
return VOID_SET_OWNER_RESPONSE;
}
- static final AbandonBlockResponseProto VOID_ADD_BLOCK_RESPONSE =
- AbandonBlockResponseProto.newBuilder().build();
-
@Override
public AbandonBlockResponseProto abandonBlock(RpcController controller,
AbandonBlockRequestProto req) throws ServiceException {
@@ -361,9 +418,6 @@ public class ClientNamenodeProtocolServe
}
}
- static final ReportBadBlocksResponseProto VOID_REP_BAD_BLOCK_RESPONSE =
- ReportBadBlocksResponseProto.newBuilder().build();
-
@Override
public ReportBadBlocksResponseProto reportBadBlocks(RpcController controller,
ReportBadBlocksRequestProto req) throws ServiceException {
@@ -377,9 +431,6 @@ public class ClientNamenodeProtocolServe
return VOID_REP_BAD_BLOCK_RESPONSE;
}
- static final ConcatResponseProto VOID_CONCAT_RESPONSE =
- ConcatResponseProto.newBuilder().build();
-
@Override
public ConcatResponseProto concat(RpcController controller,
ConcatRequestProto req) throws ServiceException {
@@ -403,9 +454,6 @@ public class ClientNamenodeProtocolServe
}
}
- static final Rename2ResponseProto VOID_RENAME2_RESPONSE =
- Rename2ResponseProto.newBuilder().build();
-
@Override
public Rename2ResponseProto rename2(RpcController controller,
Rename2RequestProto req) throws ServiceException {
@@ -442,8 +490,6 @@ public class ClientNamenodeProtocolServe
}
}
- static final GetListingResponseProto NULL_GETLISTING_RESPONSE =
- GetListingResponseProto.newBuilder().build();
@Override
public GetListingResponseProto getListing(RpcController controller,
GetListingRequestProto req) throws ServiceException {
@@ -455,16 +501,13 @@ public class ClientNamenodeProtocolServe
return GetListingResponseProto.newBuilder().setDirList(
PBHelper.convert(result)).build();
} else {
- return NULL_GETLISTING_RESPONSE;
+ return VOID_GETLISTING_RESPONSE;
}
} catch (IOException e) {
throw new ServiceException(e);
}
}
- static final RenewLeaseResponseProto VOID_RENEWLEASE_RESPONSE =
- RenewLeaseResponseProto.newBuilder().build();
-
@Override
public RenewLeaseResponseProto renewLease(RpcController controller,
RenewLeaseRequestProto req) throws ServiceException {
@@ -549,9 +592,6 @@ public class ClientNamenodeProtocolServe
}
}
- static final SaveNamespaceResponseProto VOID_SAVENAMESPACE_RESPONSE =
- SaveNamespaceResponseProto.newBuilder().build();
-
@Override
public SaveNamespaceResponseProto saveNamespace(RpcController controller,
SaveNamespaceRequestProto req) throws ServiceException {
@@ -578,9 +618,6 @@ public class ClientNamenodeProtocolServe
}
- static final RefreshNodesResponseProto VOID_REFRESHNODES_RESPONSE =
- RefreshNodesResponseProto.newBuilder().build();
-
@Override
public RefreshNodesResponseProto refreshNodes(RpcController controller,
RefreshNodesRequestProto req) throws ServiceException {
@@ -593,9 +630,6 @@ public class ClientNamenodeProtocolServe
}
- static final FinalizeUpgradeResponseProto VOID_FINALIZEUPGRADE_RESPONSE =
- FinalizeUpgradeResponseProto.newBuilder().build();
-
@Override
public FinalizeUpgradeResponseProto finalizeUpgrade(RpcController controller,
FinalizeUpgradeRequestProto req) throws ServiceException {
@@ -622,9 +656,6 @@ public class ClientNamenodeProtocolServe
}
}
- static final MetaSaveResponseProto VOID_METASAVE_RESPONSE =
- MetaSaveResponseProto.newBuilder().build();
-
@Override
public MetaSaveResponseProto metaSave(RpcController controller,
MetaSaveRequestProto req) throws ServiceException {
@@ -637,8 +668,6 @@ public class ClientNamenodeProtocolServe
}
- static final GetFileInfoResponseProto NULL_GETFILEINFO_RESPONSE =
- GetFileInfoResponseProto.newBuilder().build();
@Override
public GetFileInfoResponseProto getFileInfo(RpcController controller,
GetFileInfoRequestProto req) throws ServiceException {
@@ -649,14 +678,12 @@ public class ClientNamenodeProtocolServe
return GetFileInfoResponseProto.newBuilder().setFs(
PBHelper.convert(result)).build();
}
- return NULL_GETFILEINFO_RESPONSE;
+ return VOID_GETFILEINFO_RESPONSE;
} catch (IOException e) {
throw new ServiceException(e);
}
}
- static final GetFileLinkInfoResponseProto NULL_GETFILELINKINFO_RESPONSE =
- GetFileLinkInfoResponseProto.newBuilder().build();
@Override
public GetFileLinkInfoResponseProto getFileLinkInfo(RpcController controller,
GetFileLinkInfoRequestProto req) throws ServiceException {
@@ -668,7 +695,7 @@ public class ClientNamenodeProtocolServe
PBHelper.convert(result)).build();
} else {
System.out.println("got null result for getFileLinkInfo for " + req.getSrc());
- return NULL_GETFILELINKINFO_RESPONSE;
+ return VOID_GETFILELINKINFO_RESPONSE;
}
} catch (IOException e) {
@@ -689,9 +716,6 @@ public class ClientNamenodeProtocolServe
}
}
- static final SetQuotaResponseProto VOID_SETQUOTA_RESPONSE =
- SetQuotaResponseProto.newBuilder().build();
-
@Override
public SetQuotaResponseProto setQuota(RpcController controller,
SetQuotaRequestProto req) throws ServiceException {
@@ -704,9 +728,6 @@ public class ClientNamenodeProtocolServe
}
}
- static final FsyncResponseProto VOID_FSYNC_RESPONSE =
- FsyncResponseProto.newBuilder().build();
-
@Override
public FsyncResponseProto fsync(RpcController controller,
FsyncRequestProto req) throws ServiceException {
@@ -718,9 +739,6 @@ public class ClientNamenodeProtocolServe
}
}
- static final SetTimesResponseProto VOID_SETTIMES_RESPONSE =
- SetTimesResponseProto.newBuilder().build();
-
@Override
public SetTimesResponseProto setTimes(RpcController controller,
SetTimesRequestProto req) throws ServiceException {
@@ -732,9 +750,6 @@ public class ClientNamenodeProtocolServe
}
}
- static final CreateSymlinkResponseProto VOID_CREATESYMLINK_RESPONSE =
- CreateSymlinkResponseProto.newBuilder().build();
-
@Override
public CreateSymlinkResponseProto createSymlink(RpcController controller,
CreateSymlinkRequestProto req) throws ServiceException {
@@ -752,8 +767,12 @@ public class ClientNamenodeProtocolServe
GetLinkTargetRequestProto req) throws ServiceException {
try {
String result = server.getLinkTarget(req.getPath());
- return GetLinkTargetResponseProto.newBuilder().setTargetPath(result)
- .build();
+ GetLinkTargetResponseProto.Builder builder = GetLinkTargetResponseProto
+ .newBuilder();
+ if (result != null) {
+ builder.setTargetPath(result);
+ }
+ return builder.build();
} catch (IOException e) {
throw new ServiceException(e);
}
@@ -774,9 +793,6 @@ public class ClientNamenodeProtocolServe
}
}
- static final UpdatePipelineResponseProto VOID_UPDATEPIPELINE_RESPONSE =
- UpdatePipelineResponseProto.newBuilder().build();
-
@Override
public UpdatePipelineResponseProto updatePipeline(RpcController controller,
UpdatePipelineRequestProto req) throws ServiceException {
@@ -818,16 +834,12 @@ public class ClientNamenodeProtocolServe
long result = server.renewDelegationToken(PBHelper
.convertDelegationToken(req.getToken()));
return RenewDelegationTokenResponseProto.newBuilder()
- .setNewExireTime(result).build();
+ .setNewExpiryTime(result).build();
} catch (IOException e) {
throw new ServiceException(e);
}
}
- static final CancelDelegationTokenResponseProto
- VOID_CANCELDELEGATIONTOKEN_RESPONSE =
- CancelDelegationTokenResponseProto.newBuilder().build();
-
@Override
public CancelDelegationTokenResponseProto cancelDelegationToken(
RpcController controller, CancelDelegationTokenRequestProto req)
@@ -841,10 +853,6 @@ public class ClientNamenodeProtocolServe
}
}
- static final SetBalancerBandwidthResponseProto
- VOID_SETBALANCERBANDWIDTH_RESPONSE =
- SetBalancerBandwidthResponseProto.newBuilder().build();
-
@Override
public SetBalancerBandwidthResponseProto setBalancerBandwidth(
RpcController controller, SetBalancerBandwidthRequestProto req)
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java?rev=1437843&r1=1437842&r2=1437843&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java Thu Jan 24 02:45:45 2013
@@ -52,7 +52,6 @@ import org.apache.hadoop.hdfs.protocol.p
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto;
@@ -70,14 +69,13 @@ import org.apache.hadoop.hdfs.protocol.p
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto;
@@ -92,7 +90,6 @@ import org.apache.hadoop.hdfs.protocol.p
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto;
@@ -120,6 +117,10 @@ import org.apache.hadoop.ipc.ProtocolTra
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.RpcClientUtil;
import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenRequestProto;
+import org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenRequestProto;
+import org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenResponseProto;
+import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenRequestProto;
import org.apache.hadoop.security.token.Token;
import com.google.protobuf.ByteString;
@@ -136,6 +137,29 @@ public class ClientNamenodeProtocolTrans
ProtocolMetaInterface, ClientProtocol, Closeable, ProtocolTranslator {
final private ClientNamenodeProtocolPB rpcProxy;
+ static final GetServerDefaultsRequestProto VOID_GET_SERVER_DEFAULT_REQUEST =
+ GetServerDefaultsRequestProto.newBuilder().build();
+
+ private final static GetFsStatusRequestProto VOID_GET_FSSTATUS_REQUEST =
+ GetFsStatusRequestProto.newBuilder().build();
+
+ private final static SaveNamespaceRequestProto VOID_SAVE_NAMESPACE_REQUEST =
+ SaveNamespaceRequestProto.newBuilder().build();
+
+ private final static RollEditsRequestProto VOID_ROLLEDITS_REQUEST =
+ RollEditsRequestProto.getDefaultInstance();
+
+ private final static RefreshNodesRequestProto VOID_REFRESH_NODES_REQUEST =
+ RefreshNodesRequestProto.newBuilder().build();
+
+ private final static FinalizeUpgradeRequestProto
+ VOID_FINALIZE_UPGRADE_REQUEST =
+ FinalizeUpgradeRequestProto.newBuilder().build();
+
+ private final static GetDataEncryptionKeyRequestProto
+ VOID_GET_DATA_ENCRYPTIONKEY_REQUEST =
+ GetDataEncryptionKeyRequestProto.newBuilder().build();
+
public ClientNamenodeProtocolTranslatorPB(ClientNamenodeProtocolPB proxy) {
rpcProxy = proxy;
}
@@ -167,7 +191,7 @@ public class ClientNamenodeProtocolTrans
@Override
public FsServerDefaults getServerDefaults() throws IOException {
- GetServerDefaultsRequestProto req = GetServerDefaultsRequestProto.newBuilder().build();
+ GetServerDefaultsRequestProto req = VOID_GET_SERVER_DEFAULT_REQUEST;
try {
return PBHelper
.convert(rpcProxy.getServerDefaults(null, req).getServerDefaults());
@@ -480,9 +504,9 @@ public class ClientNamenodeProtocolTrans
@Override
public long[] getStats() throws IOException {
- GetFsStatusRequestProto req = GetFsStatusRequestProto.newBuilder().build();
try {
- return PBHelper.convert(rpcProxy.getFsStats(null, req));
+ return PBHelper.convert(rpcProxy.getFsStats(null,
+ VOID_GET_FSSTATUS_REQUEST));
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
@@ -529,10 +553,8 @@ public class ClientNamenodeProtocolTrans
@Override
public void saveNamespace() throws AccessControlException, IOException {
- SaveNamespaceRequestProto req = SaveNamespaceRequestProto.newBuilder()
- .build();
try {
- rpcProxy.saveNamespace(null, req);
+ rpcProxy.saveNamespace(null, VOID_SAVE_NAMESPACE_REQUEST);
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
@@ -540,9 +562,9 @@ public class ClientNamenodeProtocolTrans
@Override
public long rollEdits() throws AccessControlException, IOException {
- RollEditsRequestProto req = RollEditsRequestProto.getDefaultInstance();
try {
- RollEditsResponseProto resp = rpcProxy.rollEdits(null, req);
+ RollEditsResponseProto resp = rpcProxy.rollEdits(null,
+ VOID_ROLLEDITS_REQUEST);
return resp.getNewSegmentTxId();
} catch (ServiceException se) {
throw ProtobufHelper.getRemoteException(se);
@@ -564,9 +586,8 @@ public class ClientNamenodeProtocolTrans
@Override
public void refreshNodes() throws IOException {
- RefreshNodesRequestProto req = RefreshNodesRequestProto.newBuilder().build();
try {
- rpcProxy.refreshNodes(null, req);
+ rpcProxy.refreshNodes(null, VOID_REFRESH_NODES_REQUEST);
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
@@ -574,9 +595,8 @@ public class ClientNamenodeProtocolTrans
@Override
public void finalizeUpgrade() throws IOException {
- FinalizeUpgradeRequestProto req = FinalizeUpgradeRequestProto.newBuilder().build();
try {
- rpcProxy.finalizeUpgrade(null, req);
+ rpcProxy.finalizeUpgrade(null, VOID_FINALIZE_UPGRADE_REQUEST);
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
@@ -722,7 +742,8 @@ public class ClientNamenodeProtocolTrans
GetLinkTargetRequestProto req = GetLinkTargetRequestProto.newBuilder()
.setPath(path).build();
try {
- return rpcProxy.getLinkTarget(null, req).getTargetPath();
+ GetLinkTargetResponseProto rsp = rpcProxy.getLinkTarget(null, req);
+ return rsp.hasTargetPath() ? rsp.getTargetPath() : null;
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
@@ -783,7 +804,7 @@ public class ClientNamenodeProtocolTrans
setToken(PBHelper.convert(token)).
build();
try {
- return rpcProxy.renewDelegationToken(null, req).getNewExireTime();
+ return rpcProxy.renewDelegationToken(null, req).getNewExpiryTime();
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
@@ -824,12 +845,10 @@ public class ClientNamenodeProtocolTrans
@Override
public DataEncryptionKey getDataEncryptionKey() throws IOException {
- GetDataEncryptionKeyRequestProto req = GetDataEncryptionKeyRequestProto
- .newBuilder().build();
try {
- GetDataEncryptionKeyResponseProto rsp =
- rpcProxy.getDataEncryptionKey(null, req);
- return rsp.hasDataEncryptionKey() ?
+ GetDataEncryptionKeyResponseProto rsp = rpcProxy.getDataEncryptionKey(
+ null, VOID_GET_DATA_ENCRYPTIONKEY_REQUEST);
+ return rsp.hasDataEncryptionKey() ?
PBHelper.convert(rsp.getDataEncryptionKey()) : null;
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java?rev=1437843&r1=1437842&r2=1437843&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java Thu Jan 24 02:45:45 2013
@@ -84,7 +84,7 @@ public class DatanodeProtocolClientSideT
/** RpcController is not used and hence is set to null */
private final DatanodeProtocolPB rpcProxy;
- private static final VersionRequestProto VERSION_REQUEST =
+ private static final VersionRequestProto VOID_VERSION_REQUEST =
VersionRequestProto.newBuilder().build();
private final static RpcController NULL_CONTROLLER = null;
@@ -243,7 +243,7 @@ public class DatanodeProtocolClientSideT
public NamespaceInfo versionRequest() throws IOException {
try {
return PBHelper.convert(rpcProxy.versionRequest(NULL_CONTROLLER,
- VERSION_REQUEST).getInfo());
+ VOID_VERSION_REQUEST).getInfo());
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java?rev=1437843&r1=1437842&r2=1437843&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java Thu Jan 24 02:45:45 2013
@@ -62,15 +62,17 @@ public class DatanodeProtocolServerSideT
DatanodeProtocolPB {
private final DatanodeProtocol impl;
- private static final ErrorReportResponseProto ERROR_REPORT_RESPONSE_PROTO =
- ErrorReportResponseProto.newBuilder().build();
+ private static final ErrorReportResponseProto
+ VOID_ERROR_REPORT_RESPONSE_PROTO =
+ ErrorReportResponseProto.newBuilder().build();
private static final BlockReceivedAndDeletedResponseProto
- BLOCK_RECEIVED_AND_DELETE_RESPONSE =
+ VOID_BLOCK_RECEIVED_AND_DELETE_RESPONSE =
BlockReceivedAndDeletedResponseProto.newBuilder().build();
- private static final ReportBadBlocksResponseProto REPORT_BAD_BLOCK_RESPONSE =
- ReportBadBlocksResponseProto.newBuilder().build();
+ private static final ReportBadBlocksResponseProto
+ VOID_REPORT_BAD_BLOCK_RESPONSE =
+ ReportBadBlocksResponseProto.newBuilder().build();
private static final CommitBlockSynchronizationResponseProto
- COMMIT_BLOCK_SYNCHRONIZATION_RESPONSE_PROTO =
+ VOID_COMMIT_BLOCK_SYNCHRONIZATION_RESPONSE_PROTO =
CommitBlockSynchronizationResponseProto.newBuilder().build();
public DatanodeProtocolServerSideTranslatorPB(DatanodeProtocol impl) {
@@ -180,7 +182,7 @@ public class DatanodeProtocolServerSideT
} catch (IOException e) {
throw new ServiceException(e);
}
- return BLOCK_RECEIVED_AND_DELETE_RESPONSE;
+ return VOID_BLOCK_RECEIVED_AND_DELETE_RESPONSE;
}
@Override
@@ -192,7 +194,7 @@ public class DatanodeProtocolServerSideT
} catch (IOException e) {
throw new ServiceException(e);
}
- return ERROR_REPORT_RESPONSE_PROTO;
+ return VOID_ERROR_REPORT_RESPONSE_PROTO;
}
@Override
@@ -221,7 +223,7 @@ public class DatanodeProtocolServerSideT
} catch (IOException e) {
throw new ServiceException(e);
}
- return REPORT_BAD_BLOCK_RESPONSE;
+ return VOID_REPORT_BAD_BLOCK_RESPONSE;
}
@Override
@@ -242,6 +244,6 @@ public class DatanodeProtocolServerSideT
} catch (IOException e) {
throw new ServiceException(e);
}
- return COMMIT_BLOCK_SYNCHRONIZATION_RESPONSE_PROTO;
+ return VOID_COMMIT_BLOCK_SYNCHRONIZATION_RESPONSE_PROTO;
}
}
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/JournalProtocolServerSideTranslatorPB.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/JournalProtocolServerSideTranslatorPB.java?rev=1437843&r1=1437842&r2=1437843&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/JournalProtocolServerSideTranslatorPB.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/JournalProtocolServerSideTranslatorPB.java Thu Jan 24 02:45:45 2013
@@ -42,6 +42,13 @@ public class JournalProtocolServerSideTr
/** Server side implementation to delegate the requests to */
private final JournalProtocol impl;
+ private final static JournalResponseProto VOID_JOURNAL_RESPONSE =
+ JournalResponseProto.newBuilder().build();
+
+ private final static StartLogSegmentResponseProto
+ VOID_START_LOG_SEGMENT_RESPONSE =
+ StartLogSegmentResponseProto.newBuilder().build();
+
public JournalProtocolServerSideTranslatorPB(JournalProtocol impl) {
this.impl = impl;
}
@@ -56,7 +63,7 @@ public class JournalProtocolServerSideTr
} catch (IOException e) {
throw new ServiceException(e);
}
- return JournalResponseProto.newBuilder().build();
+ return VOID_JOURNAL_RESPONSE;
}
/** @see JournalProtocol#startLogSegment */
@@ -69,7 +76,7 @@ public class JournalProtocolServerSideTr
} catch (IOException e) {
throw new ServiceException(e);
}
- return StartLogSegmentResponseProto.newBuilder().build();
+ return VOID_START_LOG_SEGMENT_RESPONSE;
}
@Override
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java?rev=1437843&r1=1437842&r2=1437843&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java Thu Jan 24 02:45:45 2013
@@ -63,6 +63,12 @@ public class NamenodeProtocolServerSideT
NamenodeProtocolPB {
private final NamenodeProtocol impl;
+ private final static ErrorReportResponseProto VOID_ERROR_REPORT_RESPONSE =
+ ErrorReportResponseProto.newBuilder().build();
+
+ private final static EndCheckpointResponseProto VOID_END_CHECKPOINT_RESPONSE =
+ EndCheckpointResponseProto.newBuilder().build();
+
public NamenodeProtocolServerSideTranslatorPB(NamenodeProtocol impl) {
this.impl = impl;
}
@@ -91,8 +97,12 @@ public class NamenodeProtocolServerSideT
} catch (IOException e) {
throw new ServiceException(e);
}
- return GetBlockKeysResponseProto.newBuilder()
- .setKeys(PBHelper.convert(keys)).build();
+ GetBlockKeysResponseProto.Builder builder =
+ GetBlockKeysResponseProto.newBuilder();
+ if (keys != null) {
+ builder.setKeys(PBHelper.convert(keys));
+ }
+ return builder.build();
}
@Override
@@ -143,7 +153,7 @@ public class NamenodeProtocolServerSideT
} catch (IOException e) {
throw new ServiceException(e);
}
- return ErrorReportResponseProto.newBuilder().build();
+ return VOID_ERROR_REPORT_RESPONSE;
}
@Override
@@ -181,7 +191,7 @@ public class NamenodeProtocolServerSideT
} catch (IOException e) {
throw new ServiceException(e);
}
- return EndCheckpointResponseProto.newBuilder().build();
+ return VOID_END_CHECKPOINT_RESPONSE;
}
@Override
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolTranslatorPB.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolTranslatorPB.java?rev=1437843&r1=1437842&r2=1437843&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolTranslatorPB.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolTranslatorPB.java Thu Jan 24 02:45:45 2013
@@ -29,6 +29,7 @@ import org.apache.hadoop.hdfs.protocol.p
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdRequestProto;
@@ -67,13 +68,13 @@ public class NamenodeProtocolTranslatorP
/*
* Protobuf requests with no parameters instantiated only once
*/
- private static final GetBlockKeysRequestProto GET_BLOCKKEYS =
+ private static final GetBlockKeysRequestProto VOID_GET_BLOCKKEYS_REQUEST =
GetBlockKeysRequestProto.newBuilder().build();
- private static final GetTransactionIdRequestProto GET_TRANSACTIONID =
+ private static final GetTransactionIdRequestProto VOID_GET_TRANSACTIONID_REQUEST =
GetTransactionIdRequestProto.newBuilder().build();
- private static final RollEditLogRequestProto ROLL_EDITLOG =
+ private static final RollEditLogRequestProto VOID_ROLL_EDITLOG_REQUEST =
RollEditLogRequestProto.newBuilder().build();
- private static final VersionRequestProto VERSION_REQUEST =
+ private static final VersionRequestProto VOID_VERSION_REQUEST =
VersionRequestProto.newBuilder().build();
final private NamenodeProtocolPB rpcProxy;
@@ -104,8 +105,9 @@ public class NamenodeProtocolTranslatorP
@Override
public ExportedBlockKeys getBlockKeys() throws IOException {
try {
- return PBHelper.convert(rpcProxy.getBlockKeys(NULL_CONTROLLER,
- GET_BLOCKKEYS).getKeys());
+ GetBlockKeysResponseProto rsp = rpcProxy.getBlockKeys(NULL_CONTROLLER,
+ VOID_GET_BLOCKKEYS_REQUEST);
+ return rsp.hasKeys() ? PBHelper.convert(rsp.getKeys()) : null;
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
@@ -114,8 +116,8 @@ public class NamenodeProtocolTranslatorP
@Override
public long getTransactionID() throws IOException {
try {
- return rpcProxy.getTransactionId(NULL_CONTROLLER, GET_TRANSACTIONID)
- .getTxId();
+ return rpcProxy.getTransactionId(NULL_CONTROLLER,
+ VOID_GET_TRANSACTIONID_REQUEST).getTxId();
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
@@ -135,7 +137,7 @@ public class NamenodeProtocolTranslatorP
public CheckpointSignature rollEditLog() throws IOException {
try {
return PBHelper.convert(rpcProxy.rollEditLog(NULL_CONTROLLER,
- ROLL_EDITLOG).getSignature());
+ VOID_ROLL_EDITLOG_REQUEST).getSignature());
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
@@ -145,7 +147,7 @@ public class NamenodeProtocolTranslatorP
public NamespaceInfo versionRequest() throws IOException {
try {
return PBHelper.convert(rpcProxy.versionRequest(NULL_CONTROLLER,
- VERSION_REQUEST).getInfo());
+ VOID_VERSION_REQUEST).getInfo());
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/RefreshAuthorizationPolicyProtocolClientSideTranslatorPB.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/RefreshAuthorizationPolicyProtocolClientSideTranslatorPB.java?rev=1437843&r1=1437842&r2=1437843&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/RefreshAuthorizationPolicyProtocolClientSideTranslatorPB.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/RefreshAuthorizationPolicyProtocolClientSideTranslatorPB.java Thu Jan 24 02:45:45 2013
@@ -38,6 +38,10 @@ public class RefreshAuthorizationPolicyP
private final static RpcController NULL_CONTROLLER = null;
private final RefreshAuthorizationPolicyProtocolPB rpcProxy;
+ private final static RefreshServiceAclRequestProto
+ VOID_REFRESH_SERVICE_ACL_REQUEST =
+ RefreshServiceAclRequestProto.newBuilder().build();
+
public RefreshAuthorizationPolicyProtocolClientSideTranslatorPB(
RefreshAuthorizationPolicyProtocolPB rpcProxy) {
this.rpcProxy = rpcProxy;
@@ -50,10 +54,9 @@ public class RefreshAuthorizationPolicyP
@Override
public void refreshServiceAcl() throws IOException {
- RefreshServiceAclRequestProto request = RefreshServiceAclRequestProto
- .newBuilder().build();
try {
- rpcProxy.refreshServiceAcl(NULL_CONTROLLER, request);
+ rpcProxy.refreshServiceAcl(NULL_CONTROLLER,
+ VOID_REFRESH_SERVICE_ACL_REQUEST);
} catch (ServiceException se) {
throw ProtobufHelper.getRemoteException(se);
}