Posted to hdfs-commits@hadoop.apache.org by su...@apache.org on 2011/10/20 18:31:58 UTC

svn commit: r1186896 - in /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/ src/main/java/org/apache/hadoop/hdfs/server/namenode/ src/main/java/org/apache/hadoop/hdfs/server/protocol/

Author: suresh
Date: Thu Oct 20 16:31:57 2011
New Revision: 1186896

URL: http://svn.apache.org/viewvc?rev=1186896&view=rev
Log:
HDFS-2459. Separate datatypes for Journal Protocol. (suresh)


Added:
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/JournalProtocolServerSideTranslatorR23.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/JournalProtocolTranslatorR23.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/JournalWireProtocol.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/NamenodeRegistrationWritable.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/StorageInfoWritable.java
Modified:
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupOutputStream.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/JournalProtocol.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeRegistration.java

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1186896&r1=1186895&r2=1186896&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Thu Oct 20 16:31:57 2011
@@ -61,6 +61,8 @@ Trunk (unreleased changes)
     HDFS-2395. Add a root element in the JSON responses of webhdfs.
     (szetszwo)
 
+    HDFS-2459. Separate datatypes for Journal Protocol. (suresh)
+
 	HDFS-2181 Separate HDFS Client wire protocol data types (sanjay)
 
     HDFS-2294. Download of commons-daemon TAR should not be under target (tucu)

Added: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/JournalProtocolServerSideTranslatorR23.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/JournalProtocolServerSideTranslatorR23.java?rev=1186896&view=auto
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/JournalProtocolServerSideTranslatorR23.java (added)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/JournalProtocolServerSideTranslatorR23.java Thu Oct 20 16:31:57 2011
@@ -0,0 +1,114 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocolR23Compatible;
+
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hdfs.server.protocol.JournalProtocol;
+import org.apache.hadoop.ipc.ProtocolSignature;
+import org.apache.hadoop.ipc.RPC;
+
+/**
+ * This class is used on the server side. It handles calls that come across
+ * the wire from the Release 23 (and later) protocol family, translating the
+ * R23 data types to the native data types used inside the NN, as specified
+ * in the generic JournalProtocol.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Stable
+public class JournalProtocolServerSideTranslatorR23 implements
+    JournalWireProtocol {
+  private final JournalProtocol server;
+
+  /**
+   * Constructor
+   * 
+   * @param server - the NN server
+   * @throws IOException
+   */
+  public JournalProtocolServerSideTranslatorR23(JournalProtocol server)
+      throws IOException {
+    this.server = server;
+  }
+
+  /**
+   * The client side will redirect getProtocolSignature to
+   * getProtocolSignature2.
+   *
+   * However, the RPC layer below on the server side will call
+   * getProtocolVersion and possibly, in the future, getProtocolSignature.
+   * Hence we still implement it even though the end client's call will
+   * never reach here.
+   */
+  @Override
+  public ProtocolSignature getProtocolSignature(String protocol,
+      long clientVersion, int clientMethodsHash) throws IOException {
+    /**
+     * Don't forward this to the server. The protocol version and signature
+     * are those of {@link JournalWireProtocol}.
+     */
+    if (!protocol.equals(RPC.getProtocolName(JournalWireProtocol.class))) {
+      throw new IOException("Namenode Serverside implements " +
+          RPC.getProtocolName(JournalWireProtocol.class) +
+          ". The following requested protocol is unknown: " + protocol);
+    }
+
+    return ProtocolSignature.getProtocolSignature(clientMethodsHash,
+        JournalWireProtocol.versionID, JournalWireProtocol.class);
+  }
+
+  @Override
+  public ProtocolSignatureWritable getProtocolSignature2(String protocol,
+      long clientVersion, int clientMethodsHash) throws IOException {
+    /**
+     * Don't forward this to the server. The protocol version and signature
+     * are those of {@link JournalWireProtocol}.
+     */
+
+    return ProtocolSignatureWritable.convert(
+        this.getProtocolSignature(protocol, clientVersion, clientMethodsHash));
+  }
+
+  @Override
+  public long getProtocolVersion(String protocol, long clientVersion)
+      throws IOException {
+    if (protocol.equals(RPC.getProtocolName(JournalWireProtocol.class))) {
+      return JournalWireProtocol.versionID;
+    }
+    throw new IOException("Namenode Serverside implements " +
+        RPC.getProtocolName(JournalWireProtocol.class) +
+        ". The following requested protocol is unknown: " + protocol);
+  }
+
+  @Override
+  public void journal(NamenodeRegistrationWritable registration,
+      long firstTxnId, int numTxns, byte[] records) throws IOException {
+    server.journal(registration.convert(), firstTxnId, numTxns, records);
+  }
+
+  @Override
+  public void startLogSegment(NamenodeRegistrationWritable registration,
+      long txid) throws IOException {
+    server.startLogSegment(registration.convert(), txid);
+  }
+}

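For illustration, the call path through this translator can be exercised directly, without an RPC server in the middle. A minimal sketch, assuming a stand-in JournalProtocol implementation (none of the names or field values below are part of the patch; they are placeholders):

    // Hypothetical sketch: a wire-level call crossing the convert() boundary.
    JournalProtocol impl = new JournalProtocol() {
      public void journal(NamenodeRegistration reg, long firstTxnId,
          int numTxns, byte[] records) {
        System.out.println("journal(" + firstTxnId + ", " + numTxns + ")");
      }
      public void startLogSegment(NamenodeRegistration reg, long txid) {
        System.out.println("startLogSegment(" + txid + ")");
      }
      public long getProtocolVersion(String protocol, long clientVersion) {
        return JournalProtocol.versionID;
      }
      public ProtocolSignature getProtocolSignature(String protocol,
          long clientVersion, int clientMethodsHash) {
        return null; // not exercised in this sketch
      }
    };
    JournalWireProtocol wireView =
        new JournalProtocolServerSideTranslatorR23(impl);
    NamenodeRegistrationWritable reg = NamenodeRegistrationWritable.convert(
        new NamenodeRegistration("nn.example.com:8020", "nn.example.com:50070",
            new StorageInfo(-38, 123, "cluster-1", 0L), NamenodeRole.NAMENODE));
    wireView.startLogSegment(reg, 1L);         // wire types go in ...
    wireView.journal(reg, 1L, 0, new byte[0]); // ... native types reach impl

In production the translator is instead registered with the NN's RPC server, which is exactly what the BackupNode change later in this commit does.
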
Added: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/JournalProtocolTranslatorR23.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/JournalProtocolTranslatorR23.java?rev=1186896&view=auto
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/JournalProtocolTranslatorR23.java (added)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/JournalProtocolTranslatorR23.java Thu Oct 20 16:31:57 2011
@@ -0,0 +1,129 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocolR23Compatible;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.protocol.JournalProtocol;
+import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
+import org.apache.hadoop.io.retry.RetryPolicies;
+import org.apache.hadoop.io.retry.RetryPolicy;
+import org.apache.hadoop.io.retry.RetryProxy;
+import org.apache.hadoop.ipc.ProtocolSignature;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.UserGroupInformation;
+
+/**
+ * This class forwards JournalProtocol calls as RPC calls to the remote
+ * server while translating from the parameter types used in JournalProtocol
+ * to those used in protocolR23Compatible.*.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Stable
+public class JournalProtocolTranslatorR23 implements
+    JournalProtocol, Closeable {
+  private final JournalWireProtocol rpcProxyWithoutRetry;
+  private final JournalWireProtocol rpcProxy;
+
+  private static JournalWireProtocol createNamenode(
+      InetSocketAddress nameNodeAddr, Configuration conf,
+      UserGroupInformation ugi) throws IOException {
+    return RPC.getProxy(JournalWireProtocol.class,
+        JournalWireProtocol.versionID, nameNodeAddr, ugi, conf,
+        NetUtils.getSocketFactory(conf, JournalWireProtocol.class));
+  }
+
+  /** Wrap a {@link JournalWireProtocol} proxy with a retry policy. */
+  static JournalWireProtocol createNamenodeWithRetry(
+      JournalWireProtocol rpcNamenode) {
+    RetryPolicy createPolicy = RetryPolicies
+        .retryUpToMaximumCountWithFixedSleep(5,
+            HdfsConstants.LEASE_SOFTLIMIT_PERIOD, TimeUnit.MILLISECONDS);
+
+    Map<Class<? extends Exception>, RetryPolicy> remoteExceptionToPolicyMap = 
+        new HashMap<Class<? extends Exception>, RetryPolicy>();
+    remoteExceptionToPolicyMap.put(AlreadyBeingCreatedException.class,
+        createPolicy);
+
+    Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap = 
+        new HashMap<Class<? extends Exception>, RetryPolicy>();
+    exceptionToPolicyMap.put(RemoteException.class, RetryPolicies
+        .retryByRemoteException(RetryPolicies.TRY_ONCE_THEN_FAIL,
+            remoteExceptionToPolicyMap));
+    RetryPolicy methodPolicy = RetryPolicies.retryByException(
+        RetryPolicies.TRY_ONCE_THEN_FAIL, exceptionToPolicyMap);
+    Map<String, RetryPolicy> methodNameToPolicyMap = new HashMap<String, RetryPolicy>();
+
+    methodNameToPolicyMap.put("create", methodPolicy);
+
+    return (JournalWireProtocol) RetryProxy.create(
+        JournalWireProtocol.class, rpcNamenode, methodNameToPolicyMap);
+  }
+
+  public JournalProtocolTranslatorR23(InetSocketAddress nameNodeAddr,
+      Configuration conf) throws IOException {
+    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+    rpcProxyWithoutRetry = createNamenode(nameNodeAddr, conf, ugi);
+    rpcProxy = createNamenodeWithRetry(rpcProxyWithoutRetry);
+  }
+
+  @Override
+  public void close() {
+    RPC.stopProxy(rpcProxyWithoutRetry);
+  }
+
+  @Override
+  public long getProtocolVersion(String protocolName, long clientVersion)
+      throws IOException {
+    return rpcProxy.getProtocolVersion(protocolName, clientVersion);
+  }
+
+  @Override
+  public ProtocolSignature getProtocolSignature(String protocol,
+      long clientVersion, int clientMethodsHash) throws IOException {
+    return ProtocolSignatureWritable.convert(rpcProxy.getProtocolSignature2(
+        protocol, clientVersion, clientMethodsHash));
+  }
+
+  @Override
+  public void journal(NamenodeRegistration registration, long firstTxnId,
+      int numTxns, byte[] records) throws IOException {
+    rpcProxy.journal(NamenodeRegistrationWritable.convert(registration),
+        firstTxnId, numTxns, records);
+  }
+
+  @Override
+  public void startLogSegment(NamenodeRegistration registration, long txid)
+      throws IOException {
+    rpcProxy.startLogSegment(NamenodeRegistrationWritable.convert(registration),
+        txid);
+  }
+}

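On the client side the translator is a drop-in replacement for a raw JournalProtocol RPC proxy, which is exactly how the EditLogBackupOutputStream change below uses it. A minimal usage sketch, assuming a reachable backup node (address, storage fields, and payload are placeholders):

    // Hypothetical sketch -- all values are illustrative.
    InetSocketAddress bnAddress =
        new InetSocketAddress("backup.example.com", 50100);
    JournalProtocolTranslatorR23 proxy =
        new JournalProtocolTranslatorR23(bnAddress, new HdfsConfiguration());
    NamenodeRegistration reg = new NamenodeRegistration("nn.example.com:8020",
        "nn.example.com:50070", new StorageInfo(-38, 123, "cluster-1", 0L),
        NamenodeRole.NAMENODE);
    try {
      proxy.startLogSegment(reg, 42L);          // callers pass native types;
      proxy.journal(reg, 42L, 1, new byte[8]);  // conversion happens inside
    } finally {
      proxy.close();                            // stops the underlying RPC proxy
    }
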
Added: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/JournalWireProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/JournalWireProtocol.java?rev=1186896&view=auto
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/JournalWireProtocol.java (added)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/JournalWireProtocol.java Thu Oct 20 16:31:57 2011
@@ -0,0 +1,81 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocolR23Compatible;
+
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.ipc.VersionedProtocol;
+import org.apache.hadoop.security.KerberosInfo;
+
+/**
+ * This class defines the actual protocol used to communicate with the
+ * NN via RPC using writable types.
+ * The parameter types used in the methods of this package are separate
+ * from those used internally in the NN and DFSClient, and hence need to
+ * be converted using {@link JournalProtocolTranslatorR23}
+ * and {@link JournalProtocolServerSideTranslatorR23}.
+ */
+@KerberosInfo(
+    serverPrincipal = DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY,
+    clientPrincipal = DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY)
+@InterfaceAudience.Private
+public interface JournalWireProtocol extends VersionedProtocol {
+  
+  /**
+   * The rules for changing this protocol are the same as those for
+   * {@link ClientNamenodeWireProtocol} - see that java file for details.
+   */
+  public static final long versionID = 1L;
+
+  /**
+   * Journal edit records.
+   * This message is sent by the active name-node to the backup node
+   * via {@code EditLogBackupOutputStream} in order to synchronize meta-data
+   * changes with the backup namespace image.
+   * 
+   * @param registration active node registration
+   * @param firstTxnId the first transaction of this batch
+   * @param numTxns number of transactions
+   * @param records byte array containing serialized journal records
+   */
+  public void journal(NamenodeRegistrationWritable registration,
+                      long firstTxnId,
+                      int numTxns,
+                      byte[] records) throws IOException;
+
+  /**
+   * Notify the BackupNode that the NameNode has rolled its edit logs
+   * and is now writing a new log segment.
+   * @param registration the registration of the active NameNode
+   * @param txid the first txid in the new log
+   */
+  public void startLogSegment(NamenodeRegistrationWritable registration,
+      long txid) throws IOException;
+  
+  /**
+   * This method is defined to get the protocol signature using
+   * the R23 protocol - hence the suffix 2 has been added to the method
+   * name to avoid a conflict.
+   */
+  public org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable
+           getProtocolSignature2(String protocol, long clientVersion,
+      int clientMethodsHash) throws IOException;
+}

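Because the interface extends VersionedProtocol, compatibility is gated by versionID: the RPC layer checks it before dispatching, and the server-side translator above rejects protocol names it does not serve. A sketch of the equivalent explicit check (hypothetical; wireProxy stands for any JournalWireProtocol proxy):

    // Hypothetical pre-flight check mirroring the translator's logic.
    long serverVersion = wireProxy.getProtocolVersion(
        RPC.getProtocolName(JournalWireProtocol.class),
        JournalWireProtocol.versionID);
    if (serverVersion != JournalWireProtocol.versionID) {
      throw new IOException("JournalWireProtocol version mismatch: client "
          + JournalWireProtocol.versionID + ", server " + serverVersion);
    }
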
Added: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/NamenodeRegistrationWritable.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/NamenodeRegistrationWritable.java?rev=1186896&view=auto
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/NamenodeRegistrationWritable.java (added)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/NamenodeRegistrationWritable.java Thu Oct 20 16:31:57 2011
@@ -0,0 +1,98 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.protocolR23Compatible;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.WritableFactories;
+import org.apache.hadoop.io.WritableFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
+import org.apache.hadoop.hdfs.server.common.StorageInfo;
+import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
+
+/**
+ * Information sent by a subordinate name-node to the active name-node
+ * during the registration process. 
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class NamenodeRegistrationWritable implements Writable {
+  private String rpcAddress;    // RPC address of the node
+  private String httpAddress;   // HTTP address of the node
+  private NamenodeRole role;    // node role
+  private StorageInfoWritable storageInfo;
+
+  public NamenodeRegistrationWritable() { }
+
+  public NamenodeRegistrationWritable(String address,
+                              String httpAddress,
+                              NamenodeRole role,
+                              StorageInfo storageInfo) {
+    this.rpcAddress = address;
+    this.httpAddress = httpAddress;
+    this.role = role;
+    this.storageInfo = StorageInfoWritable.convert(storageInfo);
+  }
+
+  /////////////////////////////////////////////////
+  // Writable
+  /////////////////////////////////////////////////
+  static {
+    WritableFactories.setFactory
+      (NamenodeRegistrationWritable.class,
+       new WritableFactory() {
+          public Writable newInstance() {
+            return new NamenodeRegistrationWritable();
+          }
+       });
+  }
+
+  @Override // Writable
+  public void write(DataOutput out) throws IOException {
+    Text.writeString(out, rpcAddress);
+    Text.writeString(out, httpAddress);
+    Text.writeString(out, role.name());
+    storageInfo.write(out);
+  }
+
+  @Override // Writable
+  public void readFields(DataInput in) throws IOException {
+    rpcAddress = Text.readString(in);
+    httpAddress = Text.readString(in);
+    role = NamenodeRole.valueOf(Text.readString(in));
+    // Allocate before deserializing: the no-arg constructor used by the
+    // WritableFactory leaves storageInfo null.
+    storageInfo = new StorageInfoWritable();
+    storageInfo.readFields(in);
+  }
+
+  public static NamenodeRegistrationWritable convert(NamenodeRegistration reg) {
+    // NamenodeRegistration extends StorageInfo, so reg itself supplies the
+    // storage fields.
+    return new NamenodeRegistrationWritable(reg.getAddress(),
+        reg.getHttpAddress(), reg.getRole(), reg);
+  }
+
+  public NamenodeRegistration convert() {
+    return new NamenodeRegistration(rpcAddress, httpAddress,
+        storageInfo.convert(), role);
+  }
+}

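Since the class implements Writable, a registration survives a round trip through a plain byte stream, which is effectively what the RPC layer does with it. A minimal sketch using the standard java.io stream classes (field values are placeholders):

    // Hypothetical round trip; field values are illustrative only.
    NamenodeRegistrationWritable sent = NamenodeRegistrationWritable.convert(
        new NamenodeRegistration("nn.example.com:8020", "nn.example.com:50070",
            new StorageInfo(-38, 123, "cluster-1", 0L), NamenodeRole.NAMENODE));
    ByteArrayOutputStream buf = new ByteArrayOutputStream();
    sent.write(new DataOutputStream(buf));

    NamenodeRegistrationWritable received = new NamenodeRegistrationWritable();
    received.readFields(new DataInputStream(
        new ByteArrayInputStream(buf.toByteArray())));
    NamenodeRegistration reg = received.convert(); // back to the NN-internal type
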
Added: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/StorageInfoWritable.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/StorageInfoWritable.java?rev=1186896&view=auto
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/StorageInfoWritable.java (added)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/StorageInfoWritable.java Thu Oct 20 16:31:57 2011
@@ -0,0 +1,86 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocolR23Compatible;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdfs.server.common.StorageInfo;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.WritableFactories;
+import org.apache.hadoop.io.WritableFactory;
+import org.apache.hadoop.io.WritableUtils;
+
+/**
+ * Common writable class for storage information.
+ */
+@InterfaceAudience.Private
+public class StorageInfoWritable implements Writable {
+  private int layoutVersion;
+  private int namespaceID;
+  private String clusterID;
+  private long cTime;
+  
+  public StorageInfoWritable() {
+    this(0, 0, "", 0L);
+  }
+  
+  public StorageInfoWritable(int layoutV, int nsID, String cid, long cT) {
+    layoutVersion = layoutV;
+    clusterID = cid;
+    namespaceID = nsID;
+    cTime = cT;
+  }
+  
+  /////////////////////////////////////////////////
+  // Writable
+  /////////////////////////////////////////////////
+  static {
+    WritableFactories.setFactory(StorageInfoWritable.class,
+        new WritableFactory() {
+          public Writable newInstance() {
+            return new StorageInfoWritable();
+          }
+        });
+  }
+  
+  @Override // Writable
+  public void write(DataOutput out) throws IOException {
+    out.writeInt(layoutVersion);
+    out.writeInt(namespaceID);
+    WritableUtils.writeString(out, clusterID);
+    out.writeLong(cTime);
+  }
+
+  @Override // Writable
+  public void readFields(DataInput in) throws IOException {
+    layoutVersion = in.readInt();
+    namespaceID = in.readInt();
+    clusterID = WritableUtils.readString(in);
+    cTime = in.readLong();
+  }
+
+  public StorageInfo convert() {
+    return new StorageInfo(layoutVersion, namespaceID, clusterID, cTime);
+  }
+  
+  public static StorageInfoWritable convert(StorageInfo from) {
+    return new StorageInfoWritable(from.getLayoutVersion(),
+        from.getNamespaceID(), from.getClusterID(), from.getCTime());
+  }
+}
\ No newline at end of file

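The static WritableFactories registration at the top of both new writables is what lets Hadoop's ObjectWritable machinery instantiate them reflectively during RPC deserialization. Roughly (a hedged sketch of the mechanism, not code from this patch; in stands for the RPC DataInput stream):

    // Hypothetical: how the registered factory is used on the receiving side.
    Writable w = WritableFactories.newInstance(StorageInfoWritable.class);
    w.readFields(in);
    StorageInfo info = ((StorageInfoWritable) w).convert();
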
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java?rev=1186896&r1=1186895&r2=1186896&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java Thu Oct 20 16:31:57 2011
@@ -28,10 +28,12 @@ import org.apache.hadoop.hdfs.DFSConfigK
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocolR23Compatible.JournalProtocolServerSideTranslatorR23;
+import org.apache.hadoop.hdfs.protocolR23Compatible.JournalWireProtocol;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.common.Storage;
-import org.apache.hadoop.hdfs.server.protocol.JournalProtocol;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
+import org.apache.hadoop.hdfs.server.protocol.JournalProtocol;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
@@ -187,13 +189,17 @@ public class BackupNode extends NameNode
     super.stop();
   }
 
-  static class BackupNodeRpcServer extends NameNodeRpcServer implements JournalProtocol {
+  static class BackupNodeRpcServer extends NameNodeRpcServer implements
+      JournalProtocol {
     private final String nnRpcAddress;
     
     private BackupNodeRpcServer(Configuration conf, BackupNode nn)
         throws IOException {
       super(conf, nn);
-      this.server.addProtocol(JournalProtocol.class, this);
+      JournalProtocolServerSideTranslatorR23 journalProtocolTranslator = 
+          new JournalProtocolServerSideTranslatorR23(this);
+      this.server.addProtocol(JournalWireProtocol.class,
+          journalProtocolTranslator);
       nnRpcAddress = nn.nnRpcAddress;
     }
 
@@ -202,9 +208,8 @@ public class BackupNode extends NameNode
         throws IOException {
       if (protocol.equals(JournalProtocol.class.getName())) {
         return JournalProtocol.versionID;
-      } else {
-        return super.getProtocolVersion(protocol, clientVersion);
       }
+      return super.getProtocolVersion(protocol, clientVersion);
     }
   
     /////////////////////////////////////////////////////
@@ -281,7 +286,7 @@ public class BackupNode extends NameNode
     // connect to name node
     InetSocketAddress nnAddress = NameNode.getServiceAddress(conf, true);
     this.namenode =
-      (NamenodeProtocol) RPC.waitForProxy(NamenodeProtocol.class,
+      RPC.waitForProxy(NamenodeProtocol.class,
           NamenodeProtocol.versionID, nnAddress, conf);
     this.nnRpcAddress = getHostPortString(nnAddress);
     this.nnHttpAddress = getHostPortString(super.getHttpServerAddress(conf));
@@ -295,7 +300,9 @@ public class BackupNode extends NameNode
         LOG.info("Problem connecting to server: " + nnAddress);
         try {
           Thread.sleep(1000);
-        } catch (InterruptedException ie) {}
+        } catch (InterruptedException ie) {
+          LOG.warn("Encountered exception ", e);
+        }
       }
     }
     return nsInfo;
@@ -344,7 +351,9 @@ public class BackupNode extends NameNode
         LOG.info("Problem connecting to name-node: " + nnRpcAddress);
         try {
           Thread.sleep(1000);
-        } catch (InterruptedException ie) {}
+        } catch (InterruptedException ie) {
+          LOG.warn("Encountered exception ", e);
+        }
       }
     }
 

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupOutputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupOutputStream.java?rev=1186896&r1=1186895&r2=1186896&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupOutputStream.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupOutputStream.java Thu Oct 20 16:31:57 2011
@@ -22,6 +22,7 @@ import java.net.InetSocketAddress;
 import java.util.Arrays;
 
 import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.protocolR23Compatible.JournalProtocolTranslatorR23;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.protocol.JournalProtocol;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
@@ -56,8 +57,7 @@ class EditLogBackupOutputStream extends 
       NetUtils.createSocketAddr(bnRegistration.getAddress());
     try {
       this.backupNode =
-        RPC.getProxy(JournalProtocol.class,
-            JournalProtocol.versionID, bnAddress, new HdfsConfiguration());
+          new JournalProtocolTranslatorR23(bnAddress, new HdfsConfiguration());
     } catch(IOException e) {
       Storage.LOG.error("Error connecting to: " + bnAddress, e);
       throw e;

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/JournalProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/JournalProtocol.java?rev=1186896&r1=1186895&r2=1186896&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/JournalProtocol.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/JournalProtocol.java Thu Oct 20 16:31:57 2011
@@ -21,6 +21,8 @@ import java.io.IOException;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.protocolR23Compatible.ClientNamenodeWireProtocol;
+import org.apache.hadoop.hdfs.protocolR23Compatible.JournalWireProtocol;
 import org.apache.hadoop.ipc.VersionedProtocol;
 import org.apache.hadoop.security.KerberosInfo;
 
@@ -33,6 +35,17 @@ import org.apache.hadoop.security.Kerber
     clientPrincipal = DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY)
 @InterfaceAudience.Private
 public interface JournalProtocol extends VersionedProtocol {
+  /**
+   * This class is used by both the Namenode (client) and BackupNode (server)
+   * to insulate them from changes to the protocol serialization.
+   *
+   * If you are adding/changing this protocol's interface then you need to
+   * change both this class and ALSO
+   * {@link JournalWireProtocol}.
+   * These changes need to be done in a compatible fashion as described in
+   * {@link ClientNamenodeWireProtocol}.
+   */
   public static final long versionID = 1L;
 
   /**

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeRegistration.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeRegistration.java?rev=1186896&r1=1186895&r2=1186896&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeRegistration.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeRegistration.java Thu Oct 20 16:31:57 2011
@@ -52,10 +52,9 @@ implements NodeRegistration {
                               String httpAddress,
                               StorageInfo storageInfo,
                               NamenodeRole role) {
-    super();
+    super(storageInfo);
     this.rpcAddress = address;
     this.httpAddress = httpAddress;
-    this.setStorageInfo(storageInfo);
     this.role = role;
   }
 
@@ -64,6 +63,10 @@ implements NodeRegistration {
     return rpcAddress;
   }
   
+  public String getHttpAddress() {
+    return httpAddress;
+  }
+  
   @Override // NodeRegistration
   public String getRegistrationID() {
     return Storage.getRegistrationID(this);