Posted to commits@hbase.apache.org by st...@apache.org on 2012/09/27 07:39:44 UTC

svn commit: r1390847 - in /hbase/trunk/hbase-server/src: main/java/org/apache/hadoop/hbase/client/ main/java/org/apache/hadoop/hbase/io/hfile/ main/java/org/apache/hadoop/hbase/ipc/ main/java/org/apache/hadoop/hbase/master/ main/java/org/apache/hadoop/...

Author: stack
Date: Thu Sep 27 05:39:43 2012
New Revision: 1390847

URL: http://svn.apache.org/viewvc?rev=1390847&view=rev
Log:
HBASE-6876 Clean up WARNs and log messages around startup; REVERT OF OVERCOMMIT

Modified:
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRpcMetrics.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseServer.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcEngine.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ActiveMasterManager.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileInlineToRootChunkConversion.java

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java?rev=1390847&r1=1390846&r2=1390847&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java Thu Sep 27 05:39:43 2012
@@ -2450,7 +2450,7 @@ public class HConnectionManager {
       c.getInt("hbase.client.serverside.retries.multiplier", 10);
     int retries = hcRetries * serversideMultiplier;
     c.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, retries);
-    log.debug("HConnection retries=" + retries);
+    log.debug("Set serverside HConnection retries=" + retries);
   }
 }
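
For context, the hunk above multiplies the configured client retry count by a
server-side multiplier before stashing the product back into the Configuration.
A minimal sketch of the arithmetic, using the names from the diff (the default
constant name is an assumption, not shown in this commit):

    Configuration c = HBaseConfiguration.create();
    // Client-side retry count, scaled up for server-side HConnections.
    int hcRetries = c.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
        HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
    int serversideMultiplier =
        c.getInt("hbase.client.serverside.retries.multiplier", 10);
    int retries = hcRetries * serversideMultiplier;  // e.g. 10 * 10 = 100
    c.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, retries);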
 

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java?rev=1390847&r1=1390846&r2=1390847&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java Thu Sep 27 05:39:43 2012
@@ -829,7 +829,7 @@ public class HFileBlockIndex {
      * @throws IOException
      */
     public long writeIndexBlocks(FSDataOutputStream out) throws IOException {
-      if (curInlineChunk != null && curInlineChunk.getNumEntries() != 0) {
+      if (curInlineChunk.getNumEntries() != 0) {
         throw new IOException("Trying to write a multi-level block index, " +
             "but are " + curInlineChunk.getNumEntries() + " entries in the " +
             "last inline chunk.");
@@ -840,11 +840,9 @@ public class HFileBlockIndex {
       byte[] midKeyMetadata = numLevels > 1 ? rootChunk.getMidKeyMetadata()
           : null;
 
-      if (curInlineChunk != null) {
-        while (rootChunk.getRootSize() > maxChunkSize) {
-          rootChunk = writeIntermediateLevel(out, rootChunk);
-          numLevels += 1;
-        }
+      while (rootChunk.getRootSize() > maxChunkSize) {
+        rootChunk = writeIntermediateLevel(out, rootChunk);
+        numLevels += 1;
       }
 
       // write the root level
@@ -1006,18 +1004,11 @@ public class HFileBlockIndex {
      */
     @Override
     public boolean shouldWriteBlock(boolean closing) {
-      if (singleLevelOnly) {
+      if (singleLevelOnly)
         throw new UnsupportedOperationException(INLINE_BLOCKS_NOT_ALLOWED);
-      }
 
-      if (curInlineChunk == null) {
-        throw new IllegalStateException("curInlineChunk is null; has shouldWriteBlock been " +
-            "called with closing=true and then called again?");
-      }
-
-      if (curInlineChunk.getNumEntries() == 0) {
+      if (curInlineChunk.getNumEntries() == 0)
         return false;
-      }
 
       // We do have some entries in the current inline chunk.
       if (closing) {
@@ -1027,7 +1018,7 @@ public class HFileBlockIndex {
 
           expectNumLevels(1);
           rootChunk = curInlineChunk;
-          curInlineChunk = null;  // Disallow adding any more index entries.
+          curInlineChunk = new BlockIndexChunk();
           return false;
         }
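
The revert above swaps between two close-time strategies for the last inline
chunk. A hedged sketch of the difference, using the field names from the diff
(simplified, not the full writer logic):

    // Strategy removed by this revert: null the field so any further index
    // append is caught by an explicit guard in later calls.
    //   rootChunk = curInlineChunk;
    //   curInlineChunk = null;   // shouldWriteBlock() must then null-check

    // Strategy restored by this revert: hand the chunk to the root level and
    // start a fresh, empty one, so later calls simply see zero entries and
    // return false with no null guard needed.
    rootChunk = curInlineChunk;
    curInlineChunk = new BlockIndexChunk();  // getNumEntries() == 0 afterwards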
 

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRpcMetrics.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRpcMetrics.java?rev=1390847&r1=1390846&r2=1390847&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRpcMetrics.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRpcMetrics.java Thu Sep 27 05:39:43 2012
@@ -62,8 +62,8 @@ public class HBaseRpcMetrics implements 
 
     metricsRecord.setTag("port", port);
 
-    LOG.info("Initializing RPC Metrics for className="
-        + hostName + " on port=" + port);
+    LOG.info("Initializing RPC Metrics with hostName="
+        + hostName + ", port=" + port);
 
     context.registerUpdater(this);
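
The registerUpdater call above is the legacy Hadoop metrics (v1) Updater
pattern: the context periodically calls back into doUpdates() to snapshot and
push the record. A minimal sketch under that assumption (class and metric
names here are illustrative, not from this commit):

    import org.apache.hadoop.metrics.MetricsContext;
    import org.apache.hadoop.metrics.MetricsRecord;
    import org.apache.hadoop.metrics.Updater;

    class SketchRpcMetrics implements Updater {
      private final MetricsRecord metricsRecord;

      SketchRpcMetrics(MetricsContext context) {
        this.metricsRecord = context.createRecord("rpc");
        context.registerUpdater(this);  // context now polls doUpdates()
      }

      public void doUpdates(MetricsContext unused) {
        metricsRecord.setMetric("calls", 1);  // illustrative metric
        metricsRecord.update();               // push the snapshot
      }
    }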
 

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseServer.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseServer.java?rev=1390847&r1=1390846&r2=1390847&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseServer.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseServer.java Thu Sep 27 05:39:43 2012
@@ -525,7 +525,6 @@ public abstract class HBaseServer implem
         readers[i] = reader;
         readPool.execute(reader);
       }
-      LOG.info("Started " + readThreads + " reader(s) in Listener.");
 
       // Register accepts on the server socket with the selector.
       acceptChannel.register(selector, SelectionKey.OP_ACCEPT);
@@ -542,6 +541,7 @@ public abstract class HBaseServer implem
         this.readSelector = Selector.open();
       }
       public void run() {
+        LOG.info("Starting " + getName());
         try {
           doRunLoop();
         } finally {
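
The pair of hunks above moves startup logging from one aggregate line in the
Listener to one line per Reader as its run loop begins. A hedged sketch of the
per-thread variant (names illustrative; the real Reader is an inner class of
HBaseServer.Listener):

    class Reader extends Thread {
      private static final org.apache.commons.logging.Log LOG =
          org.apache.commons.logging.LogFactory.getLog(Reader.class);

      public void run() {
        LOG.info("Starting " + getName());  // each reader announces itself
        try {
          doRunLoop();  // select/read loop, elided
        } finally {
          // cleanup, elided
        }
      }

      private void doRunLoop() { /* ... */ }
    }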

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcEngine.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcEngine.java?rev=1390847&r1=1390846&r2=1390847&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcEngine.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcEngine.java Thu Sep 27 05:39:43 2012
@@ -20,12 +20,14 @@ package org.apache.hadoop.hbase.ipc;
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
-
 import javax.net.SocketFactory;
 
+import org.apache.hadoop.hbase.ipc.VersionedProtocol;
+import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.security.User;
+
+import com.google.protobuf.ServiceException;
 
 /** An RPC implementation. */
 @InterfaceAudience.Private

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ActiveMasterManager.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ActiveMasterManager.java?rev=1390847&r1=1390846&r2=1390847&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ActiveMasterManager.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ActiveMasterManager.java Thu Sep 27 05:39:43 2012
@@ -145,11 +145,10 @@ class ActiveMasterManager extends ZooKee
             this.watcher.getMasterAddressZNode(), this.sn)) {
 
           // If we were a backup master before, delete our ZNode from the backup
-          // master directory since we are the active now)
-          if (ZKUtil.checkExists(this.watcher, backupZNode) != -1) {
-            LOG.info("Deleting ZNode for " + backupZNode + " from backup master directory");
-            ZKUtil.deleteNodeFailSilent(this.watcher, backupZNode);
-          }
+          // master directory since we are the active now
+          LOG.info("Deleting ZNode for " + backupZNode + " from backup master directory");
+          ZKUtil.deleteNodeFailSilent(this.watcher, backupZNode);
+
           // Save the znode in a file, this will allow to check if we crash in the launch scripts
           ZNodeClearer.writeMyEphemeralNodeOnDisk(this.sn.toString());
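
The revert above drops the existence check and calls deleteNodeFailSilent
unconditionally, which is safe because that helper already tolerates a missing
node. For reference, a sketch of the guarded variant that was reverted out
(ZKUtil.checkExists returns the znode version, or -1 when the node is absent):

    if (ZKUtil.checkExists(watcher, backupZNode) != -1) {
      LOG.info("Deleting ZNode for " + backupZNode +
          " from backup master directory");
      ZKUtil.deleteNodeFailSilent(watcher, backupZNode);
    }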
 

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java?rev=1390847&r1=1390846&r2=1390847&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java Thu Sep 27 05:39:43 2012
@@ -185,7 +185,6 @@ import org.apache.hadoop.net.DNS;
 import org.apache.zookeeper.KeeperException;
 import org.apache.zookeeper.Watcher;
 import org.apache.hadoop.hbase.trace.SpanReceiverHost;
-import org.apache.hadoop.hbase.util.FSUtils;
 
 import com.google.protobuf.RpcController;
 import com.google.protobuf.ServiceException;
@@ -325,8 +324,6 @@ Server {
   public HMaster(final Configuration conf)
   throws IOException, KeeperException, InterruptedException {
     this.conf = new Configuration(conf);
-    LOG.info("hbase.rootdir=" + FSUtils.getRootDir(this.conf) +
-      ", hbase.cluster.distributed=" + this.conf.getBoolean("hbase.cluster.distributed", false));
     // Disable the block cache on the master
     this.conf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0.0f);
     // Set how many times to retry talking to another server over HConnection.
@@ -336,7 +333,7 @@ Server {
       conf.get("hbase.master.dns.interface", "default"),
       conf.get("hbase.master.dns.nameserver", "default")));
     int port = conf.getInt(HConstants.MASTER_PORT, HConstants.DEFAULT_MASTER_PORT);
-    // Creation of a ISA will force a resolve.
+    // Creation of a HSA will force a resolve.
     InetSocketAddress initialIsa = new InetSocketAddress(hostname, port);
     if (initialIsa.getAddress() == null) {
       throw new IllegalArgumentException("Failed resolve of " + initialIsa);
@@ -2294,7 +2291,7 @@ Server {
    * @see org.apache.hadoop.hbase.master.HMasterCommandLine
    */
   public static void main(String [] args) throws Exception {
-    VersionInfo.logVersion();
+	VersionInfo.logVersion();
     new HMasterCommandLine(HMaster.class).doMain(args);
   }
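
For context on the "force a resolve" comment above: constructing a
java.net.InetSocketAddress from a hostname resolves it eagerly, and an
unresolvable name leaves getAddress() returning null rather than throwing. A
minimal sketch (hostname illustrative):

    InetSocketAddress isa = new InetSocketAddress("master.example.invalid", 60000);
    if (isa.getAddress() == null) {
      // name did not resolve; report it as a configuration problem
      throw new IllegalArgumentException("Failed resolve of " + isa);
    }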
 

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java?rev=1390847&r1=1390846&r2=1390847&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java Thu Sep 27 05:39:43 2012
@@ -134,11 +134,12 @@ public class RecoverableZooKeeper {
         switch (e.code()) {
           case NONODE:
             if (isRetry) {
-              LOG.info("Node " + path + " already deleted. Assuming a " +
+              LOG.info("Node " + path + " already deleted. Assuming that a " +
                   "previous attempt succeeded.");
               return;
             }
-            LOG.warn("Node " + path + " already deleted, retry=" + isRetry);
+            LOG.warn("Node " + path + " already deleted, and this is not a " +
+                     "retry");
             throw e;
 
           case CONNECTIONLOSS:
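
The NONODE branch above makes delete idempotent across retries: once a retry
is in flight, a missing node is taken as evidence that an earlier attempt
succeeded. A hedged sketch of the pattern (retry limits and backoff elided;
the real RecoverableZooKeeper loop is more involved):

    void deleteIdempotent(ZooKeeper zk, String path, int version)
        throws KeeperException, InterruptedException {
      boolean isRetry = false;
      while (true) {
        try {
          zk.delete(path, version);
          return;
        } catch (KeeperException e) {
          switch (e.code()) {
            case NONODE:
              if (isRetry) return;  // earlier attempt evidently succeeded
              throw e;              // first try: the node really was missing
            case CONNECTIONLOSS:
              isRetry = true;       // delete may or may not have been applied
              break;                // loop and retry
            default:
              throw e;
          }
        }
      }
    }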

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileInlineToRootChunkConversion.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileInlineToRootChunkConversion.java?rev=1390847&r1=1390846&r2=1390847&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileInlineToRootChunkConversion.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileInlineToRootChunkConversion.java Thu Sep 27 05:39:43 2012
@@ -1,85 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.hadoop.hbase.io.hfile;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.junit.experimental.categories.Category;
-import org.junit.Test;
-
-/**
- * Test a case when an inline index chunk is converted to a root one. This reproduces the bug in
- * HBASE-6871. We write a carefully selected number of relatively large keys so that we accumulate
- * a leaf index chunk that only goes over the configured index chunk size after adding the last
- * key/value. The bug is in that when we close the file, we convert that inline (leaf-level) chunk
- * into a root chunk, but then look at the size of that root chunk, find that it is greater than
- * the configured chunk size, and split it into a number of intermediate index blocks that should
- * really be leaf-level blocks. If more keys were added, we would flush the leaf-level block, add
- * another entry to the root-level block, and that would prevent us from upgrading the leaf-level
- * chunk to the root chunk, thus not triggering the bug. 
- */
-@Category(SmallTests.class)
-public class TestHFileInlineToRootChunkConversion {
-  private final HBaseTestingUtility testUtil = new HBaseTestingUtility();
-  private final Configuration conf = testUtil.getConfiguration();
-  
-  @Test
-  public void testWriteHFile() throws Exception {
-    Path hfPath = new Path(testUtil.getDataTestDir(),
-        TestHFileInlineToRootChunkConversion.class.getSimpleName() + ".hfile");
-    int maxChunkSize = 1024;
-    FileSystem fs = FileSystem.get(conf);
-    CacheConfig cacheConf = new CacheConfig(conf);
-    conf.setInt(HFileBlockIndex.MAX_CHUNK_SIZE_KEY, maxChunkSize); 
-    HFileWriterV2 hfw =
-        (HFileWriterV2) new HFileWriterV2.WriterFactoryV2(conf, cacheConf)
-            .withBlockSize(16)
-            .withPath(fs, hfPath).create();
-    List<byte[]> keys = new ArrayList<byte[]>();
-    StringBuilder sb = new StringBuilder();
-
-    for (int i = 0; i < 4; ++i) {
-      sb.append("key" + String.format("%05d", i));
-      sb.append("_");
-      for (int j = 0; j < 100; ++j) {
-        sb.append('0' + j);
-      }
-      String keyStr = sb.toString();
-      sb.setLength(0);
-
-      byte[] k = Bytes.toBytes(keyStr);
-      System.out.println("Key: " + Bytes.toString(k));
-      keys.add(k);
-      byte[] v = Bytes.toBytes("value" + i);
-      hfw.append(k, v);
-    }
-    hfw.close();
-
-    HFileReaderV2 reader = (HFileReaderV2) HFile.createReader(fs, hfPath, cacheConf);
-    HFileScanner scanner = reader.getScanner(true, true);
-    for (int i = 0; i < keys.size(); ++i) {
-      scanner.seekTo(keys.get(i));
-    }
-    reader.close();
-  }
-}
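
One aside on the deleted test: in the inner loop above, sb.append('0' + j)
performs int arithmetic ('0' widens to 48), so it appends decimal strings like
"48", "49", ... rather than digit characters. That is harmless for generating
bulky keys, but if the test is ever restored and digit characters are
intended, a cast is needed:

    for (int j = 0; j < 100; ++j) {
      sb.append((char) ('0' + (j % 10)));  // digit char, not the int 48 + j
    }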