You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-commits@hadoop.apache.org by sz...@apache.org on 2008/10/29 22:54:02 UTC
svn commit: r709024 - in /hadoop/core/branches/branch-0.19: CHANGES.txt
src/hdfs/org/apache/hadoop/hdfs/server/namenode/DatanodeDescriptor.java
src/test/org/apache/hadoop/hdfs/server/namenode/TestDatanodeDescriptor.java
Author: szetszwo
Date: Wed Oct 29 14:54:01 2008
New Revision: 709024
URL: http://svn.apache.org/viewvc?rev=709024&view=rev
Log:
HADOOP-4483. Honor the max parameter in DatanodeDescriptor.getBlockArray(...). (Ahad Rana and Hairong Kuang via szetszwo)
Added:
hadoop/core/branches/branch-0.19/src/test/org/apache/hadoop/hdfs/server/namenode/TestDatanodeDescriptor.java
Modified:
hadoop/core/branches/branch-0.19/CHANGES.txt
hadoop/core/branches/branch-0.19/src/hdfs/org/apache/hadoop/hdfs/server/namenode/DatanodeDescriptor.java
Modified: hadoop/core/branches/branch-0.19/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.19/CHANGES.txt?rev=709024&r1=709023&r2=709024&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.19/CHANGES.txt (original)
+++ hadoop/core/branches/branch-0.19/CHANGES.txt Wed Oct 29 14:54:01 2008
@@ -875,9 +875,6 @@
HADOOP-4400. Add "hdfs://" to fs.default.name on quickstart.html.
(Jeff Hammerbacher via omalley)
- HADOOP-4403. Make TestLeaseRecovery and TestFileCreation more robust.
- (szetszwo)
-
HADOOP-4378. Fix TestJobQueueInformation to use SleepJob rather than
WordCount via TestMiniMRWithDFS. (Sreekanth Ramakrishnan via acmurthy)
@@ -983,6 +980,9 @@
HADOOP-4351. FSNamesystem.getBlockLocationsInternal throws
ArrayIndexOutOfBoundsException. (hairong)
+ HADOOP-4403. Make TestLeaseRecovery and TestFileCreation more robust.
+ (szetszwo)
+
HADOOP-4292. Do not support append() for LocalFileSystem. (hairong)
HADOOP-4399. Make fuse-dfs multi-thread access safe.
@@ -1005,6 +1005,9 @@
HADOOP-4526. fsck failing with NullPointerException. (hairong)
+ HADOOP-4483. Honor the max parameter in DatanodeDescriptor.getBlockArray(..)
+ (Ahad Rana and Hairong Kuang via szetszwo)
+
NEW FEATURES
HADOOP-2421. Add jdiff output to documentation, listing all API
Modified: hadoop/core/branches/branch-0.19/src/hdfs/org/apache/hadoop/hdfs/server/namenode/DatanodeDescriptor.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.19/src/hdfs/org/apache/hadoop/hdfs/server/namenode/DatanodeDescriptor.java?rev=709024&r1=709023&r2=709024&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.19/src/hdfs/org/apache/hadoop/hdfs/server/namenode/DatanodeDescriptor.java (original)
+++ hadoop/core/branches/branch-0.19/src/hdfs/org/apache/hadoop/hdfs/server/namenode/DatanodeDescriptor.java Wed Oct 29 14:54:01 2008
@@ -329,14 +329,36 @@
static private Block[] getBlockArray(Collection<Block> blocks, int max) {
Block[] blockarray = null;
synchronized(blocks) {
- int n = blocks.size();
+ int available = blocks.size();
+ int n = available;
if (max > 0 && n > 0) {
if (max < n) {
n = max;
}
- blockarray = blocks.toArray(new Block[n]);
- blocks.clear();
- assert(blockarray.length > 0);
+ // allocate the properly sized block array ...
+ blockarray = new Block[n];
+
+ // iterate tree collecting n blocks...
+ Iterator<Block> e = blocks.iterator();
+ int blockCount = 0;
+
+ while (blockCount < n && e.hasNext()) {
+ // insert into array ...
+ blockarray[blockCount++] = e.next();
+
+ // remove from tree via iterator, if we are removing
+ // less than total available blocks
+ if (n < available){
+ e.remove();
+ }
+ }
+ assert(blockarray.length == n);
+
+ // now if the number of blocks removed equals available blocks,
+ // then remove all blocks in one fell swoop via clear
+ if (n == available) {
+ blocks.clear();
+ }
}
}
return blockarray;
Added: hadoop/core/branches/branch-0.19/src/test/org/apache/hadoop/hdfs/server/namenode/TestDatanodeDescriptor.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.19/src/test/org/apache/hadoop/hdfs/server/namenode/TestDatanodeDescriptor.java?rev=709024&view=auto
==============================================================================
--- hadoop/core/branches/branch-0.19/src/test/org/apache/hadoop/hdfs/server/namenode/TestDatanodeDescriptor.java (added)
+++ hadoop/core/branches/branch-0.19/src/test/org/apache/hadoop/hdfs/server/namenode/TestDatanodeDescriptor.java Wed Oct 29 14:54:01 2008
@@ -0,0 +1,51 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.util.ArrayList;
+
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.server.common.GenerationStamp;
+import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
+
+import junit.framework.TestCase;
+
+/**
+ * This class tests methods in DatanodeDescriptor.
+ */
+public class TestDatanodeDescriptor extends TestCase {
+ /**
+ * Test that getInvalidateBlocks observes the maxlimit.
+ */
+ public void testGetInvalidateBlocks() throws Exception {
+ final int MAX_BLOCKS = 10;
+ final int REMAINING_BLOCKS = 2;
+ final int MAX_LIMIT = MAX_BLOCKS - REMAINING_BLOCKS;
+
+ DatanodeDescriptor dd = new DatanodeDescriptor();
+ ArrayList<Block> blockList = new ArrayList<Block>(MAX_BLOCKS);
+ for (int i=0; i<MAX_BLOCKS; i++) {
+ blockList.add(new Block(i, 0, GenerationStamp.FIRST_VALID_STAMP));
+ }
+ dd.addBlocksToBeInvalidated(blockList);
+ BlockCommand bc = dd.getInvalidateBlocks(MAX_LIMIT);
+ assertEquals(bc.getBlocks().length, MAX_LIMIT);
+ bc = dd.getInvalidateBlocks(MAX_LIMIT);
+ assertEquals(bc.getBlocks().length, REMAINING_BLOCKS);
+ }
+}