Posted to commits@hbase.apache.org by ap...@apache.org on 2010/01/09 22:10:05 UTC
svn commit: r897547 [9/10] - in
/hadoop/hbase/branches/0.20_on_hadoop-0.18.3: ./ bin/ conf/ lib/
src/contrib/ src/contrib/ec2/ src/contrib/ec2/bin/
src/contrib/ec2/bin/image/ src/contrib/indexed/ src/contrib/indexed/lib/
src/contrib/indexed/lib/fmpp-0....
Added: hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/indexed/src/test/org/apache/hadoop/hbase/regionserver/idx/support/arrays/TestShortArrayList.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/indexed/src/test/org/apache/hadoop/hbase/regionserver/idx/support/arrays/TestShortArrayList.java?rev=897547&view=auto
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/indexed/src/test/org/apache/hadoop/hbase/regionserver/idx/support/arrays/TestShortArrayList.java (added)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/indexed/src/test/org/apache/hadoop/hbase/regionserver/idx/support/arrays/TestShortArrayList.java Sat Jan 9 21:09:59 2010
@@ -0,0 +1,349 @@
+/**
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver.idx.support.arrays;
+
+import junit.framework.Assert;
+import org.apache.hadoop.hbase.HBaseTestCase;
+import org.apache.commons.lang.ArrayUtils;
+
+import java.lang.reflect.Field;
+import java.util.ArrayList;
+
+
+public class TestShortArrayList extends HBaseTestCase {
+
+
+
+ private static final int[] INVALID_INDEXES = {0, -1, 1};
+
+ /**
+ * Verifies that the initial size constructor initialises as expected.
+ */
+ public void testInitialSizeAndEmpty() {
+ ShortArrayList test = new ShortArrayList();
+ checkSizeAndCapacity(test, 0, 1);
+ Assert.assertTrue(test.isEmpty());
+
+ test = new ShortArrayList(1000);
+ checkSizeAndCapacity(test, 0, 1000);
+ Assert.assertTrue(test.isEmpty());
+
+ test.add((short) 5);
+ Assert.assertFalse(test.isEmpty());
+ }
+
+ /**
+ * Verifies copy constructor.
+ */
+ public void testCopyConstructor() {
+ // Create an original with a capacity of 2, but only one entry
+ ShortArrayList original = new ShortArrayList(2);
+ original.add((short) 1);
+ short[] values = (short[]) getField(original, "values");
+ Assert.assertEquals(values.length, 2);
+ Assert.assertEquals(original.size(), 1);
+
+ // Create a copy of the original and check that its size and capacity are both the minimum
+ ShortArrayList copy = new ShortArrayList(original);
+ Assert.assertEquals(copy.size(), 1);
+ Assert.assertEquals(copy.get(0), (short) 1);
+ values = (short[]) getField(copy, "values");
+ Assert.assertEquals(values.length, 1);
+ }
+
+ /**
+ * Ensures the equals() method behaves as expected.
+ */
+ public void testEquals() {
+ ShortArrayList test1a = new ShortArrayList();
+ test1a.add((short) 1);
+ ShortArrayList test1b = new ShortArrayList();
+ test1b.add((short) 1);
+ ShortArrayList test2 = new ShortArrayList();
+ test2.add((short) 2);
+
+ Assert.assertTrue(test1a.equals(test1b));
+ Assert.assertFalse(test1a.equals(test2));
+ }
+
+
+ /**
+ * Ensures the number of elements in the list and its backing capacity are what we expect.
+ *
+ * @param test the list to test
+ * @param size the expected number of elements in the list
+ * @param capacity the expected capacity
+ */
+ private void checkSizeAndCapacity(ShortArrayList test, int size, int capacity) {
+ Assert.assertEquals(test.size(), size);
+
+ short[] values = (short[]) getField(test, "values");
+
+ Assert.assertEquals(values.length, capacity);
+ }
+
+ /**
+ * Tests that adding elements grows the array size and capacity as expected.
+ */
+ public void testAddGetAndGrow() {
+ // Initialise
+ ShortArrayList test = new ShortArrayList();
+ checkSizeAndCapacity(test, 0, 1);
+
+ // Add the first element; the capacity should be unchanged since a free slot remains
+ test.add((short) 1);
+ Assert.assertEquals(test.get(0), (short) 1);
+ checkSizeAndCapacity(test, 1, 1);
+
+ // Add the next element and we expect the capacity to grow by one only
+ test.add((short) 2);
+ Assert.assertEquals(test.get(1), (short) 2);
+ checkSizeAndCapacity(test, 2, 2);
+
+ // Add the next element and we expect the capacity to grow by two
+ test.add((short) 3);
+ Assert.assertEquals(test.get(2), (short) 3);
+ checkSizeAndCapacity(test, 3, 4);
+
+ // Add the next element and we expect the capacity to be unchanged
+ test.add((short) 4);
+ Assert.assertEquals(test.get(3), (short) 4);
+ checkSizeAndCapacity(test, 4, 4);
+
+ // Add the next element and we expect the capacity to grow to oldCapacity * 1.5 + 1
+ test.add((short) 5);
+ Assert.assertEquals(test.get(4), (short) 5);
+ checkSizeAndCapacity(test, 5, 7);
+ }
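The growth assertions above imply a capacity rule of oldCapacity * 1.5 + 1 (integer
arithmetic) once the backing array is full. A minimal sketch of that rule; the
ShortArrayList internals themselves are not part of this diff, so the helper below is
hypothetical:

    // Hypothetical helper reproducing the growth the test expects:
    // 1 -> 2 -> 4 -> 7, i.e. oldCapacity + oldCapacity/2 + 1.
    private static int grownCapacity(int oldCapacity) {
      return oldCapacity + (oldCapacity >> 1) + 1;
    }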
+
+ /**
+ * Tests get() with various invalid ranges.
+ */
+ public void testInvalidGet() {
+ for (int index : INVALID_INDEXES) {
+ try {
+ ShortArrayList test = new ShortArrayList();
+ test.get(index);
+ } catch (ArrayIndexOutOfBoundsException ignored) {
+ continue;
+ }
+ Assert.fail("Expected an array index out of bounds exception");
+ }
+ }
+
+
+ /**
+ * Tests the indexOf() and set() methods.
+ */
+ public void testIndexOfAndSet() {
+ ShortArrayList test = new ShortArrayList();
+
+ // Test with first value added to list
+ short testValue = (short) 42;
+ Assert.assertEquals(test.indexOf(testValue), -1);
+ test.add(testValue);
+ Assert.assertEquals(test.indexOf(testValue), 0);
+
+ // Add a second one
+ testValue = (short) 43;
+ Assert.assertEquals(test.indexOf(testValue), -1);
+ test.add(testValue);
+ Assert.assertEquals(test.indexOf(testValue), 1);
+
+ // Change the first to a new value
+ testValue = (short) 41;
+ Assert.assertEquals(test.indexOf(testValue), -1);
+ test.set(0, testValue);
+ Assert.assertEquals(test.indexOf(testValue), 0);
+ }
+
+ /**
+ * Tests the Searchable implementation.
+ */
+ public void testSearchable() {
+ ShortArrayList test = new ShortArrayList();
+
+ // Test with first value added to list
+ short testValue = (short) 42;
+ Assert.assertEquals(BinarySearch.search(test, test.size(), testValue), -1);
+ test.add(testValue);
+ Assert.assertEquals(BinarySearch.search(test, test.size(), testValue), 0);
+
+ // Add a second one
+ testValue = (short) 43;
+ Assert.assertEquals(BinarySearch.search(test, test.size(), testValue), -2);
+ test.add(testValue);
+ Assert.assertEquals(BinarySearch.search(test, test.size(), testValue), 1);
+
+ // Search for something off the start
+ testValue = (short) 41;
+ Assert.assertEquals(BinarySearch.search(test, test.size(), testValue), -1);
+ }
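The expected values of -1 and -2 above suggest BinarySearch.search follows the
java.util.Arrays.binarySearch contract, returning -(insertionPoint) - 1 for a missing
key. A small illustration of that contract using the JDK method; the contrib
BinarySearch class is assumed to behave the same way:

    short[] sorted = {42, 43};
    int hit  = java.util.Arrays.binarySearch(sorted, (short) 43); // 1
    int miss = java.util.Arrays.binarySearch(sorted, (short) 41); // -1, insertion point 0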
+
+ /**
+ * Tests set() with various invalid ranges.
+ */
+ public void testInvalidSet() {
+ for (int index : INVALID_INDEXES) {
+ try {
+ ShortArrayList test = new ShortArrayList();
+ test.set(index, (short) 0);
+ } catch (ArrayIndexOutOfBoundsException ignored) {
+ continue;
+ }
+ Assert.fail("Expected an array index out of bounds exception");
+ }
+ }
+
+
+ /**
+ * Tests iteration via the Iterable interface.
+ */
+ public void testIterable() {
+ final java.util.List<Short> testData = new ArrayList<Short>();
+
+ // Test with no content first
+ ShortArrayList test = new ShortArrayList();
+ testData.clear();
+ for (short item : test) {
+ testData.add(item);
+ }
+ Assert.assertEquals(testData.size(), 0);
+
+ // Add a value and ensure it is returned
+ test.add((short) 1);
+ testData.clear();
+ for (short item : test) {
+ testData.add(item);
+ }
+ Assert.assertTrue(ArrayUtils.isEquals(testData.toArray(),
+ new Object[]{(short) 1}));
+
+ // Add another value and ensure it is returned
+ test.add((short) 1);
+ testData.clear();
+ for (short item : test) {
+ testData.add(item);
+ }
+ Assert.assertTrue(ArrayUtils.isEquals(testData.toArray(),
+ new Object[]{(short) 1, (short) 1}));
+ }
+
+ /**
+ * Tests the remove() method.
+ */
+ public void testRemove() {
+ ShortArrayList test = new ShortArrayList();
+ test.add((short) 1);
+ Assert.assertEquals(test.get(0), (short) 1);
+ test.remove(0);
+ Assert.assertTrue(test.isEmpty());
+
+ // Add some
+ test.add((short) 0);
+ test.add((short) 1);
+ test.add((short) 2);
+
+ // Remove a value from the middle and ensure correct operation
+ Assert.assertEquals(test.remove(1), (short) 1);
+ Assert.assertEquals(test.get(0), (short) 0);
+ Assert.assertEquals(test.get(1), (short) 2);
+ }
+
+ /**
+ * Tests the insert() method.
+ */
+ public void testInsert() {
+ ShortArrayList test = new ShortArrayList();
+ test.insert(0, (short) 1);
+ Assert.assertEquals(test.get(0), (short) 1);
+ Assert.assertEquals(test.size(), 1);
+
+ test.insert(0, (short) 0);
+ Assert.assertEquals(test.get(0), (short) 0);
+ Assert.assertEquals(test.get(1), (short) 1);
+ Assert.assertEquals(test.size(), 2);
+
+ test.insert(1, (short) 2);
+ Assert.assertEquals(test.get(0), (short) 0);
+ Assert.assertEquals(test.get(1), (short) 2);
+ Assert.assertEquals(test.get(2), (short) 1);
+ Assert.assertEquals(test.size(), 3);
+
+ test.insert(3, (short) 3);
+ Assert.assertEquals(test.get(0), (short) 0);
+ Assert.assertEquals(test.get(1), (short) 2);
+ Assert.assertEquals(test.get(2), (short) 1);
+ Assert.assertEquals(test.get(3), (short) 3);
+ Assert.assertEquals(test.size(), 4);
+ }
+
+ /**
+ * Verifies the removeLast() method works as expected.
+ */
+ public void testRemoveLast() {
+ ShortArrayList test = new ShortArrayList();
+ test.add((short) 1);
+ test.add((short) 2);
+
+ Assert.assertEquals(test.removeLast(), (short) 2);
+ Assert.assertEquals(test.get(0), (short) 1);
+
+ Assert.assertEquals(test.removeLast(), (short) 1);
+ Assert.assertTrue(test.isEmpty());
+ }
+
+ /**
+ * Tests remove() with various invalid ranges.
+ */
+ public void testInvalidRemove() {
+ for (int index : INVALID_INDEXES) {
+ try {
+ ShortArrayList test = new ShortArrayList();
+ test.remove(index);
+ } catch (ArrayIndexOutOfBoundsException ignored) {
+ continue;
+ }
+ Assert.fail("Expected an array index out of bounds exception");
+ }
+ }
+
+ /**
+ * Extracts a declared field from a given object.
+ *
+ * @param target the object from which to extract the field
+ * @param name the name of the field
+ * @return the declared field
+ */
+ public static Object getField(Object target, String name) {
+ try {
+ Field field = target.getClass().getDeclaredField(name);
+ field.setAccessible(true);
+ return field.get(target);
+ } catch (IllegalAccessException e) {
+ Assert.fail("Exception " + e);
+ } catch (NoSuchFieldException e) {
+ Assert.fail("Exception " + e);
+ }
+ return null;
+ }
+
+}
Added: hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/indexed/src/test/org/apache/hadoop/hbase/regionserver/idx/support/sets/IntSetBaseTestCase.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/indexed/src/test/org/apache/hadoop/hbase/regionserver/idx/support/sets/IntSetBaseTestCase.java?rev=897547&view=auto
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/indexed/src/test/org/apache/hadoop/hbase/regionserver/idx/support/sets/IntSetBaseTestCase.java (added)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/indexed/src/test/org/apache/hadoop/hbase/regionserver/idx/support/sets/IntSetBaseTestCase.java Sat Jan 9 21:09:59 2010
@@ -0,0 +1,292 @@
+/**
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver.idx.support.sets;
+
+import junit.framework.Assert;
+import junit.framework.AssertionFailedError;
+import org.apache.hadoop.hbase.HBaseTestCase;
+
+/**
+ * A base test-case for {@link IntSet} implementations.
+ */
+public abstract class IntSetBaseTestCase extends HBaseTestCase {
+
+ private static final int[] SOME = new int[]{0, 10, 63, 64, 99, 103, 104, 200,
+ 800, 805};
+
+ /**
+ * Creates a sparse bitset with the given elements and maximum.
+ *
+ * @param capacity the maximum
+ * @param sortedElements the elements assumed to be sorted
+ * @return the new sparse bitset.
+ */
+ public static IntSetBase createSparseBitSet(int capacity,
+ int... sortedElements) {
+ SparseBitSet bitSet = new SparseBitSet();
+ for (int element : sortedElements) {
+ bitSet.addNext(element);
+ }
+ bitSet.setCapacity(capacity);
+ return bitSet;
+ }
+
+ /**
+ * Creates a bitset with the given elements and maximum.
+ *
+ * @param capacity the maximum
+ * @param sortedElements the elements assumed to be sorted
+ * @return the new bitset.
+ */
+ public static IntSetBase createBitSet(int capacity, int... sortedElements) {
+ BitSet bitSet = new BitSet(capacity);
+ for (int element : sortedElements) {
+ bitSet.addNext(element);
+ }
+ return bitSet;
+ }
+
+ protected abstract IntSetBase newSet(int capacity, int... sortedElements);
+
+ protected void addSome(IntSetBase bitSet) {
+ for (int next : SOME) {
+ bitSet.addNext(next);
+ }
+ }
+
+ protected static void fill(IntSetBase bitSet) {
+ for (int i = 0; i < bitSet.capacity(); i++) {
+ bitSet.addNext(i);
+ }
+ }
+
+
+ protected void assertSetsEqual(IntSet set1, IntSet set2) {
+ Assert.assertEquals(set1.capacity(), set2.capacity());
+ Assert.assertEquals(set1.size(), set2.size());
+ IntSet.IntSetIterator iter1 = set1.iterator(), iter2 = set2.iterator();
+ while (iter1.hasNext() || iter2.hasNext()) {
+ Assert.assertEquals(iter1.next(), iter2.next());
+ }
+ }
+
+ protected void assertSetsNotEqual(IntSetBase set1, IntSetBase set2) {
+ try {
+ assertSetsEqual(set1, set2);
+ fail("Sets are equal");
+ } catch (AssertionFailedError ignored) {
+ }
+ }
+
+ public void testAdd() {
+ IntSetBase bitSet = newSet(1000);
+
+ try {
+ bitSet.addNext(-1);
+ Assert.fail("expected an error");
+ } catch (AssertionError ignored) {
+ }
+
+ addSome(bitSet);
+ Assert.assertEquals(bitSet.size(), SOME.length);
+
+ try {
+ bitSet.addNext(805);
+ Assert.fail("expected an error");
+ } catch (AssertionError ignored) {
+ }
+
+ try {
+ bitSet.addNext(1000);
+ Assert.fail("expected an error");
+ } catch (AssertionError ignored) {
+ }
+ }
+
+ public void testContains() {
+ IntSetBase intSet = newSet(1000);
+ addSome(intSet);
+ for (int next : SOME) {
+ Assert.assertTrue(intSet.contains(next));
+ }
+
+ int sum = 0;
+ for (int i = 0; i < intSet.capacity(); i++) {
+ sum += intSet.contains(i) ? 1 : 0;
+ }
+ Assert.assertEquals(sum, SOME.length);
+ }
+
+ public void testClear() {
+ IntSetBase intSet = newSet(1000);
+ Assert.assertEquals(intSet.size(), 0);
+ Assert.assertTrue(intSet.isEmpty());
+ intSet.clear();
+
+ addClearAndCheck(intSet);
+ addClearAndCheck(intSet);
+ }
+
+
+ private void addClearAndCheck(IntSetBase intSetBase) {
+ addSome(intSetBase);
+ Assert.assertEquals(intSetBase.size(), SOME.length);
+ Assert.assertFalse(intSetBase.isEmpty());
+ intSetBase.clear();
+ Assert.assertEquals(intSetBase.size(), 0);
+ Assert.assertTrue(intSetBase.isEmpty());
+ }
+
+ public void testClone() {
+ IntSetBase intSet = newSet(10000);
+ IntSetBase otherIntSet = (IntSetBase) intSet.clone();
+ assertSetsEqual(intSet, otherIntSet);
+
+ addSome(intSet);
+ assertSetsNotEqual(intSet, otherIntSet);
+
+ otherIntSet = (IntSetBase) intSet.clone();
+ assertSetsEqual(intSet, otherIntSet);
+
+ intSet.addNext(1001);
+ Assert.assertEquals(intSet.size(), otherIntSet.size() + 1);
+ Assert.assertFalse(otherIntSet.contains(1001));
+ }
+
+ public void testIterator() {
+ IntSetBase intSet = newSet(1000);
+ IntSet.IntSetIterator iter = intSet.iterator();
+ Assert.assertFalse(iter.hasNext());
+
+ addSome(intSet);
+ iter = intSet.iterator();
+ for (int num : SOME) {
+ Assert.assertTrue(iter.hasNext());
+ Assert.assertEquals(num, iter.next());
+ }
+ Assert.assertFalse(iter.hasNext());
+
+ intSet = new BitSet(1000);
+ fill(intSet);
+ iter = intSet.iterator();
+ for (int num = 0; num < 1000; num++) {
+ Assert.assertTrue(iter.hasNext());
+ Assert.assertEquals(num, iter.next());
+ }
+ Assert.assertFalse(iter.hasNext());
+ }
+
+
+ public void testComplement() {
+ for (int capacity = 950; capacity < 1050; capacity++) {
+ IntSetBase intSet = newSet(capacity);
+ Assert.assertEquals(intSet.size(), 0);
+ Assert.assertEquals(intSet.complement().size(), capacity);
+ }
+
+ IntSetBase intSet = newSet(1001);
+ addSome(intSet);
+ BitSet cBitSet = (BitSet) intSet.clone().complement();
+ Assert.assertEquals(cBitSet.size() + intSet.size(), 1001);
+ for (int i = 0; i < 1001; i++) {
+ Assert.assertTrue(intSet.contains(i) != cBitSet.contains(i));
+ }
+ }
+
+ public void testIntersect() {
+ IntSetBase intset1 = newSet(1013, 3, 7, 34, 87, 178, 244, 507, 643, 765,
+ 999);
+ IntSetBase intset2 = newSet(1013);
+
+ Assert.assertTrue(intset1.clone().intersect(intset2).isEmpty());
+ Assert.assertTrue(intset2.clone().intersect(intset1).isEmpty());
+
+ assertSetsEqual(intset1.clone().intersect(intset1.clone()), intset1);
+ intset2 = newSet(1013);
+ fill(intset2);
+ assertSetsEqual(intset1.clone().intersect(intset2), intset1);
+
+ assertSetsEqual(intset1.clone().intersect(newSet(1013, 34, 63, 64, 65, 107,
+ 244, 340, 765, 894, 1012)),
+ newSet(1013, 34, 244, 765));
+ }
+
+
+ public void testUnite() {
+ IntSetBase intset1 = newSet(1013, 3, 7, 34, 87, 178, 244, 507, 643, 765,
+ 999);
+ IntSetBase intset2 = newSet(1013);
+
+ assertSetsEqual(intset1.clone().unite(intset2), intset1);
+ assertSetsEqual(intset2.clone().unite(intset1), intset1);
+
+ assertSetsEqual(intset1.clone().unite(intset1.clone()), intset1);
+ intset2 = newSet(1013);
+ fill(intset2);
+ assertSetsEqual(intset1.clone().unite(intset2), intset2);
+
+ assertSetsEqual(intset1.clone().unite(newSet(1013, 34, 63, 64, 65, 107, 244,
+ 340, 765, 894, 1012)),
+ newSet(1013, 3, 7, 34, 63, 64, 65, 87, 107, 178, 244, 340, 507, 643, 765
+ , 894, 999, 1012));
+ }
+
+ public void testSubtract() {
+ IntSetBase intset1 = newSet(1013, 3, 7, 34, 87, 178, 244, 507, 643,
+ 765, 999);
+ IntSetBase intset2 = newSet(1013);
+
+ assertSetsEqual(intset1.clone().subtract(intset2), intset1);
+ assertSetsEqual(intset2.clone().subtract(intset1), intset2);
+
+ assertSetsEqual(intset1.clone().subtract(intset1.clone()), intset2);
+ intset2 = newSet(1013);
+ fill(intset2);
+ assertSetsEqual(intset1.clone().subtract(intset2), newSet(1013));
+ assertSetsEqual(intset2.clone().subtract(intset1),
+ intset1.clone().complement());
+
+ assertSetsEqual(intset1.clone().subtract(newSet(1013, 34, 63, 64, 65,
+ 107, 244, 340, 765, 894, 1012)),
+ newSet(1013, 3, 7, 87, 178, 507, 643, 999));
+ }
+
+ public void testDifference() {
+ IntSetBase intset1 = newSet(1013, 3, 7, 34, 87, 178, 244, 507, 643,
+ 765, 999);
+ IntSetBase intset2 = newSet(1013);
+
+ assertSetsEqual(intset1.clone().difference(intset2), intset1);
+ assertSetsEqual(intset2.clone().difference(intset1), intset1);
+
+ assertSetsEqual(intset1.clone().difference(intset1.clone()), intset2);
+ intset2 = newSet(1013);
+ fill(intset2);
+ assertSetsEqual(intset1.clone().difference(intset2),
+ intset1.clone().complement());
+ assertSetsEqual(intset2.clone().difference(intset1),
+ intset1.clone().complement());
+
+ assertSetsEqual(intset1.clone().difference(newSet(1013, 34, 63, 64, 65,
+ 107, 244, 340, 765, 894, 1012)),
+ newSet(1013, 3, 7, 63, 64, 65, 87, 107, 178, 340, 507, 643, 894,
+ 999, 1012));
+ }
+}
Added: hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/indexed/src/test/org/apache/hadoop/hbase/regionserver/idx/support/sets/TestBitSet.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/indexed/src/test/org/apache/hadoop/hbase/regionserver/idx/support/sets/TestBitSet.java?rev=897547&view=auto
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/indexed/src/test/org/apache/hadoop/hbase/regionserver/idx/support/sets/TestBitSet.java (added)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/indexed/src/test/org/apache/hadoop/hbase/regionserver/idx/support/sets/TestBitSet.java Sat Jan 9 21:09:59 2010
@@ -0,0 +1,44 @@
+/**
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver.idx.support.sets;
+
+import org.apache.hadoop.hbase.util.ClassSize;
+
+/**
+ * Tests the {@link BitSet}.
+ */
+public class TestBitSet extends IntSetBaseTestCase {
+
+
+ @Override
+ protected IntSetBase newSet(int capacity, int... sortedElements) {
+ return createBitSet(capacity, sortedElements);
+ }
+
+ /**
+ * Tests that the heap size estimate of the fixed parts matches the
+ * FIXED SIZE constant.
+ */
+ public void testHeapSize() {
+ assertEquals(ClassSize.estimateBase(BitSet.class, false),
+ BitSet.FIXED_SIZE);
+ }
+
+}
Added: hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/indexed/src/test/org/apache/hadoop/hbase/regionserver/idx/support/sets/TestSparseBitSet.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/indexed/src/test/org/apache/hadoop/hbase/regionserver/idx/support/sets/TestSparseBitSet.java?rev=897547&view=auto
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/indexed/src/test/org/apache/hadoop/hbase/regionserver/idx/support/sets/TestSparseBitSet.java (added)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/indexed/src/test/org/apache/hadoop/hbase/regionserver/idx/support/sets/TestSparseBitSet.java Sat Jan 9 21:09:59 2010
@@ -0,0 +1,43 @@
+/**
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver.idx.support.sets;
+
+import org.apache.hadoop.hbase.util.ClassSize;
+
+/**
+ * Tests the {@link SparseBitSet} implementation.
+ */
+public class TestSparseBitSet extends IntSetBaseTestCase {
+
+ @Override
+ protected IntSetBase newSet(int max, int... sortedElements) {
+ return createSparseBitSet(max, sortedElements);
+ }
+
+ /**
+ * Tests that the heap size estimate of the fixed parts matches the
+ * FIXED SIZE constant.
+ */
+ public void testHeapSize() {
+ assertEquals(ClassSize.estimateBase(SparseBitSet.class, false),
+ SparseBitSet.FIXED_SIZE);
+ }
+
+}
\ No newline at end of file
Modified: hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/docs/src/documentation/content/xdocs/metrics.xml
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/docs/src/documentation/content/xdocs/metrics.xml?rev=897547&r1=897546&r2=897547&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/docs/src/documentation/content/xdocs/metrics.xml (original)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/docs/src/documentation/content/xdocs/metrics.xml Sat Jan 9 21:09:59 2010
@@ -123,48 +123,20 @@
<section>
<title>Configure JMX in HBase startup</title>
<p>
- Finally, edit the <code>$HBASE_HOME/conf/hbase-env.sh</code> and
- <code>$HBASE_HOME/bin/hbase</code> scripts for JMX support:
+ Finally, edit the <code>$HBASE_HOME/conf/hbase-env.sh</code>
+ script to add JMX support:
</p>
<dl>
<dt><code>$HBASE_HOME/conf/hbase-env.sh</code></dt>
<dd>
<p>Add the lines:</p>
<source>
-JMX_OPTS="-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.ssl=false"
-JMX_OPTS="$JMX_OPTS -Dcom.sun.management.jmxremote.password.file=$HBASE_HOME/conf/jmxremote.passwd"
-JMX_OPTS="$JMX_OPTS -Dcom.sun.management.jmxremote.access.file=$HBASE_HOME/conf/jmxremote.access"
+HBASE_JMX_OPTS="-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.ssl=false"
+HBASE_JMX_OPTS="$HBASE_JMX_OPTS -Dcom.sun.management.jmxremote.password.file=$HBASE_HOME/conf/jmxremote.passwd"
+HBASE_JMX_OPTS="$HBASE_JMX_OPTS -Dcom.sun.management.jmxremote.access.file=$HBASE_HOME/conf/jmxremote.access"
-export HBASE_MASTER_OPTS="$JMX_OPTS -Dcom.sun.management.jmxremote.port=10101"
-export HBASE_REGIONSERVER_OPTS="$JMX_OPTS -Dcom.sun.management.jmxremote.port=10102"
- </source>
- </dd>
- <dt><code>$HBASE_HOME/bin/hbase</code></dt>
- <dd>
- <p>Towards the end of the script, replace the lines:</p>
- <source>
- # figure out which class to run
-if [ "$COMMAND" = "shell" ] ; then
- CLASS="org.jruby.Main ${HBASE_HOME}/bin/hirb.rb"
-elif [ "$COMMAND" = "master" ] ; then
- CLASS='org.apache.hadoop.hbase.master.HMaster'
-elif [ "$COMMAND" = "regionserver" ] ; then
- CLASS='org.apache.hadoop.hbase.regionserver.HRegionServer'
- </source>
- <p>
- with the lines: (adding the "HBASE_OPTS=..." lines for "master" and
- "regionserver" commands)
- </p>
- <source>
- # figure out which class to run
-if [ "$COMMAND" = "shell" ] ; then
- CLASS="org.jruby.Main ${HBASE_HOME}/bin/hirb.rb"
-elif [ "$COMMAND" = "master" ] ; then
- CLASS='org.apache.hadoop.hbase.master.HMaster'
- HBASE_OPTS="$HBASE_OPTS $HBASE_MASTER_OPTS"
-elif [ "$COMMAND" = "regionserver" ] ; then
- CLASS='org.apache.hadoop.hbase.regionserver.HRegionServer'
- HBASE_OPTS="$HBASE_OPTS $HBASE_REGIONSERVER_OPTS"
+export HBASE_MASTER_OPTS="$HBASE_JMX_OPTS -Dcom.sun.management.jmxremote.port=10101"
+export HBASE_REGIONSERVER_OPTS="$HBASE_JMX_OPTS -Dcom.sun.management.jmxremote.port=10102"
</source>
</dd>
</dl>
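With those options in place, a JMX client can attach to the configured ports (10101
for the master, 10102 for a region server). A minimal connection sketch in Java,
assuming the server runs on localhost and the monitorRole credentials from
conf/jmxremote.passwd; host and credential values are placeholders:

    import java.util.HashMap;
    import java.util.Map;
    import javax.management.MBeanServerConnection;
    import javax.management.remote.JMXConnector;
    import javax.management.remote.JMXConnectorFactory;
    import javax.management.remote.JMXServiceURL;

    public class JmxProbe {
      public static void main(String[] args) throws Exception {
        JMXServiceURL url = new JMXServiceURL(
            "service:jmx:rmi:///jndi/rmi://localhost:10101/jmxrmi");
        Map<String, Object> env = new HashMap<String, Object>();
        // Placeholder credentials; must match conf/jmxremote.passwd.
        env.put(JMXConnector.CREDENTIALS, new String[] {"monitorRole", "password"});
        JMXConnector connector = JMXConnectorFactory.connect(url, env);
        MBeanServerConnection mbsc = connector.getMBeanServerConnection();
        System.out.println("MBeans visible: " + mbsc.getMBeanCount());
        connector.close();
      }
    }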
Modified: hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/ClusterStatus.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/ClusterStatus.java?rev=897547&r1=897546&r2=897547&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/ClusterStatus.java (original)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/ClusterStatus.java Sat Jan 9 21:09:59 2010
@@ -250,4 +250,4 @@
this.intransition.put(key, value);
}
}
-}
\ No newline at end of file
+}
Modified: hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/HConstants.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/HConstants.java?rev=897547&r1=897546&r2=897547&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/HConstants.java (original)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/HConstants.java Sat Jan 9 21:09:59 2010
@@ -111,6 +111,9 @@
/** Default region server interface class name. */
static final String DEFAULT_REGION_SERVER_CLASS = HRegionInterface.class.getName();
+ /** Parameter name for what region implementation to use. */
+ static final String REGION_IMPL = "hbase.hregion.impl";
+
/** Parameter name for how often threads should wake up */
static final String THREAD_WAKE_FREQUENCY = "hbase.server.thread.wakefrequency";
@@ -284,4 +287,20 @@
TABLE_SET_HTD,
TABLE_SPLIT
}
+
+ /**
+ * Parameter name for maximum number of bytes returned when calling a
+ * scanner's next method.
+ */
+ public static String HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE_KEY = "hbase.client.scanner.max.result.size";
+
+ /**
+ * Maximum number of bytes returned when calling a scanner's next method.
+ * Note that when a single row is larger than this limit the row is still
+ * returned completely.
+ *
+ * The default value is unlimited.
+ */
+ public static long DEFAULT_HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE = Long.MAX_VALUE;
+
}
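A hedged client-side example of overriding the new limit; the key string matches
HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE_KEY above, while the 2 MB value and the table
name are arbitrary illustrations:

    HBaseConfiguration conf = new HBaseConfiguration();
    // Cap each scanner next() batch at roughly 2 MB of KeyValue heap.
    conf.setLong("hbase.client.scanner.max.result.size", 2L * 1024 * 1024);
    HTable table = new HTable(conf, "mytable");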
Modified: hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/HMerge.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/HMerge.java?rev=897547&r1=897546&r2=897547&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/HMerge.java (original)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/HMerge.java Sat Jan 9 21:09:59 2010
@@ -150,12 +150,12 @@
for (int i = 0; i < info.length - 1; i++) {
if (currentRegion == null) {
currentRegion =
- new HRegion(tabledir, hlog, fs, conf, info[i], null);
+ HRegion.newHRegion(tabledir, hlog, fs, conf, info[i], null);
currentRegion.initialize(null, null);
currentSize = currentRegion.getLargestHStoreSize();
}
nextRegion =
- new HRegion(tabledir, hlog, fs, conf, info[i + 1], null);
+ HRegion.newHRegion(tabledir, hlog, fs, conf, info[i + 1], null);
nextRegion.initialize(null, null);
nextSize = nextRegion.getLargestHStoreSize();
@@ -324,7 +324,7 @@
// Scan root region to find all the meta regions
- root = new HRegion(rootTableDir, hlog, fs, conf,
+ root = HRegion.newHRegion(rootTableDir, hlog, fs, conf,
HRegionInfo.ROOT_REGIONINFO, null);
root.initialize(null, null);
Modified: hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/Leases.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/Leases.java?rev=897547&r1=897546&r2=897547&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/Leases.java (original)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/Leases.java Sat Jan 9 21:09:59 2010
@@ -183,11 +183,13 @@
public void renewLease(final String leaseName) throws LeaseException {
synchronized (leaseQueue) {
Lease lease = leases.get(leaseName);
- if (lease == null) {
+ // We need to check whether the remove succeeded, as the poll in the run()
+ // method could have completed between the get and the remove, which would
+ // leave the leaseQueue in a corrupt state.
+ if (lease == null || !leaseQueue.remove(lease)) {
throw new LeaseException("lease '" + leaseName +
- "' does not exist");
+ "' does not exist or has already expired");
}
- leaseQueue.remove(lease);
lease.setExpirationTime(System.currentTimeMillis() + leasePeriod);
leaseQueue.add(lease);
}
Modified: hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/client/HBaseAdmin.java?rev=897547&r1=897546&r2=897547&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/client/HBaseAdmin.java (original)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/client/HBaseAdmin.java Sat Jan 9 21:09:59 2010
@@ -326,15 +326,16 @@
if (this.master == null) {
throw new MasterNotRunningException("master has been shut down");
}
- try {
- this.master.enableTable(tableName);
- } catch (RemoteException e) {
- throw RemoteExceptionHandler.decodeRemoteException(e);
- }
+
// Wait until all regions are enabled
boolean enabled = false;
for (int tries = 0; tries < this.numRetries; tries++) {
+ try {
+ this.master.enableTable(tableName);
+ } catch (RemoteException e) {
+ throw RemoteExceptionHandler.decodeRemoteException(e);
+ }
enabled = isTableEnabled(tableName);
if (enabled) break;
long sleep = getPauseTime(tries);
@@ -382,15 +383,16 @@
if (this.master == null) {
throw new MasterNotRunningException("master has been shut down");
}
- try {
- this.master.disableTable(tableName);
- } catch (RemoteException e) {
- throw RemoteExceptionHandler.decodeRemoteException(e);
- }
// Wait until all regions are disabled
boolean disabled = false;
for (int tries = 0; tries < this.numRetries; tries++) {
+
+ try {
+ this.master.disableTable(tableName);
+ } catch (RemoteException e) {
+ throw RemoteExceptionHandler.decodeRemoteException(e);
+ }
disabled = isTableDisabled(tableName);
if (disabled) break;
if (LOG.isDebugEnabled()) {
Modified: hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/client/HTable.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/client/HTable.java?rev=897547&r1=897546&r2=897547&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/client/HTable.java (original)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/client/HTable.java Sat Jan 9 21:09:59 2010
@@ -70,7 +70,8 @@
private boolean autoFlush;
private long currentWriteBufferSize;
protected int scannerCaching;
-
+ private long maxScannerResultSize;
+
/**
* Creates an object to access a HBase table
*
@@ -129,6 +130,9 @@
this.autoFlush = true;
this.currentWriteBufferSize = 0;
this.scannerCaching = conf.getInt("hbase.client.scanner.caching", 1);
+ this.maxScannerResultSize = conf.getLong(
+ HConstants.HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE_KEY,
+ HConstants.DEFAULT_HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE);
}
/**
@@ -1954,6 +1958,7 @@
}
if (cache.size() == 0) {
Result [] values = null;
+ long remainingResultSize = maxScannerResultSize;
int countdown = this.caching;
// We need to reset it if it's a new callable that was created
// with a countdown in nextScanner
@@ -2001,12 +2006,15 @@
if (values != null && values.length > 0) {
for (Result rs : values) {
cache.add(rs);
+ for (KeyValue kv : rs.raw()) {
+ remainingResultSize -= kv.heapSize();
+ }
countdown--;
this.lastResult = rs;
}
}
// Values == null means server-side filter has determined we must STOP
- } while (countdown > 0 && nextScanner(countdown, values == null));
+ } while (remainingResultSize > 0 && countdown > 0 && nextScanner(countdown, values == null));
}
if (cache.size() > 0) {
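As a worked example of the loop above: with hbase.client.scanner.max.result.size set
to 2 MB and rows whose KeyValues total roughly 200 KB of heap each,
remainingResultSize is exhausted after about ten rows, so each next() round trip
buffers at most ten rows even when scanner caching is configured higher; a single row
larger than the limit is still returned whole, as the HConstants javadoc notes.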
Modified: hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/client/Scan.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/client/Scan.java?rev=897547&r1=897546&r2=897547&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/client/Scan.java (original)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/client/Scan.java Sat Jan 9 21:09:59 2010
@@ -27,6 +27,9 @@
import java.util.NavigableSet;
import java.util.TreeMap;
import java.util.TreeSet;
+import java.util.HashMap;
+import java.util.Collections;
+import java.util.SortedSet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HConstants;
@@ -34,7 +37,9 @@
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.RowFilterInterface;
import org.apache.hadoop.hbase.io.TimeRange;
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Writables;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableFactories;
@@ -74,6 +79,15 @@
* execute {@link #setCacheBlocks(boolean)}.
*/
public class Scan implements Writable {
+ // An empty navigable set to be used when adding a whole family
+ private static final NavigableSet<byte[]> EMPTY_NAVIGABLE_SET
+ = new TreeSet<byte[]>();
+ // Version -1 is the first scan version. A negative number is used to
+ // distinguish it from the previous format, which used this value to encode
+ // the start-key length.
+ private static final byte SCAN_VERSION = (byte) -1;
+
+
private byte [] startRow = HConstants.EMPTY_START_ROW;
private byte [] stopRow = HConstants.EMPTY_END_ROW;
private int maxVersions = 1;
@@ -84,6 +98,9 @@
private TimeRange tr = new TimeRange();
private Map<byte [], NavigableSet<byte []>> familyMap =
new TreeMap<byte [], NavigableSet<byte []>>(Bytes.BYTES_COMPARATOR);
+ // additional data for the scan
+ protected Map<ImmutableBytesWritable, ImmutableBytesWritable> values =
+ new HashMap<ImmutableBytesWritable, ImmutableBytesWritable>();
/**
* Create a Scan operation across all rows.
@@ -153,7 +170,7 @@
*/
public Scan addFamily(byte [] family) {
familyMap.remove(family);
- familyMap.put(family, null);
+ familyMap.put(family, EMPTY_NAVIGABLE_SET);
return this;
}
@@ -271,8 +288,8 @@
* @param minStamp minimum timestamp value, inclusive
* @param maxStamp maximum timestamp value, exclusive
* @throws IOException if invalid time range
- * @see {@link #setMaxVersions()}
- * @see {@link #setMaxVersions(int)}
+ * @see #setMaxVersions()
+ * @see #setMaxVersions(int)
*/
public Scan setTimeRange(long minStamp, long maxStamp)
throws IOException {
@@ -286,8 +303,8 @@
* and you want all versions returned, up the number of versions beyond the
* defaut.
* @param timestamp version timestamp
- * @see {@link #setMaxVersions()}
- * @see {@link #setMaxVersions(int)}
+ * @see #setMaxVersions()
+ * @see #setMaxVersions(int)
*/
public Scan setTimeStamp(long timestamp) {
try {
@@ -489,6 +506,80 @@
public boolean getCacheBlocks() {
return cacheBlocks;
}
+
+ /**
+ * @param key The key.
+ * @return The value.
+ */
+ public byte[] getValue(byte[] key) {
+ return getValue(new ImmutableBytesWritable(key));
+ }
+
+ private byte[] getValue(final ImmutableBytesWritable key) {
+ ImmutableBytesWritable ibw = values.get(key);
+ if (ibw == null)
+ return null;
+ return ibw.get();
+ }
+
+ /**
+ * @param key The key.
+ * @return The value as a string.
+ */
+ public String getValue(String key) {
+ byte[] value = getValue(Bytes.toBytes(key));
+ if (value == null)
+ return null;
+ return Bytes.toString(value);
+ }
+
+ /**
+ * @return All values.
+ */
+ public Map<ImmutableBytesWritable,ImmutableBytesWritable> getValues() {
+ return Collections.unmodifiableMap(values);
+ }
+
+ /**
+ * @param key The key.
+ * @param value The value.
+ */
+ public void setValue(byte[] key, byte[] value) {
+ setValue(new ImmutableBytesWritable(key), value);
+ }
+
+ /*
+ * @param key The key.
+ * @param value The value.
+ */
+ private void setValue(final ImmutableBytesWritable key,
+ final byte[] value) {
+ values.put(key, new ImmutableBytesWritable(value));
+ }
+
+ /*
+ * @param key The key.
+ * @param value The value.
+ */
+ private void setValue(final ImmutableBytesWritable key,
+ final ImmutableBytesWritable value) {
+ values.put(key, value);
+ }
+
+ /**
+ * @param key The key.
+ * @param value The value.
+ */
+ public void setValue(String key, String value) {
+ setValue(Bytes.toBytes(key), Bytes.toBytes(value));
+ }
+
+ /**
+ * @param key Key whose entry we're to remove from this scan's values.
+ */
+ public void remove(final byte [] key) {
+ values.remove(new ImmutableBytesWritable(key));
+ }
/**
* @return String
@@ -541,6 +632,21 @@
}
}
sb.append("}");
+
+ for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
+ values.entrySet()) {
+ String key = Bytes.toString(e.getKey().get());
+ String value = Bytes.toString(e.getValue().get());
+ if (key == null) {
+ continue;
+ }
+ sb.append(", ");
+ sb.append(key);
+ sb.append(" => '");
+ sb.append(value);
+ sb.append("'");
+ }
+
return sb.toString();
}
@@ -558,7 +664,13 @@
//Writable
public void readFields(final DataInput in)
throws IOException {
- this.startRow = Bytes.readByteArray(in);
+ byte versionOrLength = in.readByte();
+ if (versionOrLength == SCAN_VERSION) {
+ this.startRow = Bytes.readByteArray(in);
+ } else {
+ int length = (int) Writables.readVLong(in, versionOrLength);
+ this.startRow = Bytes.readByteArray(in, length);
+ }
this.stopRow = Bytes.readByteArray(in);
this.maxVersions = in.readInt();
this.caching = in.readInt();
@@ -587,10 +699,22 @@
}
this.familyMap.put(family, set);
}
- }
+ this.values.clear();
+ if (versionOrLength == SCAN_VERSION) {
+ int numValues = in.readInt();
+ for (int i = 0; i < numValues; i++) {
+ ImmutableBytesWritable key = new ImmutableBytesWritable();
+ ImmutableBytesWritable value = new ImmutableBytesWritable();
+ key.readFields(in);
+ value.readFields(in);
+ values.put(key, value);
+ }
+ }
+ }
public void write(final DataOutput out)
throws IOException {
+ out.writeByte(SCAN_VERSION);
Bytes.writeByteArray(out, this.startRow);
Bytes.writeByteArray(out, this.stopRow);
out.writeInt(this.maxVersions);
@@ -624,5 +748,11 @@
out.writeInt(0);
}
}
+ out.writeInt(values.size());
+ for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
+ values.entrySet()) {
+ e.getKey().write(out);
+ e.getValue().write(out);
+ }
}
}
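A hedged usage sketch of the new Scan attribute API added above; the attribute name
"idx.hint" is a made-up illustration:

    Scan scan = new Scan();
    scan.setValue("idx.hint", "useIndex");   // store an attribute
    String hint = scan.getValue("idx.hint"); // "useIndex"
    scan.remove(Bytes.toBytes("idx.hint"));  // drop it again

The values map is serialized after the family map and guarded by the SCAN_VERSION
marker byte, so readFields can still accept the previous wire format, which began
with the start-row length instead.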
Modified: hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/filter/FilterList.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/filter/FilterList.java?rev=897547&r1=897546&r2=897547&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/filter/FilterList.java (original)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/filter/FilterList.java Sat Jan 9 21:09:59 2010
@@ -24,6 +24,7 @@
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
+import java.util.Iterator;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
@@ -156,17 +157,19 @@
}
public ReturnCode filterKeyValue(KeyValue v) {
- for (Filter filter : filters) {
+ Iterator<Filter> iter = filters.iterator();
+ while (iter.hasNext()) {
+ Filter filter = iter.next();
if (operator == Operator.MUST_PASS_ALL) {
if (filter.filterAllRemaining()) {
return ReturnCode.NEXT_ROW;
}
switch (filter.filterKeyValue(v)) {
- case INCLUDE:
- continue;
- case NEXT_ROW:
- case SKIP:
- return ReturnCode.SKIP;
+ case INCLUDE:
+ continue;
+ case NEXT_ROW:
+ case SKIP:
+ return ReturnCode.SKIP;
}
} else if (operator == Operator.MUST_PASS_ONE) {
if (filter.filterAllRemaining()) {
@@ -174,11 +177,16 @@
}
switch (filter.filterKeyValue(v)) {
- case INCLUDE:
- return ReturnCode.INCLUDE;
- case NEXT_ROW:
- case SKIP:
- continue;
+ case INCLUDE:
+ // let all the other filters look at this KeyValue since their correct operation may depend on it
+ while (iter.hasNext()){
+ Filter nextFilter = iter.next();
+ nextFilter.filterKeyValue(v);
+ }
+ return ReturnCode.INCLUDE;
+ case NEXT_ROW:
+ case SKIP:
+ continue;
}
}
}
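A hedged illustration of the MUST_PASS_ONE change; the PageFilter instances are only
placeholders for member filters:

    List<Filter> filters = new ArrayList<Filter>();
    filters.add(new PageFilter(10));
    filters.add(new PageFilter(20));
    Filter combined = new FilterList(FilterList.Operator.MUST_PASS_ONE, filters);
    // With the change above, once one member returns INCLUDE the remaining
    // members still observe the KeyValue, so stateful filters keep their
    // internal bookkeeping consistent with what the scan returns.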
Modified: hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/io/hfile/HFile.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/io/hfile/HFile.java?rev=897547&r1=897546&r2=897547&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/io/hfile/HFile.java (original)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/io/hfile/HFile.java Sat Jan 9 21:09:59 2010
@@ -154,6 +154,36 @@
public final static String DEFAULT_COMPRESSION =
DEFAULT_COMPRESSION_ALGORITHM.getName();
+ // For measuring latency of "typical" reads and writes
+ private static volatile long readOps;
+ private static volatile long readTime;
+ private static volatile long writeOps;
+ private static volatile long writeTime;
+
+ public static final long getReadOps() {
+ long ret = readOps;
+ readOps = 0;
+ return ret;
+ }
+
+ public static final long getReadTime() {
+ long ret = readTime;
+ readTime = 0;
+ return ret;
+ }
+
+ public static final long getWriteOps() {
+ long ret = writeOps;
+ writeOps = 0;
+ return ret;
+ }
+
+ public static final long getWriteTime() {
+ long ret = writeTime;
+ writeTime = 0;
+ return ret;
+ }
+
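Since each getter above returns the accumulated value and then resets it, a metrics
thread can poll them periodically to derive average latencies. A hedged sketch; the
surrounding reporter is assumed, not part of this commit:

    long ops  = HFile.getReadOps();   // count since last poll, then reset
    long time = HFile.getReadTime();  // milliseconds since last poll, then reset
    float avgReadLatencyMs = ops == 0 ? 0f : (float) time / ops;

The counters are volatile but updated without atomicity, which is presumably
acceptable for the "typical" read and write measurements the comment above describes.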
/**
* HFile Writer.
*/
@@ -320,12 +350,17 @@
*/
private void finishBlock() throws IOException {
if (this.out == null) return;
+ long now = System.currentTimeMillis();
+
int size = releaseCompressingStream(this.out);
this.out = null;
blockKeys.add(firstKey);
blockOffsets.add(Long.valueOf(blockBegin));
blockDataSizes.add(Integer.valueOf(size));
this.totalBytes += size;
+
+ writeTime += System.currentTimeMillis() - now;
+ writeOps++;
}
/*
@@ -887,6 +922,7 @@
buf.rewind();
return buf;
}
+
/**
* Read in a file block.
* @param block Index of block to read.
@@ -901,7 +937,6 @@
throw new IOException("Requested block is out of range: " + block +
", max: " + blockIndex.count);
}
-
// For any given block from any given file, synchronize reads for said
// block.
// Without a cache, this synchronizing is needless overhead, but really
@@ -921,6 +956,7 @@
}
// Load block from filesystem.
+ long now = System.currentTimeMillis();
long onDiskBlockSize;
if (block == blockIndex.count - 1) {
// last block! The end of data block is first meta block if there is
@@ -945,6 +981,9 @@
buf.limit(buf.limit() - DATABLOCKMAGIC.length);
buf.rewind();
+ readTime += System.currentTimeMillis() - now;
+ readOps++;
+
// Cache the block
if(cacheBlock && cache != null) {
cache.cacheBlock(name + block, buf.duplicate(), inMemory);
Modified: hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/master/ChangeTableState.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/master/ChangeTableState.java?rev=897547&r1=897546&r2=897547&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/master/ChangeTableState.java (original)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/master/ChangeTableState.java Sat Jan 9 21:09:59 2010
@@ -79,6 +79,14 @@
continue;
}
+ if(!this.online && this.master.regionManager.
+ isPendingOpen(i.getRegionNameAsString())) {
+ LOG.debug("Skipping region " + i.toString() +
+ " because it is pending open, will tell it to close later");
+ continue;
+ }
+
+
// Update meta table
Put put = updateRegionInfo(i);
server.put(m.getRegionName(), put);
Modified: hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/master/HMaster.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/master/HMaster.java?rev=897547&r1=897546&r2=897547&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/master/HMaster.java (original)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/master/HMaster.java Sat Jan 9 21:09:59 2010
@@ -161,6 +161,9 @@
private MasterMetrics metrics;
+ private long lastFragmentationQuery = -1L;
+ private Map<String, Integer> fragmentation = null;
+
/**
* Build the HMaster out of a raw configuration item.
* @param conf configuration
@@ -261,6 +264,7 @@
return false;
} else if(zooKeeperWrapper.writeMasterAddress(address)) {
zooKeeperWrapper.setClusterState(true);
+ zooKeeperWrapper.setClusterStateWatch(zkMasterAddressWatcher);
// Watch our own node
zooKeeperWrapper.readMasterAddress(this);
return true;
@@ -1179,6 +1183,17 @@
}
}
+ public Map<String, Integer> getTableFragmentation() throws IOException {
+ long now = System.currentTimeMillis();
+ // only check every two minutes by default
+ int check = this.conf.getInt("hbase.master.fragmentation.check.frequency", 2 * 60 * 1000);
+ if (lastFragmentationQuery == -1 || now - lastFragmentationQuery > check) {
+ fragmentation = FSUtils.getTableFragmentation(this);
+ lastFragmentationQuery = now;
+ }
+ return fragmentation;
+ }
+
/*
* Main program
*/
Modified: hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/master/ProcessRegionClose.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/master/ProcessRegionClose.java?rev=897547&r1=897546&r2=897547&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/master/ProcessRegionClose.java (original)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/master/ProcessRegionClose.java Sat Jan 9 21:09:59 2010
@@ -59,33 +59,36 @@
@Override
protected boolean process() throws IOException {
Boolean result = null;
- if (offlineRegion) {
+ if (offlineRegion || reassignRegion) {
result =
new RetryableMetaOperation<Boolean>(getMetaRegion(), this.master) {
public Boolean call() throws IOException {
- LOG.info("region closed: " + regionInfo.getRegionNameAsString());
// We can't proceed unless the meta region we are going to update
// is online. metaRegionAvailable() will put this operation on the
// delayedToDoQueue, so return true so the operation is not put
// back on the toDoQueue
- if (metaRegionAvailable()) {
+ if(offlineRegion) {
// offline the region in meta and then remove it from the
// set of regions in transition
HRegion.offlineRegionInMETA(server, metaRegionName,
regionInfo);
master.regionManager.removeRegion(regionInfo);
+ LOG.info("region closed: " + regionInfo.getRegionNameAsString());
+ } else {
+ // we are reassigning the region eventually, so set it unassigned
+ // and remove the server info
+ HRegion.cleanRegionInMETA(server, metaRegionName,
+ regionInfo);
+ master.regionManager.setUnassigned(regionInfo, false);
+ LOG.info("region set as unassigned: " + regionInfo.getRegionNameAsString());
}
return true;
}
}.doWithRetries();
result = result == null ? true : result;
- } else if (reassignRegion) {
- LOG.info("region set as unassigned: " + regionInfo.getRegionNameAsString());
- // we are reassigning the region eventually, so set it unassigned
- master.regionManager.setUnassigned(regionInfo, false);
} else {
LOG.info("Region was neither offlined, or asked to be reassigned, what gives: " +
regionInfo.getRegionNameAsString());
Modified: hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java?rev=897547&r1=897546&r2=897547&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java (original)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java Sat Jan 9 21:09:59 2010
@@ -227,4 +227,14 @@
this.interrupt();
}
}
+
+ /**
+ * Returns the current size of the queue of regions awaiting compaction.
+ *
+ * @return The current size of the regions queue.
+ */
+ public int getCompactionQueueSize() {
+ return compactionQueue.size();
+ }
}
\ No newline at end of file
Modified: hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/HLog.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/HLog.java?rev=897547&r1=897546&r2=897547&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/HLog.java (original)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/HLog.java Sat Jan 9 21:09:59 2010
@@ -130,7 +130,7 @@
Collections.synchronizedSortedMap(new TreeMap<Long, Path>());
/*
- * Map of region to last sequence/edit id.
+ * Map of regions to first sequence/edit id in their memstore.
*/
private final ConcurrentSkipListMap<byte [], Long> lastSeqWritten =
new ConcurrentSkipListMap<byte [], Long>(Bytes.BYTES_COMPARATOR);
@@ -175,6 +175,37 @@
}
}
+ // For measuring latency of writes
+ private static volatile long writeOps;
+ private static volatile long writeTime;
+ // For measuring latency of syncs
+ private static volatile long syncOps;
+ private static volatile long syncTime;
+
+ public static long getWriteOps() {
+ long ret = writeOps;
+ writeOps = 0;
+ return ret;
+ }
+
+ public static long getWriteTime() {
+ long ret = writeTime;
+ writeTime = 0;
+ return ret;
+ }
+
+ public static long getSyncOps() {
+ long ret = syncOps;
+ syncOps = 0;
+ return ret;
+ }
+
+ public static long getSyncTime() {
+ long ret = syncTime;
+ syncTime = 0;
+ return ret;
+ }
+
/**
* Create an edit log at the given <code>dir</code> location.
*
@@ -290,21 +321,21 @@
* cacheFlushLock and then completeCacheFlush could be called which would wait
* for the lock on this and consequently never release the cacheFlushLock
*
- * @return If lots of logs, flush the returned region so next time through
+ * @return If lots of logs, flush the returned regions so next time through
* we can clean logs. Returns null if nothing to flush.
* @throws FailedLogCloseException
* @throws IOException
*/
- public byte [] rollWriter() throws FailedLogCloseException, IOException {
+ public byte [][] rollWriter() throws FailedLogCloseException, IOException {
// Return if nothing to flush.
if (this.writer != null && this.numEntries.get() <= 0) {
return null;
}
- byte [] regionToFlush = null;
+ byte [][] regionsToFlush = null;
this.cacheFlushLock.lock();
try {
if (closed) {
- return regionToFlush;
+ return regionsToFlush;
}
synchronized (updateLock) {
// Clean up current writer.
@@ -330,7 +361,7 @@
}
this.outputfiles.clear();
} else {
- regionToFlush = cleanOldLogs();
+ regionsToFlush = cleanOldLogs();
}
}
this.numEntries.set(0);
@@ -340,7 +371,7 @@
} finally {
this.cacheFlushLock.unlock();
}
- return regionToFlush;
+ return regionsToFlush;
}
protected SequenceFile.Writer createWriter(Path path) throws IOException {
@@ -363,8 +394,7 @@
* we can clean logs. Returns null if nothing to flush.
* @throws IOException
*/
- private byte [] cleanOldLogs() throws IOException {
- byte [] regionToFlush = null;
+ private byte [][] cleanOldLogs() throws IOException {
Long oldestOutstandingSeqNum = getOldestOutstandingSeqNum();
// Get the set of all log files whose final ID is older than or
// equal to the oldest pending region operation
@@ -372,29 +402,60 @@
new TreeSet<Long>(this.outputfiles.headMap(
(Long.valueOf(oldestOutstandingSeqNum.longValue() + 1L))).keySet());
// Now remove old log files (if any)
- byte [] oldestRegion = null;
- if (LOG.isDebugEnabled()) {
- // Find region associated with oldest key -- helps debugging.
- oldestRegion = getOldestRegion(oldestOutstandingSeqNum);
- LOG.debug("Found " + sequenceNumbers.size() + " hlogs to remove " +
- " out of total " + this.outputfiles.size() + "; " +
- "oldest outstanding seqnum is " + oldestOutstandingSeqNum +
- " from region " + Bytes.toStringBinary(oldestRegion));
- }
- if (sequenceNumbers.size() > 0) {
+ int logsToRemove = sequenceNumbers.size();
+ if (logsToRemove > 0) {
+ if (LOG.isDebugEnabled()) {
+ // Find associated region; helps debugging.
+ byte [] oldestRegion = getOldestRegion(oldestOutstandingSeqNum);
+ LOG.debug("Found " + logsToRemove + " hlogs to remove " +
+ " out of total " + this.outputfiles.size() + "; " +
+ "oldest outstanding seqnum is " + oldestOutstandingSeqNum +
+ " from region " + Bytes.toString(oldestRegion));
+ }
for (Long seq : sequenceNumbers) {
deleteLogFile(this.outputfiles.remove(seq), seq);
}
}
- int countOfLogs = this.outputfiles.size() - sequenceNumbers.size();
- if (countOfLogs > this.maxLogs) {
- regionToFlush = oldestRegion != null?
- oldestRegion: getOldestRegion(oldestOutstandingSeqNum);
- LOG.info("Too many hlogs: logs=" + countOfLogs + ", maxlogs=" +
- this.maxLogs + "; forcing flush of region with oldest edits: " +
- Bytes.toStringBinary(regionToFlush));
+
+ // If too many log files, figure which regions we need to flush.
+ byte [][] regions = null;
+ int logCount = this.outputfiles.size() - logsToRemove;
+ if (logCount > this.maxLogs && this.outputfiles.size() > 0) {
+ regions = findMemstoresWithEditsOlderThan(this.outputfiles.firstKey(),
+ this.lastSeqWritten);
+ StringBuilder sb = new StringBuilder();
+ for (int i = 0; i < regions.length; i++) {
+ if (i > 0) sb.append(", ");
+ sb.append(Bytes.toStringBinary(regions[i]));
+ }
+ LOG.info("Too many hlogs: logs=" + logCount + ", maxlogs=" +
+ this.maxLogs + "; forcing flush of " + regions.length + " regions(s): " +
+ sb.toString());
+ }
+ return regions;
+ }
+
+ /**
+ * Return regions (memstores) that have edits that are less than the passed
+ * <code>oldestWALseqid</code>.
+ * @param oldestWALseqid
+ * @param regionsToSeqids
+ * @return All regions whose seqid is less than <code>oldestWALseqid</code>
+ * (not necessarily in order). Null if no regions found.
+ */
+ static byte [][] findMemstoresWithEditsOlderThan(final long oldestWALseqid,
+ final Map<byte [], Long> regionsToSeqids) {
+ // This method is static so it can be unit tested more easily.
+ List<byte []> regions = null;
+ for (Map.Entry<byte [], Long> e: regionsToSeqids.entrySet()) {
+ if (e.getValue().longValue() < oldestWALseqid) {
+ if (regions == null) regions = new ArrayList<byte []>();
+ regions.add(e.getKey());
+ }
}
- return regionToFlush;
+ return regions == null?
+ null: regions.toArray(new byte [][] {HConstants.EMPTY_BYTE_ARRAY});
}
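Since findMemstoresWithEditsOlderThan is static and takes a plain Map, it can be driven directly from a test. A minimal sketch under assumed JUnit conventions (the region names and seqids here are made up):

Map<byte [], Long> regionsToSeqids =
  new TreeMap<byte [], Long>(Bytes.BYTES_COMPARATOR);
for (int i = 0; i < 10; i++) {
  // Region i's oldest unflushed edit carries sequence id i.
  regionsToSeqids.put(Bytes.toBytes("region" + i), Long.valueOf(i));
}
// Only regions whose first memstore edit predates WAL seqid 5 qualify.
byte [][] regions = HLog.findMemstoresWithEditsOlderThan(5, regionsToSeqids);
assertEquals(5, regions.length);  // regions 0 through 4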
/*
@@ -537,7 +598,8 @@
long seqNum = obtainSeqNum();
logKey.setLogSeqNum(seqNum);
// The 'lastSeqWritten' map holds the sequence number of the oldest
- // write for each region. When the cache is flushed, the entry for the
+ // write for each region (i.e. the first edit added to the particular
+ // memstore). When the cache is flushed, the entry for the
// region being flushed is removed if the sequence number of the flush
// is greater than or equal to the value in lastSeqWritten.
this.lastSeqWritten.putIfAbsent(regionName, Long.valueOf(seqNum));
@@ -586,7 +648,8 @@
long seqNum [] = obtainSeqNum(edits.size());
synchronized (this.updateLock) {
// The 'lastSeqWritten' map holds the sequence number of the oldest
- // write for each region. When the cache is flushed, the entry for the
+ // write for each region (i.e. the first edit added to the particular
+ // memstore). When the cache is flushed, the entry for the
// region being flushed is removed if the sequence number of the flush
// is greater than or equal to the value in lastSeqWritten.
this.lastSeqWritten.putIfAbsent(regionName, Long.valueOf(seqNum[0]));
@@ -615,6 +678,8 @@
this.writer.sync();
}
this.unflushedEntries.set(0);
+ syncTime += System.currentTimeMillis() - lastLogFlushTime;
+ syncOps++;
}
void optionalSync() {
@@ -653,14 +718,16 @@
try {
this.editsSize.addAndGet(logKey.heapSize() + logEdit.heapSize());
this.writer.append(logKey, logEdit);
- if (sync || this.unflushedEntries.incrementAndGet() >= flushlogentries) {
- sync();
- }
long took = System.currentTimeMillis() - now;
+ writeTime += took;
+ writeOps++;
if (took > 1000) {
LOG.warn(Thread.currentThread().getName() + " took " + took +
"ms appending an edit to hlog; editcount=" + this.numEntries.get());
}
+ if (sync || this.unflushedEntries.incrementAndGet() >= flushlogentries) {
+ sync();
+ }
} catch (IOException e) {
LOG.fatal("Could not append. Requesting close of hlog", e);
requestLogRoll();
@@ -734,8 +801,11 @@
return;
}
synchronized (updateLock) {
+ long now = System.currentTimeMillis();
this.writer.append(makeKey(regionName, tableName, logSeqId, System.currentTimeMillis()),
completeCacheFlushLogEdit());
+ writeTime += System.currentTimeMillis() - now;
+ writeOps++;
this.numEntries.incrementAndGet();
Long seq = this.lastSeqWritten.get(regionName);
if (seq != null && logSeqId >= seq.longValue()) {
Modified: hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java?rev=897547&r1=897546&r2=897547&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java (original)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java Sat Jan 9 21:09:59 2010
@@ -19,53 +19,54 @@
*/
package org.apache.hadoop.hbase.regionserver;
-import java.io.IOException;
-import java.io.UnsupportedEncodingException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-import java.util.NavigableSet;
-import java.util.TreeMap;
-import java.util.TreeSet;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentSkipListMap;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicLong;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.DroppedSnapshotException;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.NotServingRegionException;
-import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.filter.Filter;
-import org.apache.hadoop.hbase.filter.RowFilterInterface;
-import org.apache.hadoop.hbase.io.HeapSize;
-import org.apache.hadoop.hbase.io.Reference.Range;
-import org.apache.hadoop.hbase.io.hfile.BlockCache;
-import org.apache.hadoop.hbase.ipc.HRegionInterface;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.ClassSize;
-import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.hbase.util.Writables;
-import org.apache.hadoop.util.Progressable;
-import org.apache.hadoop.util.StringUtils;
+ import org.apache.commons.logging.Log;
+ import org.apache.commons.logging.LogFactory;
+ import org.apache.hadoop.fs.FSDataOutputStream;
+ import org.apache.hadoop.fs.FileStatus;
+ import org.apache.hadoop.fs.FileSystem;
+ import org.apache.hadoop.fs.Path;
+ import org.apache.hadoop.hbase.DroppedSnapshotException;
+ import org.apache.hadoop.hbase.HBaseConfiguration;
+ import org.apache.hadoop.hbase.HColumnDescriptor;
+ import org.apache.hadoop.hbase.HConstants;
+ import org.apache.hadoop.hbase.HRegionInfo;
+ import org.apache.hadoop.hbase.HTableDescriptor;
+ import org.apache.hadoop.hbase.KeyValue;
+ import org.apache.hadoop.hbase.NotServingRegionException;
+ import org.apache.hadoop.hbase.client.Delete;
+ import org.apache.hadoop.hbase.client.Get;
+ import org.apache.hadoop.hbase.client.Put;
+ import org.apache.hadoop.hbase.client.Result;
+ import org.apache.hadoop.hbase.client.Scan;
+ import org.apache.hadoop.hbase.filter.Filter;
+ import org.apache.hadoop.hbase.filter.RowFilterInterface;
+ import org.apache.hadoop.hbase.io.HeapSize;
+ import org.apache.hadoop.hbase.io.Reference.Range;
+ import org.apache.hadoop.hbase.io.hfile.BlockCache;
+ import org.apache.hadoop.hbase.ipc.HRegionInterface;
+ import org.apache.hadoop.hbase.util.Bytes;
+ import org.apache.hadoop.hbase.util.ClassSize;
+ import org.apache.hadoop.hbase.util.FSUtils;
+ import org.apache.hadoop.hbase.util.Writables;
+ import org.apache.hadoop.util.Progressable;
+ import org.apache.hadoop.util.StringUtils;
+
+ import java.io.IOException;
+ import java.io.UnsupportedEncodingException;
+ import java.util.ArrayList;
+ import java.util.List;
+ import java.util.Map;
+ import java.util.NavigableSet;
+ import java.util.TreeMap;
+ import java.util.TreeSet;
+ import java.util.concurrent.ConcurrentHashMap;
+ import java.util.concurrent.ConcurrentSkipListMap;
+ import java.util.concurrent.atomic.AtomicBoolean;
+ import java.util.concurrent.atomic.AtomicLong;
+ import java.util.concurrent.locks.ReentrantReadWriteLock;
+ import java.lang.reflect.Constructor;
-/**
+ /**
* HRegion stores data for a certain region of a table. It stores all columns
* for each row. A given table consists of one or more HRegions.
*
@@ -228,7 +229,9 @@
}
/**
- * HRegion constructor.
+ * HRegion constructor. This constructor should only be used for testing and
+ * extensions. Instances of HRegion should be instantiated with the
+ * {@link org.apache.hadoop.hbase.regionserver.HRegion#newHRegion(org.apache.hadoop.fs.Path, HLog, org.apache.hadoop.fs.FileSystem, org.apache.hadoop.hbase.HBaseConfiguration, org.apache.hadoop.hbase.HRegionInfo, FlushRequester)} method.
*
* @param basedir qualified path of directory where region should be located,
* usually the table directory.
@@ -246,8 +249,10 @@
* @param flushListener an object that implements CacheFlushListener or null
* making progress to master -- otherwise master might think region deploy
* failed. Can be null.
+ *
+ * @see org.apache.hadoop.hbase.regionserver.HRegion#newHRegion(org.apache.hadoop.fs.Path, HLog, org.apache.hadoop.fs.FileSystem, org.apache.hadoop.hbase.HBaseConfiguration, org.apache.hadoop.hbase.HRegionInfo, FlushRequester)
*/
- public HRegion(Path basedir, HLog log, FileSystem fs, HBaseConfiguration conf,
+ public HRegion(Path basedir, HLog log, FileSystem fs, HBaseConfiguration conf,
HRegionInfo regionInfo, FlushRequester flushListener) {
this.basedir = basedir;
this.comparator = regionInfo.getComparator();
@@ -655,10 +660,10 @@
// Done!
// Opening the region copies the splits files from the splits directory
// under each region.
- HRegion regionA = new HRegion(basedir, log, fs, conf, regionAInfo, null);
+ HRegion regionA = HRegion.newHRegion(basedir, log, fs, conf, regionAInfo, null);
regionA.initialize(dirA, null);
regionA.close();
- HRegion regionB = new HRegion(basedir, log, fs, conf, regionBInfo, null);
+ HRegion regionB = HRegion.newHRegion(basedir, log, fs, conf, regionBInfo, null);
regionB.initialize(dirB, null);
regionB.close();
@@ -881,7 +886,7 @@
* @throws DroppedSnapshotException Thrown when replay of hlog is required
* because a Snapshot was not properly persisted.
*/
- private boolean internalFlushcache() throws IOException {
+ protected boolean internalFlushcache() throws IOException {
final long startTime = System.currentTimeMillis();
// Clear flush flag.
// Record latest flush time
@@ -908,12 +913,19 @@
this.updatesLock.writeLock().lock();
// Get current size of memstores.
final long currentMemStoreSize = this.memstoreSize.get();
+ List<StoreFlusher> storeFlushers = new ArrayList<StoreFlusher>();
try {
- for (Store s: stores.values()) {
- s.snapshot();
- }
sequenceId = log.startCacheFlush();
completeSequenceId = this.getCompleteCacheFlushSequenceId(sequenceId);
+ // create the store flushers
+ for (Store s : stores.values()) {
+ storeFlushers.add(s.getStoreFlusher(completeSequenceId));
+ }
+
+ // prepare flush (take a snapshot)
+ for (StoreFlusher flusher: storeFlushers) {
+ flusher.prepare();
+ }
} finally {
this.updatesLock.writeLock().unlock();
}
@@ -927,12 +939,29 @@
// A. Flush memstore to all the HStores.
// Keep running vector of all store files that includes both old and the
// just-made new flush store file.
- for (Store hstore: stores.values()) {
- boolean needsCompaction = hstore.flushCache(completeSequenceId);
- if (needsCompaction) {
- compactionRequested = true;
+ for (StoreFlusher flusher : storeFlushers) {
+ flusher.flushCache();
+ }
+
+ internalPreFlushcacheCommit();
+
+ // Switch between the memstore and the new store file.
+ this.newScannerLock.writeLock().lock();
+ try {
+ for (StoreFlusher flusher : storeFlushers) {
+ boolean needsCompaction = flusher.commit();
+ if (needsCompaction) {
+ compactionRequested = true;
+ }
}
+ } finally {
+ this.newScannerLock.writeLock().unlock();
}
+
+ // clear the storeFlushers list
+ storeFlushers.clear();
// Set down the memstore size by amount of flush.
this.memstoreSize.addAndGet(-currentMemStoreSize);
} catch (Throwable t) {
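The flush path above now delegates to per-store StoreFlusher objects in three phases: prepare (take the snapshot, under the updates write lock), flushCache (write the snapshot to a new store file, no locks held), and commit (swap the file in, under the scanner lock). Inferred purely from those call sites, the contract might look like the following sketch; the interface this commit actually adds may differ in detail:

interface StoreFlusher {
  // Snapshot the memstore; called while the region holds its
  // updates write lock, so it must be quick.
  void prepare();

  // Write the snapshot out to a new store file; runs without locks,
  // so reads and writes proceed concurrently.
  void flushCache() throws IOException;

  // Atomically switch scanners from the snapshot to the new file;
  // called under the region's newScannerLock write lock.
  // Returns true if the store now wants a compaction.
  boolean commit() throws IOException;
}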
@@ -974,8 +1003,18 @@
}
return compactionRequested;
}
-
- /**
+
+
+ /**
+ * A hook for subclasses wishing to perform operations prior to the cache
+ * flush commit stage.
+ *
+ * @throws java.io.IOException allows subclasses to throw an exception
+ */
+ protected void internalPreFlushcacheCommit() throws IOException {
+ }
+
+ /**
* Get the sequence number to be associated with this cache flush. Used by
* TransactionalRegion to not complete pending transactions.
*
@@ -1070,13 +1109,17 @@
scan.addFamily(family);
}
}
- return new RegionScanner(scan, additionalScanners);
-
+ return instantiateInternalScanner(scan, additionalScanners);
+
} finally {
newScannerLock.readLock().unlock();
}
}
+ protected InternalScanner instantiateInternalScanner(Scan scan, List<KeyValueScanner> additionalScanners) throws IOException {
+ return new RegionScanner(scan, additionalScanners);
+ }
+
//////////////////////////////////////////////////////////////////////////////
// set() methods for client use.
//////////////////////////////////////////////////////////////////////////////
@@ -1090,8 +1133,9 @@
throws IOException {
checkReadOnly();
checkResources();
- splitsAndClosesLock.readLock().lock();
Integer lid = null;
+ newScannerLock.writeLock().lock();
+ splitsAndClosesLock.readLock().lock();
try {
byte [] row = delete.getRow();
// If we did not pass an existing row lock, obtain a new one
@@ -1119,6 +1163,7 @@
} finally {
if(lockid == null) releaseRowLock(lid);
splitsAndClosesLock.readLock().unlock();
+ newScannerLock.writeLock().unlock();
}
}
@@ -1223,6 +1268,7 @@
// read lock, resources may run out. For now, the thought is that this
// will be extremely rare; we'll deal with it when it happens.
checkResources();
+ newScannerLock.writeLock().lock();
splitsAndClosesLock.readLock().lock();
try {
// We obtain a per-row lock, so other clients will block while one client
@@ -1249,6 +1295,7 @@
}
} finally {
splitsAndClosesLock.readLock().unlock();
+ newScannerLock.writeLock().unlock();
}
}
@@ -1822,6 +1869,45 @@
}
// Utility methods
+ /**
+ * A utility method to create new instances of HRegion based on the
+ * {@link org.apache.hadoop.hbase.HConstants#REGION_IMPL} configuration
+ * property.
+ * @param basedir qualified path of directory where region should be located,
+ * usually the table directory.
+ * @param log The HLog is the outbound log for any updates to the HRegion
+ * (There's a single HLog for all the HRegions on a single HRegionServer.)
+ * The log file is a logfile from the previous execution that's
+ * custom-computed for this HRegion. The HRegionServer computes and sorts the
+ * appropriate log info for this HRegion. If there is a previous log file
+ * (implying that the HRegion has been written-to before), then read it from
+ * the supplied path.
+ * @param fs is the filesystem.
+ * @param conf is global configuration settings.
+ * @param regionInfo - HRegionInfo that describes the region
+ * @param flushListener an object that implements CacheFlushListener,
+ * used to request cache flushes. Can be null.
+ * @return the new instance
+ */
+ public static HRegion newHRegion(Path basedir, HLog log, FileSystem fs, HBaseConfiguration conf,
+ HRegionInfo regionInfo, FlushRequester flushListener) {
+ try {
+ @SuppressWarnings("unchecked")
+ Class<? extends HRegion> regionClass =
+ (Class<? extends HRegion>) conf.getClass(HConstants.REGION_IMPL, HRegion.class);
+
+ Constructor<? extends HRegion> c =
+ regionClass.getConstructor(Path.class, HLog.class, FileSystem.class,
+ HBaseConfiguration.class, HRegionInfo.class, FlushRequester.class);
+
+ return c.newInstance(basedir, log, fs, conf, regionInfo, flushListener);
+ } catch (Throwable e) {
+ // TODO: what should be thrown here?
+ throw new IllegalStateException("Could not instantiate a region instance.", e);
+ }
+ }
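Making the region class pluggable means any substitute must expose the exact six-argument constructor that newHRegion resolves reflectively. A hedged usage sketch (MyRegion is hypothetical, and the concrete key string behind HConstants.REGION_IMPL is assumed, not quoted from this diff):

// Hypothetical subclass; the reflective lookup above requires this
// exact constructor signature.
public class MyRegion extends HRegion {
  public MyRegion(Path basedir, HLog log, FileSystem fs,
      HBaseConfiguration conf, HRegionInfo regionInfo,
      FlushRequester flushListener) {
    super(basedir, log, fs, conf, regionInfo, flushListener);
  }
}

// Wiring it in before regions are opened:
HBaseConfiguration conf = new HBaseConfiguration();
conf.setClass(HConstants.REGION_IMPL, MyRegion.class, HRegion.class);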
/**
* Convenience method creating new HRegions. Used by createTable and by the
@@ -1844,7 +1930,7 @@
Path regionDir = HRegion.getRegionDir(tableDir, info.getEncodedName());
FileSystem fs = FileSystem.get(conf);
fs.mkdirs(regionDir);
- HRegion region = new HRegion(tableDir,
+ HRegion region = HRegion.newHRegion(tableDir,
new HLog(fs, new Path(regionDir, HREGION_LOGDIR_NAME), conf, null),
fs, conf, info, null);
region.initialize(null, null);
@@ -1873,7 +1959,7 @@
if (info == null) {
throw new NullPointerException("Passed region info is null");
}
- HRegion r = new HRegion(
+ HRegion r = HRegion.newHRegion(
HTableDescriptor.getTableDir(rootDir, info.getTableDesc().getName()),
log, FileSystem.get(conf), conf, info, null);
r.initialize(null, null);
@@ -1947,10 +2033,7 @@
info.setOffline(true);
put.add(CATALOG_FAMILY, REGIONINFO_QUALIFIER, Writables.getBytes(info));
srvr.put(metaRegionName, put);
- Delete del = new Delete(row);
- del.deleteColumns(CATALOG_FAMILY, SERVER_QUALIFIER);
- del.deleteColumns(CATALOG_FAMILY, STARTCODE_QUALIFIER);
- srvr.delete(metaRegionName, del);
+ cleanRegionInMETA(srvr, metaRegionName, info);
}
/**
@@ -2186,7 +2269,7 @@
LOG.debug("Files for new region");
listPaths(fs, newRegionDir);
}
- HRegion dstRegion = new HRegion(basedir, log, fs, conf, newRegionInfo, null);
+ HRegion dstRegion = HRegion.newHRegion(basedir, log, fs, conf, newRegionInfo, null);
dstRegion.initialize(null, null);
dstRegion.compactStores();
if (LOG.isDebugEnabled()) {
@@ -2440,9 +2523,9 @@
String metaStr = Bytes.toString(HConstants.META_TABLE_NAME);
// Currently expects tables have one region only.
if (p.getName().startsWith(rootStr)) {
- region = new HRegion(p, log, fs, c, HRegionInfo.ROOT_REGIONINFO, null);
+ region = HRegion.newHRegion(p, log, fs, c, HRegionInfo.ROOT_REGIONINFO, null);
} else if (p.getName().startsWith(metaStr)) {
- region = new HRegion(p, log, fs, c, HRegionInfo.FIRST_META_REGIONINFO,
+ region = HRegion.newHRegion(p, log, fs, c, HRegionInfo.FIRST_META_REGIONINFO,
null);
} else {
throw new IOException("Not a known catalog table: " + p.toString());
Modified: hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java?rev=897547&r1=897546&r2=897547&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java (original)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java Sat Jan 9 21:09:59 2010
@@ -160,6 +160,8 @@
private final int serverLeaseTimeout;
protected final int numRegionsToReport;
+
+ private final long maxScannerResultSize;
// Remote HMaster
private HMasterRegionInterface hbaseMaster;
@@ -260,6 +262,10 @@
sleeper = new Sleeper(this.msgInterval, this.stopRequested);
+ this.maxScannerResultSize = conf.getLong(
+ HConstants.HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE_KEY,
+ HConstants.DEFAULT_HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE);
+
// Task thread to process requests from Master
this.worker = new Worker();
@@ -1081,6 +1087,8 @@
this.metrics.storefiles.set(storefiles);
this.metrics.memstoreSizeMB.set((int)(memstoreSize/(1024*1024)));
this.metrics.storefileIndexSizeMB.set((int)(storefileIndexSize/(1024*1024)));
+ this.metrics.compactionQueueSize.set(compactSplitThread.
+ getCompactionQueueSize());
LruBlockCache lruBlockCache = (LruBlockCache)StoreFile.getBlockCache(conf);
if (lruBlockCache != null) {
@@ -1546,7 +1554,7 @@
protected HRegion instantiateRegion(final HRegionInfo regionInfo)
throws IOException {
- HRegion r = new HRegion(HTableDescriptor.getTableDir(rootDir, regionInfo
+ HRegion r = HRegion.newHRegion(HTableDescriptor.getTableDir(rootDir, regionInfo
.getTableDesc().getName()), this.hlog, this.fs, conf, regionInfo,
this.cacheFlusher);
r.initialize(null, new Progressable() {
@@ -1874,12 +1882,16 @@
}
this.leases.renewLease(scannerName);
List<Result> results = new ArrayList<Result>();
- for (int i = 0; i < nbRows; i++) {
+ long currentScanResultSize = 0;
+ for (int i = 0; i < nbRows && currentScanResultSize < maxScannerResultSize; i++) {
requestCount.incrementAndGet();
// Collect values to be returned here
List<KeyValue> values = new ArrayList<KeyValue>();
boolean moreRows = s.next(values);
if (!values.isEmpty()) {
+ for (KeyValue kv : values) {
+ currentScanResultSize += kv.heapSize();
+ }
results.add(new Result(values));
}
if (!moreRows) {
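With this change a scanner's next(nbRows) call returns early once the accumulated KeyValue heap size crosses maxScannerResultSize, whichever comes first between that and nbRows. A sketch of tuning it down, assuming the constant resolves to a byte-count client setting (the concrete key string is not shown in this diff):

HBaseConfiguration conf = new HBaseConfiguration();
// Cap each scanner RPC at roughly 16MB of KeyValue heap, regardless
// of how many rows the caller asked for.
conf.setLong(HConstants.HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE_KEY,
    16L * 1024 * 1024);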
Modified: hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/KeyValueSkipListSet.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/KeyValueSkipListSet.java?rev=897547&r1=897546&r2=897547&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/KeyValueSkipListSet.java (original)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/KeyValueSkipListSet.java Sat Jan 9 21:09:59 2010
@@ -44,8 +44,8 @@
* has same attributes as ConcurrentSkipListSet: e.g. tolerant of concurrent
* get and set and won't throw ConcurrentModificationException when iterating.
*/
-class KeyValueSkipListSet implements NavigableSet<KeyValue> {
- private final ConcurrentNavigableMap<KeyValue, KeyValue> delegatee;
+class KeyValueSkipListSet implements NavigableSet<KeyValue>, Cloneable {
+ private ConcurrentNavigableMap<KeyValue, KeyValue> delegatee;
KeyValueSkipListSet(final KeyValue.KVComparator c) {
this.delegatee = new ConcurrentSkipListMap<KeyValue, KeyValue>(c);
@@ -201,4 +201,17 @@
public <T> T[] toArray(T[] a) {
throw new UnsupportedOperationException("Not implemented");
}
+
+ @Override
+ public KeyValueSkipListSet clone() {
+ assert this.delegatee.getClass() == ConcurrentSkipListMap.class;
+ KeyValueSkipListSet clonedSet = null;
+ try {
+ clonedSet = (KeyValueSkipListSet) super.clone();
+ } catch (CloneNotSupportedException e) {
+ throw new InternalError(e.getMessage());
+ }
+ clonedSet.delegatee = ((ConcurrentSkipListMap) this.delegatee).clone();
+ return clonedSet;
+ }
}
\ No newline at end of file
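The new clone() performs a shallow Object.clone of the set and then deep-copies the delegate skip list, so the copy's membership is frozen while the KeyValue instances themselves stay shared. A brief illustration, assuming same-package test code and that size() delegates to the underlying map (kv1 and kv2 stand in for any two distinct KeyValues):

KeyValueSkipListSet set = new KeyValueSkipListSet(KeyValue.COMPARATOR);
set.add(kv1);
KeyValueSkipListSet snapshot = set.clone();
set.add(kv2);                 // mutate the original after cloning
assert set.size() == 2;
assert snapshot.size() == 1;  // the clone does not see the new edit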
Modified: hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/LogRoller.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/LogRoller.java?rev=897547&r1=897546&r2=897547&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/LogRoller.java (original)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/LogRoller.java Sat Jan 9 21:09:59 2010
@@ -77,9 +77,9 @@
rollLock.lock(); // Don't interrupt us. We're working
try {
this.lastrolltime = now;
- byte [] regionToFlush = server.getLog().rollWriter();
- if (regionToFlush != null) {
- scheduleFlush(regionToFlush);
+ byte [][] regionsToFlush = server.getLog().rollWriter();
+ if (regionsToFlush != null) {
+ for (byte [] r: regionsToFlush) scheduleFlush(r);
}
} catch (FailedLogCloseException e) {
LOG.fatal("Forcing server shutdown", e);
@@ -142,4 +142,4 @@
rollLock.unlock();
}
}
-}
+}
\ No newline at end of file